| code | package | path | filename | parsed_code | quality_prob | learning_prob |
|---|---|---|---|---|---|---|
| string, 501–4.91M chars | string, 2–88 chars | string, 11–291 chars | string, 4–197 chars | string, 0–4.91M chars | float64, 0–0.99 | float64, 0.02–1 |
;window.Modernizr=function(a,b,c){function D(a){j.cssText=a}function E(a,b){return D(n.join(a+";")+(b||""))}function F(a,b){return typeof a===b}function G(a,b){return!!~(""+a).indexOf(b)}function H(a,b){for(var d in a){var e=a[d];if(!G(e,"-")&&j[e]!==c)return b=="pfx"?e:!0}return!1}function I(a,b,d){for(var e in a){var f=b[a[e]];if(f!==c)return d===!1?a[e]:F(f,"function")?f.bind(d||b):f}return!1}function J(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+p.join(d+" ")+d).split(" ");return F(b,"string")||F(b,"undefined")?H(e,b):(e=(a+" "+q.join(d+" ")+d).split(" "),I(e,b,c))}function K(){e.input=function(c){for(var d=0,e=c.length;d<e;d++)u[c[d]]=c[d]in k;return u.list&&(u.list=!!b.createElement("datalist")&&!!a.HTMLDataListElement),u}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),e.inputtypes=function(a){for(var d=0,e,f,h,i=a.length;d<i;d++)k.setAttribute("type",f=a[d]),e=k.type!=="text",e&&(k.value=l,k.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(f)&&k.style.WebkitAppearance!==c?(g.appendChild(k),h=b.defaultView,e=h.getComputedStyle&&h.getComputedStyle(k,null).WebkitAppearance!=="textfield"&&k.offsetHeight!==0,g.removeChild(k)):/^(search|tel)$/.test(f)||(/^(url|email)$/.test(f)?e=k.checkValidity&&k.checkValidity()===!1:e=k.value!=l)),t[a[d]]=!!e;return t}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var d="2.6.2",e={},f=!0,g=b.documentElement,h="modernizr",i=b.createElement(h),j=i.style,k=b.createElement("input"),l=":)",m={}.toString,n=" -webkit- -moz- -o- -ms- ".split(" "),o="Webkit Moz O ms",p=o.split(" "),q=o.toLowerCase().split(" "),r={svg:"http://www.w3.org/2000/svg"},s={},t={},u={},v=[],w=v.slice,x,y=function(a,c,d,e){var f,i,j,k,l=b.createElement("div"),m=b.body,n=m||b.createElement("body");if(parseInt(d,10))while(d--)j=b.createElement("div"),j.id=e?e[d]:h+(d+1),l.appendChild(j);return f=["­",'<style id="s',h,'">',a,"</style>"].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var 
a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; 
codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e<g;e++)d.createElement(f[e]);return d}function p(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return r.shivMethods?n(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+l().join().replace(/\w+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(r,b.frag)}function q(a){a||(a=b);var c=m(a);return r.shivCSS&&!f&&!c.hasCSS&&(c.hasCSS=!!k(a,"article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}mark{background:#FF0;color:#000}")),j||p(a,c),a}var c=a.html5||{},d=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,e=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,f,g="_html5shiv",h=0,i={},j;(function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time 
video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f<d;f++)g=a[f].split("="),(e=z[g.shift()])&&(c=e(c,g));for(f=0;f<b;f++)c=x[f](c);return c}function g(a,e,f,g,h){var i=b(a),j=i.autoCallback;i.url.split(".").pop().split("?").shift(),i.bypass||(e&&(e=d(e)?e:e[a]||e[g]||e[a.split("/").pop().split("?")[0]]),i.instead?i.instead(a,e,f,g,h):(y[i.url]?i.noexec=!0:y[i.url]=1,f.load(i.url,i.forceCSS||!i.forceJS&&"css"==i.url.split(".").pop().split("?").shift()?"c":c,i.noexec,i.attrs,i.timeout),(d(e)||d(j))&&f.load(function(){k(),e&&e(i.origUrl,h,g),j&&j(i.origUrl,h,g),y[i.url]=2})))}function h(a,b){function c(a,c){if(a){if(e(a))c||(j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}),g(a,j,b,0,h);else if(Object(a)===a)for(n in m=function(){var b=0,c;for(c in a)a.hasOwnProperty(c)&&b++;return b}(),a)a.hasOwnProperty(n)&&(!c&&!--m&&(d(j)?j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}:j[n]=function(a){return function(){var b=[].slice.call(arguments);a&&a.apply(this,b),l()}}(k[n])),g(a[n],j,b,n,h))}else!c&&l()}var h=!!a.test,i=a.load||a.both,j=a.callback||f,k=j,l=a.complete||f,m,n;c(h?a.yep:a.nope,!!i),i&&c(i)}var i,j,l=this.yepnope.loader;if(e(a))g(a,0,l,0);else if(w(a))for(i=0;i<a.length;i++)j=a[i],e(j)?g(j,0,l,0):w(j)?B(j):Object(j)===j&&h(j,l);else 
Object(a)===a&&h(a,l)},B.addPrefix=function(a,b){z[a]=b},B.addFilter=function(a){x.push(a)},B.errorTimeout=1e4,null==b.readyState&&b.addEventListener&&(b.readyState="loading",b.addEventListener("DOMContentLoaded",A=function(){b.removeEventListener("DOMContentLoaded",A,0),b.readyState="complete"},0)),a.yepnope=k(),a.yepnope.executeStack=h,a.yepnope.injectJs=function(a,c,d,e,i,j){var k=b.createElement("script"),l,o,e=e||B.errorTimeout;k.src=a;for(o in d)k.setAttribute(o,d[o]);c=j?h:c||f,k.onreadystatechange=k.onload=function(){!l&&g(k.readyState)&&(l=1,c(),k.onload=k.onreadystatechange=null)},m(function(){l||(l=1,c(1))},e),i?k.onload():n.parentNode.insertBefore(k,n)},a.yepnope.injectCss=function(a,c,d,e,g,i){var e=b.createElement("link"),j,c=i?h:c||f;e.href=a,e.rel="stylesheet",e.type="text/css";for(j in d)e.setAttribute(j,d[j]);g||(n.parentNode.insertBefore(e,n),m(c,0))}}(this,document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))};
| sciPyFoam | /sciPyFoam-0.4.1.tar.gz/sciPyFoam-0.4.1/docs/source/themes/rtd/static/js/modernizr.min.js | modernizr.min.js | parsed_code: identical to the code above | 0.039021 | 0.25243 |
import codecs
import os
import sys
import urllib.request
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from jinja2 import FileSystemLoader, Environment
import sphinx.util


class JinjaDirective(Directive):
    has_content = True
    optional_arguments = 1
    option_spec = {
        "file": directives.path,
        "header_char": directives.unchanged,
        "debug": directives.unchanged,
    }
    app = None

    def run(self):
        node = nodes.Element()
        node.document = self.state.document
        env = self.state.document.settings.env
        docname = env.docname
        template_filename = self.options.get("file")
        debug_template = self.options.get("debug")
        cxt = (self.app.config.jinja_contexts[self.arguments[0]].copy()
               if self.arguments else {})
        cxt["options"] = {
            "header_char": self.options.get("header_char")
        }
        if template_filename:
            if debug_template is not None:
                print('')
                print('********** Begin Jinja Debug Output: Template Before Processing **********')
                print('********** From {} **********'.format(docname))
                reference_uri = directives.uri(os.path.join('source', template_filename))
                # url2pathname lives in urllib.request on Python 3
                template_path = urllib.request.url2pathname(reference_uri)
                encoded_path = template_path.encode(sys.getfilesystemencoding())
                imagerealpath = os.path.abspath(encoded_path)
                with codecs.open(imagerealpath, encoding='utf-8') as f:
                    print(f.read())
                print('********** End Jinja Debug Output: Template Before Processing **********')
                print('')
            tpl = Environment(
                loader=FileSystemLoader(
                    self.app.config.jinja_base, followlinks=True)
            ).get_template(template_filename)
        else:
            if debug_template is not None:
                print('')
                print('********** Begin Jinja Debug Output: Template Before Processing **********')
                print('********** From {} **********'.format(docname))
                print('\n'.join(self.content))
                print('********** End Jinja Debug Output: Template Before Processing **********')
                print('')
            tpl = Environment(
                loader=FileSystemLoader(
                    self.app.config.jinja_base, followlinks=True)
            ).from_string('\n'.join(self.content))
        new_content = tpl.render(**cxt)
        if debug_template is not None:
            print('')
            print('********** Begin Jinja Debug Output: Template After Processing **********')
            print(new_content)
            print('********** End Jinja Debug Output: Template After Processing **********')
            print('')
        new_content = StringList(new_content.splitlines(), source='')
        sphinx.util.nested_parse_with_titles(
            self.state, new_content, node)
        return node.children


def setup(app):
    JinjaDirective.app = app
    app.add_directive('jinja', JinjaDirective)
    app.add_config_value('jinja_contexts', {}, 'env')
    app.add_config_value('jinja_base', os.path.abspath('.'), 'env')
    return {'parallel_read_safe': True, 'parallel_write_safe': True}
| sciPyFoam | /sciPyFoam-0.4.1.tar.gz/sciPyFoam-0.4.1/docs/source/_extensions/jinja.py | jinja.py | parsed_code: identical to the code above | 0.226014 | 0.101589 |
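To make the extension above concrete, here is a minimal sketch of how a Sphinx project could enable it in `conf.py` and use the directive in a page. The `_extensions` path, the `example_ctx` context name, and the sample template body are illustrative assumptions, not taken from the sciPyFoam docs; only the directive name and the `jinja_contexts`/`jinja_base` config values come from the code above.

```python
# conf.py -- hypothetical setup for the local 'jinja' extension (names are assumptions)
import os
import sys

# make the directory holding jinja.py importable so Sphinx can load it
sys.path.insert(0, os.path.abspath('_extensions'))

extensions = ['jinja']

# each key of jinja_contexts can be passed as the directive's optional argument
jinja_contexts = {
    'example_ctx': {'items': ['alpha', 'beta', 'gamma']},
}
# base directory for the FileSystemLoader used with the ':file:' option
jinja_base = os.path.abspath('.')

# A page could then render inline Jinja content like this:
#
#   .. jinja:: example_ctx
#
#      {% for it in items %}
#      - {{ it }}
#      {% endfor %}
```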
from paraview.simple import *
import paraview as pv
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# get active source.
resultfoam = GetActiveSource()
# resultfoam.SkipZeroTime = 0
# check whether T exist
convert_T=False
alldata = pv.servermanager.Fetch(resultfoam)
if(alldata.GetBlock(0).GetPointData().GetArray("T")==None):
    convert_T=False
else:
    convert_T=True
renderView1 = GetActiveViewOrCreate('RenderView')
if(convert_T):
    # create a new 'Calculator'
    calculator1 = Calculator(Input=resultfoam)
    calculator1.Function = 'T-273.15'
    calculator1.ResultArrayName = 'T_degC'
    RenameSource('K2degC', calculator1)
    # SetActiveSource(calculator1)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
resultfoamDisplay = Show(GetActiveSource(), renderView1)
# get color transfer function/color map for 'p'
pLUT = GetColorTransferFunction('T_degC')
# get opacity transfer function/opacity map for 'p'
pPWF = GetOpacityTransferFunction('T_degC')
# trace defaults for the display properties.
resultfoamDisplay.Representation = 'Surface'
# reset view to fit data
renderView1.ResetCamera()
# show color bar/color legend
resultfoamDisplay.SetScalarBarVisibility(renderView1, True)
# update the view to ensure updated data information
renderView1.Update()
# set scalar coloring
ColorBy(resultfoamDisplay, ('POINTS', 'T_degC'))
# Hide the scalar bar for this color map if no visible data is colored by it.
HideScalarBarIfNotNeeded(pLUT, renderView1)
# rescale color and/or opacity maps used to include current data range
resultfoamDisplay.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
resultfoamDisplay.SetScalarBarVisibility(renderView1, True)
tsteps = resultfoam.TimestepValues
name_time='Time_second'
if(len(tsteps)>1):
    # create a new 'Annotate Time Filter'
    annotateTimeFilter1 = AnnotateTimeFilter(Input=resultfoam)
    # get active view
    renderView1 = GetActiveViewOrCreate('RenderView')
    # show data in view
    annotateTimeFilter1Display = Show(annotateTimeFilter1, renderView1)
    # update the view to ensure updated data information
    renderView1.Update()
    # Properties modified on annotateTimeFilter1
    dt=(tsteps[-1]-tsteps[0])/(len(tsteps)-1)
    if(dt>(86400*365)):
        annotateTimeFilter1.Format = 'Time: %.0f years'
        annotateTimeFilter1.Scale = 3.17e-08
        name_time='Time_year'
    elif(dt>86400):
        annotateTimeFilter1.Format = 'Time: %.0f days'
        annotateTimeFilter1.Scale = 1.1574074074074073e-05
        name_time='Time_day'
    elif(dt>3600):
        annotateTimeFilter1.Format = 'Time: %.0f hours'
        annotateTimeFilter1.Scale = 0.0002777777777777778
        name_time='Time_hour'
    elif(dt>60):
        annotateTimeFilter1.Format = 'Time: %.0f minutes'
        annotateTimeFilter1.Scale = 0.016666666666666666
        name_time='Time_minute'
    else:
        annotateTimeFilter1.Format = 'Time: %.2f seconds'
        annotateTimeFilter1.Scale = 1
        name_time='Time_second'
    # Properties modified on annotateTimeFilter1Display
    annotateTimeFilter1Display.Bold = 1
    annotateTimeFilter1Display.FontSize = 5
    # update the view to ensure updated data information
    renderView1.Update()
    # rename source object
    RenameSource(name_time, annotateTimeFilter1)
# set active source
if(convert_T):
    SetActiveSource(calculator1)
renderView1.ResetCamera()
# current camera placement for renderView1
renderView1.CameraPosition = [2000.0, -3000.0, 7965.728650875111]
renderView1.CameraFocalPoint = [2000.0, -3000.0, 0.5]
renderView1.CameraParallelScale = 2061.5528734427357
# #### uncomment the following to render all views
# # RenderAllViews()
# # alternatively, if you want to write images, you can use SaveScreenshot(...).
renderView1.Update()
Hide(resultfoam, renderView1)
| sciPyFoam | /sciPyFoam-0.4.1.tar.gz/sciPyFoam-0.4.1/example/cases/blockMesh/showTimeYear.py | showTimeYear.py | parsed_code: identical to the code above | 0.559049 | 0.399812 |
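The ParaView script above expects an already-loaded OpenFOAM case as the active source (it starts from `GetActiveSource()`). A minimal sketch of a `pvpython` driver that satisfies that expectation is shown below; the case file name and script path are assumptions for illustration.

```python
# run_show_time.py -- hypothetical pvpython driver for the script above
# run with: pvpython run_show_time.py
from paraview.simple import OpenFOAMReader, SetActiveSource

# open the OpenFOAM case (file name is an assumption) and make it the active source
reader = OpenFOAMReader(FileName='case.foam')
reader.UpdatePipeline()
SetActiveSource(reader)

# the script picks up the case via GetActiveSource()
exec(open('showTimeYear.py').read())
```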
import logging.handlers
from datetime import datetime
from os import listdir
from pathlib import Path
class TimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    def __init__(self,
                 filename,
                 when='midnight',
                 interval=1,
                 backup_count=90,
                 encoding=None,
                 delay=False,
                 utc=False,
                 at_time=None):
        self.file = Path(filename)
        self.directory = self.file.parent
        self.directory.mkdir(parents=True, exist_ok=True)
        kwargs = {
            'when': when,
            'interval': interval,
            'backupCount': backup_count,
            'encoding': encoding,
            'delay': delay,
            'utc': utc,
            'atTime': at_time
        }
        super().__init__(filename, **kwargs)
        self.namer = self._namer
        # Add references
        self.baseFilename = self.__getattribute__('baseFilename')
        self.suffix = self.__getattribute__('suffix')
        self.extMatch = self.__getattribute__('extMatch')
        self.backupCount = self.__getattribute__('backupCount')
        self.__setattr__('getFilesToDelete', self._get_files_to_delete)

    def _namer(self, default):
        """
        Define a custom name of old files
        :param default: Used by superclass. It contains last modification time (str)
        :return: new filename (str)
        """
        fmt = self.suffix
        dtstr = default[len(self.baseFilename + '.'):]
        dt = datetime.strptime(dtstr, self.suffix)
        return self.directory / dt.strftime(f'{fmt}{self.file.suffix}')

    def _get_files_to_delete(self):
        """
        Override method of superclass because there is a custom namer function
        :return: list of files to delete
        """
        result = []
        for file in listdir(self.directory):
            if self.extMatch.match(file):
                result.append(self.directory / file)
        if len(result) >= self.backupCount:
            return sorted(result)[:len(result) - self.backupCount]
        return []
| scia | /handlers/timedRotatingFileHandler.py | timedRotatingFileHandler.py | parsed_code: identical to the code above | 0.529263 | 0.060836 |
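As a usage sketch, the handler above plugs into the standard `logging` machinery exactly like its parent class; the import path mirrors the file location `/handlers/timedRotatingFileHandler.py`, and the log path and format string are assumptions.

```python
# Hypothetical wiring of the custom handler (import path and log path are assumptions)
import logging

from handlers.timedRotatingFileHandler import TimedRotatingFileHandler

# the handler creates the parent directory itself (see __init__ above)
handler = TimedRotatingFileHandler('logs/scia.log', when='midnight', backup_count=90)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))

logger = logging.getLogger('scia')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info('timed rotating file handler attached')
```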
Changelog
=========
v0.0.8 (2022-10-21)
-------------------
### New
- `pip` package available from <pypi.org>: <https://pypi.org/project/sciapy/>
### Changes
- Regression proxy model interface and tests moved to its own package
`regressproxy` <https://regressproxy.readthedocs.io>
- Fixes `numpy` v1.23 compatibility by using `.item()` instead of `np.asscalar()`
v0.0.7 (2022-04-04)
-------------------
### New
- CI support for Python 3.8, 3.9, and 3.10
### Changes
- Fixed and updated tests to increase code coverage
- Updated AE index and Lyman-alpha data files
- Updated docs
- Uses GitHub Actions for CI and CD
- Removed Python 3.4 from CI setup, support status unclear
- Code style is more `black`-like now
v0.0.6 (2020-02-09)
-------------------
### New
- Documentation on `readthedocs` <https://sciapy.readthedocs.io>
with example notebooks
- Extensive MCMC sampler statistics
### Changes
- The local MSIS module has been extracted to its own package
called `pynrlmsise00` <https://github.com/st-bender/pynrlmsise00>
- Increased test coverage
v0.0.5 (2018-08-21)
-------------------
### New
- Enables the proxies to be scaled by cos(SZA)
- Enables the data to be split into (optionally randomized) training and test sets
- Continuous integration with https://travis-ci.org on https://travis-ci.org/st-bender/sciapy
- Includes tests, far from complete yet
- Installing with `pip`
### Other changes
- Code clean up and resource handling
v0.0.4 (2018-08-12)
-------------------
First official alpha release.
| sciapy | /sciapy-0.0.8.tar.gz/sciapy-0.0.8/CHANGES.md | CHANGES.md | parsed_code: identical to the code above | 0.883047 | 0.449936 |
# SCIAMACHY data tools
[](https://github.com/st-bender/sciapy/actions/workflows/ci_build_and_test.yml)
[](https://sciapy.rtfd.io/en/latest/?badge=latest)
[](https://coveralls.io/github/st-bender/sciapy)
[](https://scrutinizer-ci.com/g/st-bender/sciapy/?branch=master)
[](https://doi.org/10.5281/zenodo.1401370)
[](https://doi.org/10.5281/zenodo.1342701)
## Overview
These SCIAMACHY tools are provided as convenience tools for handling
SCIAMACHY level 1c limb spectra and retrieved level 2 trace-gas densities.
More extensive documentation is provided on [sciapy.rtfd.io](https://sciapy.rtfd.io).
### Level 1c tools
The `sciapy.level1c` submodule provides a few
[conversion tools](sciapy/level1c/README.md) for [SCIAMACHY](http://www.sciamachy.org)
level 1c calibrated spectra, to be used as input for trace gas retrieval with
[scia\_retrieval\_2d](https://github.com/st-bender/scia_retrieval_2d).
**Note that this is *not* a level 1b to level 1c calibration tool.**
For calibrating level 1b spectra (for example SCI\_NL\_\_1P version 8.02
provided by ESA via the
[ESA data browser](https://earth.esa.int/web/guest/data-access/browse-data-products))
to level 1c spectra, use the
[SciaL1C](https://earth.esa.int/web/guest/software-tools/content/-/article/scial1c-command-line-tool-4073)
command line tool or the free software
[nadc\_tools](https://github.com/rmvanhees/nadc_tools).
The first produces `.child` files, the second can output to HDF5 (`.h5`).
**Further note**: `.child` files are currently not supported.
### Level 2 tools
The `sciapy.level2` submodule provides
post-processing tools for trace-gas densities retrieved from SCIAMACHY limb scans.
They support simple operations such as combining files into *netcdf*, and calculating and noting
the local solar time at the retrieval grid points, geomagnetic latitudes, etc.
The level 2 tools also include a simple binning algorithm.
### Regression
The `sciapy.regress` submodule can be used for regression analysis of SCIAMACHY
level 2 trace gas density time series, either directly or as daily zonal means.
It uses the [`regressproxy`](https://regressproxy.readthedocs.io) package
for modelling the proxy input with lag and lifetime decay.
The regression tools support various parameter fitting methods using
[`scipy.optimize`](https://docs.scipy.org/doc/scipy/reference/optimize.html)
and uncertainty evaluation using Markov-Chain Monte-Carlo sampling with
[`emcee`](https://emcee.readthedocs.io).
Further supports covariance modelling via
[`celerite`](https://celerite.readthedocs.io)
and [`george`](https://george.readthedocs.io).
## Install
### Prerequisites
Sciapy uses features from a lot of different packages.
All dependencies will be automatically installed when using
`pip install` or `python setup.py`, see below.
However, to speed up the install or for use
within a `conda` environment, it may be advantageous to
install some of the important packages beforehand:
- `numpy` at least version 1.13.0 for general numerics,
- `scipy` at least version 0.17.0 for scientific numerics,
- `matplotlib` at least version 2.2 for plotting,
- `netCDF4` for the low level netcdf4 interfaces,
- `h5py` for the low level hdf5 interfaces,
- `dask`,
- `toolz`,
- `pandas` and
- `xarray` for the higher level data interfaces,
- `astropy` for (astronomical) time conversions,
- `parse` for ASCII text parsing in `level1c`,
- `pybind11` C++ interface needed by `celerite`
- `celerite` at least version 0.3.0 and
- `george` for Gaussian process modelling,
- `emcee` for MCMC sampling and
- `corner` for the sample histogram plots,
- `regressproxy` for the regression proxy modelling.
Out of these packages, `numpy` is probably the most important one
to be installed first because at least `celerite` needs it for setup.
It may also be a good idea to install
[`pybind11`](https://pybind11.readthedocs.io)
because both `celerite` and `george` use its interface,
and both may fail to install without `pybind11`.
Depending on the setup, `numpy` and `pybind11` can be installed
via `pip`:
```sh
pip install numpy pybind11
```
or [`conda`](https://conda.io):
```sh
conda install numpy pybind11
```
### sciapy
Official releases are available as `pip` packages from the main package repository,
to be found at <https://pypi.org/project/sciapy/>, and which can be installed with:
```sh
$ pip install sciapy
```
The latest development version of
sciapy can be installed with [`pip`](https://pip.pypa.io) directly
from github (see <https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support>
and <https://pip.pypa.io/en/stable/reference/pip_install/#git>):
```sh
$ pip install [-e] git+https://github.com/st-bender/sciapy.git
```
The other option is to use a local clone:
```sh
$ git clone https://github.com/st-bender/sciapy.git
$ cd sciapy
```
and then using `pip` (optionally using `-e`, see
<https://pip.pypa.io/en/stable/reference/pip_install/#install-editable>):
```sh
$ pip install [-e] .
```
or using `setup.py`:
```sh
$ python setup.py install
```
## Usage
The whole module as well as the individual submodules can be loaded as usual:
```python
>>> import sciapy
>>> import sciapy.level1c
>>> import sciapy.level2
>>> import sciapy.regress
```
Basic class and method documentation is accessible via `pydoc`:
```sh
$ pydoc sciapy
```
The submodules' documentation can be accessed with `pydoc` as well:
```sh
$ pydoc sciapy.level1c
$ pydoc sciapy.level2
$ pydoc sciapy.regress
```
## License
This python package is free software: you can redistribute it or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 (GPLv2), see [local copy](./LICENSE)
or [online version](http://www.gnu.org/licenses/gpl-2.0.html).
| sciapy | /sciapy-0.0.8.tar.gz/sciapy-0.0.8/README.md | README.md |
pip install numpy pybind11
conda install numpy pybind11
$ pip install sciapy
$ pip install [-e] git+https://github.com/st-bender/sciapy.git
$ git clone https://github.com/st-bender/sciapy.git
$ cd sciapy
$ pip install [-e] .
$ python setup.py install
>>> import sciapy
>>> import sciapy.level1c
>>> import sciapy.level2
>>> import sciapy.regress
$ pydoc sciapy
$ pydoc sciapy.level1c
$ pydoc sciapy.level2
$ pydoc sciapy.regress
| 0.532668 | 0.957477 |
Changelog
=========
v0.0.8 (2022-10-21)
-------------------
### New
- `pip` package available from <pypi.org>: <https://pypi.org/project/sciapy/>
### Changes
- Regression proxy model interface and tests moved to its own package
`regressproxy` <https://regressproxy.readthedocs.io>
- Fixes `numpy` v1.23 compatibility by using `.item()` instead of `np.asscalar()`
v0.0.7 (2022-04-04)
-------------------
### New
- CI support for Python 3.8, 3.9, and 3.10
### Changes
- Fixed and updated tests to increase code coverage
- Updated AE index and Lyman-alpha data files
- Updated docs
- Uses GitHub Actions for CI and CD
- Removed Python 3.4 from CI setup, support status unclear
- Code style is more `black`-like now
v0.0.6 (2020-02-09)
-------------------
### New
- Documentation on `readthedocs` <https://sciapy.readthedocs.io>
with example notebooks
- Extensive MCMC sampler statistics
### Changes
- The local MSIS module has been extracted to its own package
called `pynrlmsise00` <https://github.com/st-bender/pynrlmsise00>
- Increased test coverage
v0.0.5 (2018-08-21)
-------------------
### New
- Enables the proxies to be scaled by cos(SZA)
- Enables the data to be split into (optionally randomized) training and test sets
- Continuous integration with [Travis CI](https://travis-ci.org) at <https://travis-ci.org/st-bender/sciapy>
- Includes tests (coverage is far from complete yet)
- Installing with `pip`
### Other changes
- Code clean up and resource handling
v0.0.4 (2018-08-12)
-------------------
First official alpha release.
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/CHANGES.md
.. sciapy documentation master file, created by
sphinx-quickstart on Wed Mar 21 21:56:58 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
sciapy
======
SCIAMACHY level 1c, level 2 data tools and regression modelling.
The source code is `developed on Github <https://github.com/st-bender/sciapy>`_.
.. image:: https://github.com/st-bender/sciapy/actions/workflows/ci_build_and_test.yml/badge.svg?branch=master
:target: https://github.com/st-bender/sciapy/actions/workflows/ci_build_and_test.yml
:alt: builds
.. image:: https://readthedocs.org/projects/sciapy/badge/?version=latest
:target: https://sciapy.readthedocs.io/en/latest/?badge=latest
:alt: docs
.. image:: https://coveralls.io/repos/github/st-bender/sciapy/badge.svg
:target: https://coveralls.io/github/st-bender/sciapy
:alt: coveralls
.. image:: https://scrutinizer-ci.com/g/st-bender/sciapy/badges/quality-score.png?b=master
:target: https://scrutinizer-ci.com/g/st-bender/sciapy/?branch=master
:alt: scrutinizer
.. raw:: html
<br />
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.1401370.svg
:target: https://doi.org/10.5281/zenodo.1401370
:alt: doi code
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.1342701.svg
:target: https://doi.org/10.5281/zenodo.1342701
:alt: doi mcmc samples
.. toctree::
:maxdepth: 2
:caption: Introduction
README
.. toctree::
:maxdepth: 1
:caption: Tutorials
tutorials/level2_binning
tutorials/regress_intro
tutorials/regress_model_fit
.. toctree::
:maxdepth: 2
:caption: Reference
reference/index
CHANGES
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/index.rst
# SCIAMACHY data tools
[](https://github.com/st-bender/sciapy/actions/workflows/ci_build_and_test.yml)
[](https://sciapy.rtfd.io/en/latest/?badge=latest)
[](https://coveralls.io/github/st-bender/sciapy)
[](https://scrutinizer-ci.com/g/st-bender/sciapy/?branch=master)
[](https://doi.org/10.5281/zenodo.1401370)
[](https://doi.org/10.5281/zenodo.1342701)
## Overview
These SCIAMACHY tools are provided for convenient handling of
SCIAMACHY level 1c limb spectra and retrieved level 2 trace-gas densities.
More extensive documentation is provided on [sciapy.rtfd.io](https://sciapy.rtfd.io).
### Level 1c tools
The `sciapy.level1c` submodule provides a few
[conversion tools](sciapy/level1c/README.md) for [SCIAMACHY](http://www.sciamachy.org)
level 1c calibrated spectra, to be used as input for trace gas retrieval with
[scia\_retrieval\_2d](https://github.com/st-bender/scia_retrieval_2d).
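A minimal sketch of such a conversion using the `scia_limb_scan` class (the method
names follow `scripts/scia_binary_util.py` shipped with the package; the file names
are placeholders):
```python
from sciapy.level1c import scia_limb_scan

scan = scia_limb_scan()
scan.read_from_mpl_binary("SCIA_limb_scan.l_mpl_binary")  # placeholder input file
scan.write_to_textfile("SCIA_limb_scan.dat")              # placeholder output file
```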
**Note that this is *not* a level 1b to level 1c calibration tool.**
For calibrating level 1b spectra (for example SCI\_NL\_\_1P version 8.02
provided by ESA via the
[ESA data browser](https://earth.esa.int/web/guest/data-access/browse-data-products))
to level 1c spectra, use the
[SciaL1C](https://earth.esa.int/web/guest/software-tools/content/-/article/scial1c-command-line-tool-4073)
command line tool or the free software
[nadc\_tools](https://github.com/rmvanhees/nadc_tools).
The first produces `.child` files, the second can output to HDF5 (`.h5`).
**Further note**: `.child` files are currently not supported.
### Level 2 tools
The `sciapy.level2` submodule provides
post-processing tools for trace-gas densities retrieved from SCIAMACHY limb scans.
It supports simple operations such as combining files into *netcdf*, and calculating and adding the
local solar time at the retrieval grid points, geomagnetic latitudes, etc.
The level 2 tools also include a simple binning algorithm.
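A minimal sketch of how the binning can be driven, condensed from the
`scia_daily_zonal_mean.py` script shipped in `scripts/` (the input file name and its
variable layout are assumptions):
```python
import numpy as np
import pandas as pd
import xarray as xr
from sciapy.level2.binning import bin_lat_timeavg

ds = xr.open_dataset("SCIA_NO.nc")  # hypothetical level 2 orbit file
bins = np.arange(-90., 90. + 2.5, 5.)  # 5 degree latitude bin edges
# group by calendar date, then average each day into latitude bins
dates = xr.DataArray(
    pd.to_datetime(pd.DatetimeIndex(ds.time.data).date),
    coords=[ds.time], dims=["time"], name="date",
)
daily_zm = ds.groupby(dates).map(
    bin_lat_timeavg, binvar="latitude", bins=bins, area_weighted=True,
)
```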
### Regression
The `sciapy.regress` submodule can be used for regression analysis of SCIAMACHY
level 2 trace gas density time series, either directly or as daily zonal means.
It uses the [`regressproxy`](https://regressproxy.readthedocs.io) package
for modelling the proxy input with lag and lifetime decay.
The regression tools support various parameter fitting methods using
[`scipy.optimize`](https://docs.scipy.org/doc/scipy/reference/optimize.html)
and uncertainty evaluation using Markov-Chain Monte-Carlo sampling with
[`emcee`](https://emcee.readthedocs.io).
It further supports covariance modelling via
[`celerite`](https://celerite.readthedocs.io)
and [`george`](https://george.readthedocs.io).
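A minimal sketch of setting up such a proxy model, condensed from the regression
intro tutorial in the documentation (the parameter values are illustrative):
```python
import regressproxy
from sciapy.regress.load_data import load_dailymeanAE

ae_time, ae_vals = load_dailymeanAE()
ae_model = regressproxy.ProxyModel(
    ae_time, ae_vals["AE"],
    center=False,
    amp=0, lag=0, tau0=0,
    taucos1=0, tausin1=0,
    taucos2=0, tausin2=0,
    ltscan=60,
)
```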
## Install
### Prerequisites
Sciapy uses features from a lot of different packages.
All dependencies will be automatically installed when using
`pip install` or `python setup.py`, see below.
However, to speed up the install or for use
within a `conda` environment, it may be advantageous to
install some of the important packages beforehand:
- `numpy` at least version 1.13.0 for general numerics,
- `scipy` at least version 0.17.0 for scientific numerics,
- `matplotlib` at least version 2.2 for plotting,
- `netCDF4` for the low level netcdf4 interfaces,
- `h5py` for the low level hdf5 interfaces,
- `dask`,
- `toolz`,
- `pandas` and
- `xarray` for the higher level data interfaces,
- `astropy` for (astronomical) time conversions,
- `parse` for ASCII text parsing in `level1c`,
- `pybind11` C++ interface needed by `celerite`
- `celerite` at least version 0.3.0 and
- `george` for Gaussian process modelling,
- `emcee` for MCMC sampling and
- `corner` for the sample histogram plots,
- `regressproxy` for the regression proxy modelling.
Out of these packages, `numpy` is probably the most important one
to be installed first because at least `celerite` needs it for setup.
It may also be a good idea to install
[`pybind11`](https://pybind11.readthedocs.io)
because both `celerite` and `george` use its interface,
and both may fail to install without `pybind11`.
Depending on the setup, `numpy` and `pybind11` can be installed
via `pip`:
```sh
pip install numpy pybind11
```
or [`conda`](https://conda.io):
```sh
conda install numpy pybind11
```
### sciapy
Official releases are available as `pip` packages from the main package repository
at <https://pypi.org/project/sciapy/> and can be installed with:
```sh
$ pip install sciapy
```
The latest development version of
sciapy can be installed with [`pip`](https://pip.pypa.io) directly
from github (see <https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support>
and <https://pip.pypa.io/en/stable/reference/pip_install/#git>):
```sh
$ pip install [-e] git+https://github.com/st-bender/sciapy.git
```
The other option is to use a local clone:
```sh
$ git clone https://github.com/st-bender/sciapy.git
$ cd sciapy
```
and then installing with `pip` (optionally with `-e` for an editable install, see
<https://pip.pypa.io/en/stable/reference/pip_install/#install-editable>):
```sh
$ pip install [-e] .
```
or using `setup.py`:
```sh
$ python setup.py install
```
## Usage
The whole module as well as the individual submodules can be loaded as usual:
```python
>>> import sciapy
>>> import sciapy.level1c
>>> import sciapy.level2
>>> import sciapy.regress
```
Basic class and method documentation is accessible via `pydoc`:
```sh
$ pydoc sciapy
```
The submodules' documentation can be accessed with `pydoc` as well:
```sh
$ pydoc sciapy.level1c
$ pydoc sciapy.level2
$ pydoc sciapy.regress
```
## License
This Python package is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 (GPLv2); see the [local copy](./LICENSE)
or the [online version](http://www.gnu.org/licenses/gpl-2.0.html).
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/README.md
# Regression model intro
## Standard imports
First, setup some standard modules and matplotlib.
```
%matplotlib inline
%config InlineBackend.figure_format = 'png'
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
```
Load the main `sciapy` module and its wrappers for easy access to the used proxy timeseries.
```
import regressproxy
import sciapy
from sciapy.regress.load_data import load_dailymeanAE, load_dailymeanLya
plt.rcParams["figure.dpi"] = 120
```
## Model interface
The model is set up part by part, beginning with the more involved proxy models.
### Lyman-$\alpha$ proxy
We start with the Lyman-$\alpha$ proxy; it is not centered (mean-subtracted), and we set all parameters except `ltscan` to zero.
```
# load proxy data
plat, plap = load_dailymeanLya()
# setup the model
lya_model = regressproxy.ProxyModel(plat,
plap["Lya"],
center=False,
amp=0,
lag=0,
tau0=0,
taucos1=0, tausin1=0,
taucos2=0, tausin2=0,
ltscan=60)
```
### AE proxy with lifetime
The AE proxy is also not centered and we start with the same parameters as above.
```
# load proxy data
paet, paep = load_dailymeanAE()
# setup the model
ae_model = regressproxy.ProxyModel(paet,
paep["AE"],
center=False,
amp=0,
lag=0,
tau0=0,
taucos1=0, tausin1=0,
taucos2=0, tausin2=0,
ltscan=60)
```
### Offset
We use the `ConstantModel` (inherited from `celerite`) for the constant offset.
```
offset_model = regressproxy.ConstantModel(value=0.)
```
### Optional harmonic terms
The harmonic terms are not used here but we include them to show how to set them up.
```
harm1 = regressproxy.HarmonicModelCosineSine(freq=1, cos=0, sin=0)
harm2 = regressproxy.HarmonicModelCosineSine(freq=2, cos=0, sin=0)
# frequencies should not be fitted
harm1.freeze_parameter("freq")
harm2.freeze_parameter("freq")
```
### Combined model
We then combine the separate models into a `ModelSet`.
```
model = regressproxy.ProxyModelSet([("offset", offset_model),
("Lya", lya_model), ("GM", ae_model),
("f1", harm1), ("f2", harm2)])
```
The full model has the following parameters:
```
model.get_parameter_dict()
```
But we don't need all of them, so we freeze all parameters and thaw the ones we need.
This is easier than the other way around (freezing all unused parameters).
```
model.freeze_all_parameters()
model.thaw_parameter("offset:value")
model.thaw_parameter("Lya:amp")
model.thaw_parameter("GM:amp")
model.thaw_parameter("GM:tau0")
model.thaw_parameter("GM:taucos1")
model.thaw_parameter("GM:tausin1")
```
Cross check that only the used parameters are really active:
```
model.get_parameter_dict()
```
## Model parameters
### Manually setting the parameters
Now we set the model parameters to something non-trivial, in the same order as listed above:
```
model.set_parameter_vector([-25.6, 6.26, 0.0874, 1.54, 10.52, -0.714])
model.get_parameter_dict()
```
With the parameters properly set, we can now "predict" the density for any time we wish.
Here we take 25 years half-daily:
```
times = np.arange(1992, 2017.01, 0.5 / 365.25)
pred = model.get_value(times)
```
and then plot the result:
```
plt.plot(times, pred, label="model")
plt.xlabel("time [Julian epoch]")
# The data were scaled by 10^-6 before fitting
plt.ylabel("NO number density [10$^6$ cm$^{{-3}}$]")
plt.legend();
```
### Setting the parameters from file
Instead of making up some numbers for the parameters, we can take "real" ones.
We use the ones determined by fitting the model to actual data,
in this case SCIAMACHY nitric oxide daily zonal mean data.
We connect to Zenodo and load the contents into memory.
It is a rather small file, so that should be no problem, but we need the `requests` and `netCDF4` modules for that.
The alternative would be to download a copy into the same folder as this notebook.
```
import requests
import netCDF4
def load_data_store(store, variables=None):
with xr.open_dataset(store, chunks={"lat": 9, "alt": 8}) as data_ds:
if variables is not None:
data_ds = data_ds[variables]
data_ds.load()
return data_ds
def load_data_url(url, variables=None):
with requests.get(url, stream=True) as response:
nc4_ds = netCDF4.Dataset("data", memory=response.content)
store = xr.backends.NetCDF4DataStore(nc4_ds)
return load_data_store(store, variables)
zenodo_url = "https://zenodo.org/record/1342701/files/NO_regress_quantiles_pGM_Lya_ltcs_exp1dscan60d_km32.nc"
# If you downloaded a copy, use load_data_store()
# and replace the url by "/path/to/<filename.nc>"
quants = load_data_url(zenodo_url)
```
The data file contains the median together with the (0.16, 0.84), (0.025, 0.975),
and (0.001, 0.999) quantiles corresponding to the 1$\sigma$, 2$\sigma$, and 3$\sigma$ confidence regions.
In particular, the contents of the quantiles dataset are:
```
quants
```
The dimensions of the available parameters are:
```
quants.lat, quants.alt
```
We loop over the parameter names and set the parameters to the median values (`quantile=0.5`)
for the selected altitude and latitude bin.
The variables in the quantiles file were created using [celerite](https://github.com/dfm/celerite)
which prepends "mean:" to the variables from the mean model.
```
# select latitude and altitude first
latitude = 65
altitude = 70
for v in model.get_parameter_names():
model.set_parameter(v, quants["mean:{0}".format(v)].sel(alt=altitude, lat=latitude, quantile=0.5))
```
The parameters from the file are (actually pretty close to the ones above):
```
model.get_parameter_dict()
```
We take the same times as above (25 years half-daily) to predict the model values:
```
pred = model.get_value(times)
```
and then plot the result again:
```
plt.plot(times, pred, label="model")
plt.xlabel("time [Julian epoch]")
# Again, the data were scaled by 10^-6 before fitting, so adjust the y-axis label
plt.ylabel("NO number density [10$^6$ cm$^{{-3}}$]")
plt.legend();
```
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/tutorials/regress_intro.ipynb
sciapy.level1c
==============
.. automodule:: sciapy.level1c
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_limb
^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level1c.scia_limb
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_limb\_hdf5
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: sciapy.level1c.scia_limb_hdf5
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_limb\_mpl
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: sciapy.level1c.scia_limb_mpl
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_limb\_nc
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: sciapy.level1c.scia_limb_nc
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_limb\_txt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: sciapy.level1c.scia_limb_txt
:members:
:undoc-members:
:show-inheritance:
sciapy.level1c.scia\_solar
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level1c.scia_solar
:members:
:undoc-members:
:show-inheritance:
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/reference/sciapy.level1c.rst
sciapy.level2
=============
.. automodule:: sciapy.level2
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.aacgm2005
^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.aacgm2005
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.binning
^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.binning
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.density
^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.density
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.density\_pp
^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.density_pp
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.igrf
^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.igrf
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.post\_process
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.post_process
:members:
:undoc-members:
:show-inheritance:
sciapy.level2.scia\_akm
^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: sciapy.level2.scia_akm
:members:
:undoc-members:
:show-inheritance:
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/reference/sciapy.level2.rst
import sys
import optparse as op
from sciapy.level1c import scia_limb_scan
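# This helper converts SCIAMACHY level 1c limb scan files between the "mpl" binary,
# plain text, and netCDF formats (see the conversion options below), optionally
# scaling and offsetting the radiances.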
convert_options = [
op.make_option("-a", "--mpl-to-text", action="store_true", dest="mpl_to_text"),
op.make_option("-A", "--netcdf-to-text", action="store_true", dest="netcdf_to_text"),
op.make_option("-n", "--text-to-netcdf", action="store_true", dest="text_to_netcdf"),
op.make_option("-N", "--mpl-to-netcdf", action="store_true", dest="mpl_to_netcdf"),
op.make_option("-m", "--text-to-mpl", action="store_true", dest="text_to_mpl"),
op.make_option("-M", "--netcdf-to-mpl", action="store_true", dest="netcdf_to_mpl"),
]
input_options = [
op.make_option("-f", "--from-type", dest="from_type", choices=["mpl", "netcdf", "text"], default="mpl"),
op.make_option("-t", "--to-type", dest="to_type", choices=["mpl", "netcdf", "text"], default="text"),
op.make_option("-i", "--input", dest="input", default=sys.stdin, metavar="FILE"),
op.make_option("-o", "--output", dest="output", default=sys.stdout, metavar="FILE"),
]
manip_options = [
op.make_option("-u", "--multiply-by", type=float, dest="mult_factor", default=1.0, metavar="FACTOR"),
op.make_option("-d", "--add", type=float, dest="add", default=0.0, metavar="NUMBER"),
]
def read_input(sls, rtype, filename):
if rtype == "mpl":
sls.read_from_mpl_binary(filename)
elif rtype == "text":
sls.read_from_textfile(filename)
elif rtype == "netcdf":
sls.read_from_netcdf(filename)
def write_output(sls, wtype, filename):
if wtype == "mpl":
sls.write_to_mpl_binary(filename)
elif wtype == "text":
sls.write_to_textfile(filename)
elif wtype == "netcdf":
sls.write_to_netcdf(filename)
parser = op.OptionParser(option_list=input_options)
convert_group = op.OptionGroup(parser, "Conversion options",
"Instead of specifying --from-type and --to-type, these options allow"
"direct conversions between the desired formats.")
for opt in convert_options:
convert_group.add_option(opt)
parser.add_option_group(convert_group)
manip_group = op.OptionGroup(parser, "Manipulation options",
"Allows manipulation of the radiance data.")
for opt in manip_options:
manip_group.add_option(opt)
parser.add_option_group(manip_group)
(options, args) = parser.parse_args()
if options.mpl_to_text:
options.from_type = "mpl"
options.to_type = "text"
if options.netcdf_to_text:
options.from_type = "netcdf"
options.to_type = "text"
if options.text_to_netcdf:
options.from_type = "text"
options.to_type = "netcdf"
if options.mpl_to_netcdf:
options.from_type = "mpl"
options.to_type = "netcdf"
if options.text_to_mpl:
options.from_type = "text"
options.to_type = "mpl"
if options.netcdf_to_mpl:
options.from_type = "netcdf"
options.to_type = "mpl"
slscan = scia_limb_scan()
read_input(slscan, options.from_type, options.input)
#slscan = sn.scia_nadir_scan()
#read_input(slscan, options.from_type, options.input)
if options.mult_factor != 1.0 or options.add != 0.:
tmp_list = []
for rad in slscan.rad_list:
tmp_list.append(rad * options.mult_factor + options.add)
slscan.rad_list = tmp_list
#slscan.average_spectra()
write_output(slscan, options.to_type, options.output)
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/scripts/scia_binary_util.py
from __future__ import absolute_import, division, print_function
import argparse as ap
import logging
import h5py
import numpy as np
import sciapy.level1c as slvl1c
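# This script extracts SCIAMACHY limb states, selected by measurement category
# (--cat) and spectral clusters (--clus), from a level 1c HDF5 file and writes them
# as plain text and "mpl" binary files, together with the chosen solar reference
# spectrum.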
def main():
logging.basicConfig(level=logging.WARN,
format="[%(levelname)-8s] (%(asctime)s) "
"%(filename)s:%(lineno)d %(message)s",
datefmt="%Y-%m-%d %H:%M:%S %z")
parser = ap.ArgumentParser()
parser.add_argument("file", help="The input HDF5 file.",
default="SCI_NL__1PYDPA20100203_031030_000060632086_00319_41455_0002.ch1.h5")
parser.add_argument("-C", "--cat", help="The categories to extract, either a "
"single number or a comma-separated list of numbers (default: %(default)s)",
default="26,27")
parser.add_argument("-c", "--clus", help="The spectral clusters to extract, either a "
"single number or a comma-separated list of numbers (default: %(default)s)",
default="2,3,4")
parser.add_argument("-z", "--solar_id", default="D0",
choices=["D0", "D1", "D2", "E0", "E1", "A0", "A1", "N1", "N2", "N3", "N4", "N5"],
help="The solar reference ID to extract (default: %(default)s).")
loglevels = parser.add_mutually_exclusive_group()
loglevels.add_argument("-q", "--quiet", action="store_true", default=False,
help="less output, same as --loglevel=ERROR (default: %(default)s)")
loglevels.add_argument("-v", "--verbose", action="store_true", default=False,
help="verbose output, same as --loglevel=INFO (default: %(default)s)")
loglevels.add_argument("-l", "--loglevel", default="WARNING",
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help="change the loglevel (default: %(default)s)")
args = parser.parse_args()
if args.quiet:
logging.getLogger().setLevel(logging.ERROR)
elif args.verbose:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(args.loglevel)
cats = [n for n in map(int, args.cat.split(','))]
cl_ids = [n - 1 for n in map(int, args.clus.split(','))]
logging.debug("categories: %s", cats)
logging.debug("cluster ids: %s", cl_ids)
hf = h5py.File(args.file, "r")
mlt_idxs = np.array([], dtype=int)
for cat in cats:
meas_cats = hf.get("/ADS/STATES")["meas_cat"]
mlt_idxs = np.append(mlt_idxs, np.where(meas_cats == cat)[0])
logging.info("limb state indexes: %s", mlt_idxs)
for sid, lstate_id in enumerate(sorted(mlt_idxs)):
logging.info("processing limb state nr. %s (%s)...", lstate_id, sid)
slsc = slvl1c.scia_limb_scan()
# read and continue to the next state if reading failed
if slsc.read_from_hdf5(hf, lstate_id, sid, cl_ids):
continue
logging.debug("final shapes: %s (wls), %s (signal)",
slsc.wls.shape, slsc.limb_data["rad"].shape)
filename = "SCIA_limb_{0:04d}{1:02d}{2:02d}_{3:02d}{4:02d}{5:02d}_{6}_{7}_{8:05d}".format(
slsc.date[0], slsc.date[1], slsc.date[2],
slsc.date[3], slsc.date[4], slsc.date[5],
slsc.orbit_state[3], slsc.orbit_state[4],
slsc.orbit_state[0])
slsc.write_to_textfile("{0}.dat".format(filename))
logging.info("limb state nr. %s written to %s",
lstate_id, "{0}.dat".format(filename))
slsc.write_to_mpl_binary("{0}.l_mpl_binary".format(filename))
logging.info("limb state nr. %s written to %s",
lstate_id, "{0}.l_mpl_binary".format(filename))
del slsc
sol = slvl1c.scia_solar()
sol.read_from_hdf5(hf, args.solar_id)
sol_filename = ("SCIA_solar_{0:%Y%m%d}_{1:%H%M%S}_{2}_{3:05d}".format(
sol.time, sol.time, sol.solar_id, sol.orbit))
sol.write_to_textfile("{0}.dat".format(sol_filename))
logging.info("solar reference %s written to %s",
sol.solar_id, "{0}.dat".format(sol_filename))
del sol
hf.close()
if __name__ == "__main__":
main()
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/scripts/scia_conv_hdf5_limb.py
import argparse as ap
import datetime as dt
import logging
from os import path
import numpy as np
import pandas as pd
import xarray as xr
try:
from dask import compute, delayed
from dask.distributed import Client
except ImportError:
delayed = None
from sciapy.level2.binning import bin_lat_timeavg
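# This script bins SCIAMACHY level 2 (NO) data into daily zonal means over
# geographic or geomagnetic latitude bins, optionally area-weighted and optionally
# in parallel via dask.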
# non-sensible variables to drop
_drop_vars = ["NO_RSTD_cnt", "NO_RSTD_std"]
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING,
format="[%(levelname)-8s] (%(asctime)s) %(filename)s:%(lineno)d %(message)s",
datefmt="%Y-%m-%d %H:%M:%S %z")
parser = ap.ArgumentParser()
parser.add_argument("file", default="SCIA_NO.nc",
help="the filename of the input netcdf file")
parser.add_argument("-a", "--area_weighted",
action="store_true", default=True,
help="calculate the area-weighted mean within the bins")
parser.add_argument("-u", "--unweighted",
dest="area_weighted", action="store_false",
help="calculate the equally weighted mean within the bins")
parser.add_argument("-g", "--geomagnetic",
dest="geomag", action="store_true", default=False,
help="bin according to geomagnetic latitude instead of "
"geographic latitude (turns off area weighting). "
"(default: %(default)s)")
parser.add_argument("-G", "--bin_var", type=str, default=None,
help="bin according to the variable given instead of "
"geographic latitude (turns off area weighting).")
parser.add_argument("-b", "--bins", metavar="START:END:SIZE",
default="-90:90:5",
help="bins from START to END (inclusive both) with into SIZE sized bins "
"(default: %(default)s)")
parser.add_argument("-B", "--binn", metavar="START:END:NUM",
default=None,
help="bins from START to END (inclusive both) into NUM bins "
"(default: not used)")
parser.add_argument("-m", "--mlt", action="store_true", default=False,
help="indicate whether to deal with nominal or MLT data (default: False)")
parser.add_argument("-o", "--output", help="filename of the output file")
parser.add_argument("-t", "--akm_threshold", type=float, default=0.002,
help="the averaging kernel diagonal element threshold "
"for the mask calculation "
"(default: %(default)s)")
parser.add_argument("-j", "--jobs", metavar="N", type=int, default=1,
help="Use N parallel threads for binning "
"(default: %(default)s)")
loglevels = parser.add_mutually_exclusive_group()
loglevels.add_argument("-l", "--loglevel", default="WARNING",
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help="change the loglevel "
"(default: %(default)s)")
loglevels.add_argument("-q", "--quiet", action="store_true", default=False,
help="less output, same as --loglevel=ERROR "
"(default: %(default)s)")
loglevels.add_argument("-v", "--verbose", action="store_true", default=False,
help="verbose output, same as --loglevel=INFO "
"(default: %(default)s)")
args = parser.parse_args()
if args.quiet:
logging.getLogger().setLevel(logging.ERROR)
elif args.verbose:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(args.loglevel)
orbit_filename = args.file
# geomagnetic/geographic setup
if args.geomag:
logging.debug("using default geomagnetic latitudes")
binvar = "gm_lats"
args.area_weighted = False
elif args.bin_var is not None:
logging.debug("using custom latitude variable")
binvar = args.bin_var
args.area_weighted = False
else:
logging.debug("using default geographic latitudes")
binvar = "latitude"
logging.info("binning according to \"%s\"", binvar)
lats_rename_dict = {"{0}_bins".format(binvar): "latitude"}
if args.area_weighted:
logging.info("area weighted bins")
else:
logging.info("equally weighted bins")
if args.binn is None:
bin0, bin1, binstep = list(map(float, args.bins.split(':')))
bins = np.r_[bin0:bin1 + 0.5 * binstep:binstep]
else:
bin0, bin1, binnum = list(map(float, args.binn.split(':')))
bins = np.linspace(bin0, bin1, binnum + 1)
binstep = np.diff(bins)[0]
logging.debug("using %s deg sized bins: %s", binstep, bins)
if args.output is None:
output = ("scia_dzm_{0}_akm{1:.3f}_{2}{3:.0f}_{4}.nc"
.format("".join(c if c.isalnum() else '_'
for c in path.basename(orbit_filename[:-3])),
args.akm_threshold,
"geomag" if args.geomag else "geogra",
binstep,
"aw" if args.area_weighted else "nw"))
else:
output = args.output
logging.info("saving to: %s", output)
ds = xr.open_mfdataset(orbit_filename, decode_times=False,
chunks={"time": 820, "latitude": 18, "altitude": 17})
ds["longitude"].values = ds.longitude.values % 360.
if args.mlt:
# construct the time (day) bin edges from jumps in the time variable
# works reliably only for the MLT data
time_rename_dict = {"time_bins": "time"}
tbin_edges = np.concatenate([[ds.time.values[0] - 0.5],
ds.time.values[np.where(np.diff(ds.time) > 1)] + 0.01,
[ds.time.values[-1] + 0.5]])
tbin_labels = ds.time.groupby_bins("time", tbin_edges).mean("time").values
ds_bins_daily_gb = ds.groupby_bins("time", tbin_edges, labels=tbin_labels)
else:
time_rename_dict = {"date": "time"}
ds["time"] = xr.conventions.decode_cf_variable("time", ds.time)
# ds.groupby("time.date") does not work anymore :(
ds_bins_daily_gb = ds.groupby(
xr.DataArray(
pd.to_datetime(pd.DatetimeIndex(ds.time.data).date),
coords=[ds.time], dims=["time"], name="date"))
if args.jobs > 1 and delayed is not None:
# use dask.delayed and dask.compute to distribute the binning
logging.info("multi-threaded binning with dask using %s threads",
args.jobs)
binned = (delayed(bin_lat_timeavg)(
ds, binvar=binvar,
bins=bins, area_weighted=args.area_weighted)
for _, ds in iter(ds_bins_daily_gb))
client = Client()
logging.info("dask.distributed client: %s", client)
ds_bins_daily = (ds_bins_daily_gb
._combine(compute(*binned, num_workers=args.jobs))
.rename(lats_rename_dict)
.drop(_drop_vars))
else:
# serial binning with .apply()
logging.info("single-threaded binning")
ds_bins_daily = (ds_bins_daily_gb
.apply(bin_lat_timeavg,
binvar=binvar, bins=bins,
area_weighted=args.area_weighted)
.rename(lats_rename_dict)
.drop(_drop_vars))
logging.info("finished binning.")
del ds_bins_daily_gb
ds_bins_daily = ds_bins_daily.rename(time_rename_dict)
ds_bins_daily["time"].attrs = ds["time"].attrs
ds_bins_daily["time"].attrs.update(
{"axis": "T", "long_name": "measurement date"})
# construct the mask from the averaging kernel diagonal elements
ds_bins_daily["NO_MASK"] = (ds_bins_daily.NO_AKDIAG < args.akm_threshold)
ds_bins_daily["NO_MASK"].attrs = {"long_name": "density mask", "units": "1"}
# copy coordinate attributes
# "time" was already set above
for var in filter(lambda c: c != "time", ds.coords):
logging.debug("copying coordinate attributes for: %s", var)
ds_bins_daily[var].attrs = ds[var].attrs
if args.geomag:
ds_bins_daily["latitude"].attrs.update(
{"long_name": "geomagnetic_latitude"})
# copy global attributes
ds_bins_daily.attrs = ds.attrs
# note binning time
ds_bins_daily.attrs["binned_on"] = (dt.datetime.utcnow()
.replace(tzinfo=dt.timezone.utc)
.strftime("%a %b %d %Y %H:%M:%S %Z"))
ds_bins_daily.attrs["latitude_bin_type"] = \
"geomagnetic" if args.geomag else "geographic"
ds_bins_daily.to_netcdf(output, unlimited_dims=["time"])
/sciapy-0.0.8.tar.gz/sciapy-0.0.8/scripts/scia_daily_zonal_mean.py
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scib_metrics.utils import compute_simpson_index, convert_knn_graph_to_idx
def lisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None) -> np.ndarray:
"""Compute the local inverse simpson index (LISI) for each cell :cite:p:`korsunsky2019harmony`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
Returns
-------
lisi
Array of shape (n_cells,) with the LISI score for each cell.
"""
labels = np.asarray(pd.Categorical(labels).codes)
knn_dists, knn_idx = convert_knn_graph_to_idx(X)
if perplexity is None:
perplexity = np.floor(knn_idx.shape[1] / 3)
n_labels = len(np.unique(labels))
simpson = compute_simpson_index(knn_dists, knn_idx, labels, n_labels, perplexity=perplexity)
return 1 / simpson
def ilisi_knn(X: csr_matrix, batches: np.ndarray, perplexity: float = None, scale: bool = True) -> np.ndarray:
"""Compute the integration local inverse simpson index (iLISI) for each cell :cite:p:`korsunsky2019harmony`.
Returns a scaled version of the iLISI score for each cell, by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
batches
Array of shape (n_cells,) representing batch values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
ilisi
Median of the per-cell iLISI scores, scaled to the range [0, 1] if ``scale=True``.
"""
batches = np.asarray(pd.Categorical(batches).codes)
lisi = lisi_knn(X, batches, perplexity=perplexity)
ilisi = np.nanmedian(lisi)
if scale:
nbatches = len(np.unique(batches))
ilisi = (ilisi - 1) / (nbatches - 1)
return ilisi
def clisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None, scale: bool = True) -> np.ndarray:
"""Compute the cell-type local inverse simpson index (cLISI) for each cell :cite:p:`korsunsky2019harmony`.
Returns a scaled version of the cLISI score for each cell, by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing cell type label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
clisi
Median of the per-cell cLISI scores, scaled to the range [0, 1] if ``scale=True``.
"""
labels = np.asarray(pd.Categorical(labels).codes)
lisi = lisi_knn(X, labels, perplexity=perplexity)
clisi = np.nanmedian(lisi)
if scale:
nlabels = len(np.unique(labels))
clisi = (nlabels - clisi) / (nlabels - 1)
return clisi
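# Illustrative usage sketch (not part of the module): assuming a precomputed kNN
# graph of distances as a scipy.sparse.csr_matrix and per-cell annotations,
#
#     ilisi = ilisi_knn(knn_dists, batches)     # scaled to [0, 1] by default
#     clisi = clisi_knn(knn_dists, cell_types)  # scaled to [0, 1] by default
#
# with scale=True, higher values are better for both scores.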
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_lisi.py
|
_lisi.py
|
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scib_metrics.utils import compute_simpson_index, convert_knn_graph_to_idx
def lisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None) -> np.ndarray:
"""Compute the local inverse simpson index (LISI) for each cell :cite:p:`korsunsky2019harmony`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
Returns
-------
lisi
Array of shape (n_cells,) with the LISI score for each cell.
"""
labels = np.asarray(pd.Categorical(labels).codes)
knn_dists, knn_idx = convert_knn_graph_to_idx(X)
if perplexity is None:
perplexity = np.floor(knn_idx.shape[1] / 3)
n_labels = len(np.unique(labels))
simpson = compute_simpson_index(knn_dists, knn_idx, labels, n_labels, perplexity=perplexity)
return 1 / simpson
def ilisi_knn(X: csr_matrix, batches: np.ndarray, perplexity: float = None, scale: bool = True) -> np.ndarray:
"""Compute the integration local inverse simpson index (iLISI) for each cell :cite:p:`korsunsky2019harmony`.
Returns a scaled version of the iLISI score for each cell, by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
batches
Array of shape (n_cells,) representing batch values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
ilisi
Median iLISI score across cells, scaled into [0, 1] if `scale=True` (higher is better).
"""
batches = np.asarray(pd.Categorical(batches).codes)
lisi = lisi_knn(X, batches, perplexity=perplexity)
ilisi = np.nanmedian(lisi)
if scale:
nbatches = len(np.unique(batches))
ilisi = (ilisi - 1) / (nbatches - 1)
return ilisi
def clisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None, scale: bool = True) -> float:
"""Compute the cell-type local inverse Simpson index (cLISI) :cite:p:`korsunsky2019harmony`.
Returns the median of the per-cell cLISI scores, scaled into the range [0, 1] by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing cell type label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
clisi
Median cLISI score across cells, scaled into [0, 1] if `scale=True` (higher is better).
"""
labels = np.asarray(pd.Categorical(labels).codes)
lisi = lisi_knn(X, labels, perplexity=perplexity)
clisi = np.nanmedian(lisi)
if scale:
nlabels = len(np.unique(labels))
clisi = (nlabels - clisi) / (nlabels - 1)
return clisi
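# Usage sketch (illustrative only): these metrics expect a sparse kNN graph whose
# non-zero entries are distances to exactly k neighbors per cell. The embedding,
# batch and label arrays below are synthetic stand-ins, and scikit-learn is
# assumed to be available for building the graph.
if __name__ == "__main__":
    from sklearn.neighbors import kneighbors_graph

    rng = np.random.default_rng(0)
    X_emb = rng.normal(size=(500, 20))
    batches = rng.choice(["b1", "b2"], size=500)
    cell_types = rng.choice(["t1", "t2", "t3"], size=500)
    knn_graph = kneighbors_graph(X_emb, n_neighbors=90, mode="distance")
    print(ilisi_knn(knn_graph, batches))     # scaled median iLISI in [0, 1]
    print(clisi_knn(knn_graph, cell_types))  # scaled median cLISI in [0, 1]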
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_isolated_labels.py | _isolated_labels.py |
import logging
from typing import Optional, Union
import numpy as np
import pandas as pd
from ._silhouette import silhouette_label
logger = logging.getLogger(__name__)
def isolated_labels(
X: np.ndarray,
labels: np.ndarray,
batch: np.ndarray,
iso_threshold: Optional[int] = None,
) -> float:
"""Isolated label score :cite:p:`luecken2022benchmarking`.
Score how well isolated labels are distinguished in the dataset by the
average silhouette width (ASW) of each isolated label vs all other labels.
The default of the original scib package is to use a cluster-based F1 scoring
procedure, but here we use the ASW for speed and simplicity.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
batch
Array of shape (n_cells,) representing batch values
iso_threshold
Max number of batches per label for label to be considered as
isolated, if integer. If `None`, considers minimum number of
batches that labels are present in
Returns
-------
isolated_label_score
"""
scores = {}
isolated_labels = _get_isolated_labels(labels, batch, iso_threshold)
for label in isolated_labels:
score = _score_isolated_label(X, labels, label)
scores[label] = score
scores = pd.Series(scores)
return scores.mean()
def _score_isolated_label(
X: np.ndarray,
labels: np.ndarray,
isolated_label: Union[str, float, int],
):
"""Compute label score for a single label."""
mask = labels == isolated_label
score = silhouette_label(X, mask.astype(np.float32))
logging.info(f"{isolated_label}: {score}")
return score
def _get_isolated_labels(labels: np.ndarray, batch: np.ndarray, iso_threshold: float):
"""Get labels that are isolated depending on the number of batches."""
tmp = pd.DataFrame()
label_key = "label"
batch_key = "batch"
tmp[label_key] = labels
tmp[batch_key] = batch
tmp = tmp.drop_duplicates()
batch_per_lab = tmp.groupby(label_key).agg({batch_key: "count"})
# threshold for determining when label is considered isolated
if iso_threshold is None:
iso_threshold = batch_per_lab.min().tolist()[0]
logging.info(f"isolated labels: no more than {iso_threshold} batches per label")
labels = batch_per_lab[batch_per_lab[batch_key] <= iso_threshold].index.tolist()
if len(labels) == 0:
logging.info(f"no isolated labels with less than {iso_threshold} batches")
return np.array(labels)
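# Usage sketch (illustrative only, synthetic data): labels that occur in at most
# `iso_threshold` batches are treated as isolated and scored by ASW against all
# other labels; random data will not give meaningful scores.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(300, 10))
    labels = rng.choice(["a", "b", "c"], size=300)
    batch = rng.choice(["batch1", "batch2", "batch3"], size=300)
    print(isolated_labels(X, labels, batch))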
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_nmi_ari.py | _nmi_ari.py |
import logging
import warnings
from typing import Dict, Tuple
import numpy as np
import scanpy as sc
from scipy.sparse import spmatrix
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from sklearn.utils import check_array
from .utils import KMeans, check_square
logger = logging.getLogger(__name__)
def _compute_clustering_kmeans(X: np.ndarray, n_clusters: int) -> np.ndarray:
kmeans = KMeans(n_clusters)
kmeans.fit(X)
return kmeans.labels_
def _compute_clustering_leiden(connectivity_graph: spmatrix, resolution: float) -> np.ndarray:
g = sc._utils.get_igraph_from_adjacency(connectivity_graph)
clustering = g.community_leiden(objective_function="modularity", weights="weight", resolution_parameter=resolution)
clusters = clustering.membership
return np.asarray(clusters)
def _compute_nmi_ari_cluster_labels(
X: np.ndarray,
labels: np.ndarray,
resolution: float = 1.0,
) -> Tuple[float, float]:
labels_pred = _compute_clustering_leiden(X, resolution)
nmi = normalized_mutual_info_score(labels, labels_pred, average_method="arithmetic")
ari = adjusted_rand_score(labels, labels_pred)
return nmi, ari
def nmi_ari_cluster_labels_kmeans(X: np.ndarray, labels: np.ndarray) -> Dict[str, float]:
"""Compute nmi and ari between k-means clusters and labels.
This deviates from the original implementation in scib by using k-means
with k equal to the known number of cell types/labels. This leads to
a more efficient computation of the nmi and ari scores.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
Returns
-------
nmi
Normalized mutual information score
ari
Adjusted rand index score
"""
X = check_array(X, accept_sparse=False, ensure_2d=True)
n_clusters = len(np.unique(labels))
labels_pred = _compute_clustering_kmeans(X, n_clusters)
nmi = normalized_mutual_info_score(labels, labels_pred, average_method="arithmetic")
ari = adjusted_rand_score(labels, labels_pred)
return {"nmi": nmi, "ari": ari}
def nmi_ari_cluster_labels_leiden(
X: spmatrix, labels: np.ndarray, optimize_resolution: bool = True, resolution: float = 1.0, n_jobs: int = 1
) -> Dict[str, float]:
"""Compute nmi and ari between leiden clusters and labels.
This deviates from the original implementation in scib by using Leiden instead of
Louvain clustering. Installing joblib allows for parallelization of the Leiden
resolution optimization.
Parameters
----------
X
Array of shape (n_cells, n_cells) representing a connectivity graph.
Values should represent weights between pairs of neighbors, with a higher weight
indicating more connected.
labels
Array of shape (n_cells,) representing label values
optimize_resolution
Whether to optimize the resolution parameter of leiden clustering by searching over
10 values
resolution
Resolution parameter of leiden clustering. Only used if optimize_resolution is False.
n_jobs
Number of jobs for parallelizing resolution optimization via joblib. If -1, all CPUs
are used.
Returns
-------
nmi
Normalized mutual information score
ari
Adjusted rand index score
"""
X = check_array(X, accept_sparse=True, ensure_2d=True)
check_square(X)
if optimize_resolution:
n = 10
resolutions = np.array([2 * x / n for x in range(1, n + 1)])
try:
from joblib import Parallel, delayed
out = Parallel(n_jobs=n_jobs)(delayed(_compute_nmi_ari_cluster_labels)(X, labels, r) for r in resolutions)
except ImportError:
warnings.warn("Using for loop over clustering resolutions. `pip install joblib` for parallelization.")
out = [_compute_nmi_ari_cluster_labels(X, labels, r) for r in resolutions]
nmi_ari = np.array(out)
nmi_ind = np.argmax(nmi_ari[:, 0])
nmi, ari = nmi_ari[nmi_ind, :]
return {"nmi": nmi, "ari": ari}
else:
nmi, ari = _compute_nmi_ari_cluster_labels(X, labels, resolution)
return {"nmi": nmi, "ari": ari}
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_silhouette.py | _silhouette.py |
import numpy as np
import pandas as pd
from scib_metrics.utils import silhouette_samples
def silhouette_label(X: np.ndarray, labels: np.ndarray, rescale: bool = True, chunk_size: int = 256) -> float:
"""Average silhouette width (ASW) :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
rescale
Scale asw into the range [0, 1].
chunk_size
Size of chunks to process at a time for distance computation.
Returns
-------
silhouette score
"""
asw = np.mean(silhouette_samples(X, labels, chunk_size=chunk_size))
if rescale:
asw = (asw + 1) / 2
return asw
def silhouette_batch(
X: np.ndarray, labels: np.ndarray, batch: np.ndarray, rescale: bool = True, chunk_size: int = 256
) -> float:
"""Average silhouette width (ASW) with respect to batch ids within each label :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
batch
Array of shape (n_cells,) representing batch values
rescale
Scale asw into the range [0, 1]. If True, higher values are better.
chunk_size
Size of chunks to process at a time for distance computation.
Returns
-------
silhouette score
"""
sil_dfs = []
unique_labels = np.unique(labels)
for group in unique_labels:
labels_mask = labels == group
X_subset = X[labels_mask]
batch_subset = batch[labels_mask]
n_batches = len(np.unique(batch_subset))
if (n_batches == 1) or (n_batches == X_subset.shape[0]):
continue
sil_per_group = silhouette_samples(X_subset, batch_subset, chunk_size=chunk_size)
# take only absolute value
sil_per_group = np.abs(sil_per_group)
if rescale:
# scale s.t. highest number is optimal
sil_per_group = 1 - sil_per_group
sil_dfs.append(
pd.DataFrame(
{
"group": [group] * len(sil_per_group),
"silhouette_score": sil_per_group,
}
)
)
sil_df = pd.concat(sil_dfs).reset_index(drop=True)
sil_means = sil_df.groupby("group").mean()
asw = sil_means["silhouette_score"].mean()
return asw
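# Usage sketch (illustrative only, synthetic data): `silhouette_label` rewards
# separation of labels, while `silhouette_batch` rewards mixing of batches within
# each label (both rescaled so that higher is better by default).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.concatenate([rng.normal(loc=i, size=(100, 8)) for i in range(2)])
    labels = np.repeat([0, 1], 100)
    batch = rng.integers(0, 2, size=200)
    print(silhouette_label(X, labels))
    print(silhouette_batch(X, labels, batch))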
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_settings.py | _settings.py |
import logging
import os
from typing import Literal, Union
from rich.console import Console
from rich.logging import RichHandler
scib_logger = logging.getLogger("scib_metrics")
class ScibConfig:
"""Config manager for scib-metrics.
Examples
--------
To set the progress bar style, choose one of "rich", "tqdm"
>>> scib_metrics.settings.progress_bar_style = "rich"
To set the verbosity
>>> import logging
>>> scib_metrics.settings.verbosity = logging.INFO
"""
def __init__(
self,
verbosity: int = logging.INFO,
progress_bar_style: Literal["rich", "tqdm"] = "tqdm",
jax_preallocate_gpu_memory: bool = False,
):
if progress_bar_style not in ["rich", "tqdm"]:
raise ValueError("Progress bar style must be in ['rich', 'tqdm']")
self.progress_bar_style = progress_bar_style
self.jax_preallocate_gpu_memory = jax_preallocate_gpu_memory
self.verbosity = verbosity
@property
def progress_bar_style(self) -> str:
"""Library to use for progress bar."""
return self._pbar_style
@progress_bar_style.setter
def progress_bar_style(self, pbar_style: Literal["tqdm", "rich"]):
"""Library to use for progress bar."""
self._pbar_style = pbar_style
@property
def verbosity(self) -> int:
"""Verbosity level (default `logging.INFO`).
Returns
-------
verbosity: int
"""
return self._verbosity
@verbosity.setter
def verbosity(self, level: Union[str, int]):
"""Set verbosity level.
If "scib_metrics" logger has no StreamHandler, add one.
Else, set its level to `level`.
Parameters
----------
level
Sets "scib_metrics" logging level to `level`
"""
self._verbosity = level
scib_logger.setLevel(level)
if len(scib_logger.handlers) == 0:
console = Console(force_terminal=True)
if console.is_jupyter is True:
console.is_jupyter = False
ch = RichHandler(level=level, show_path=False, console=console, show_time=False)
formatter = logging.Formatter("%(message)s")
ch.setFormatter(formatter)
scib_logger.addHandler(ch)
else:
scib_logger.setLevel(level)
def reset_logging_handler(self) -> None:
"""Reset "scib_metrics" log handler to a basic RichHandler().
This is useful if piping outputs to a file.
Returns
-------
None
"""
scib_logger.removeHandler(scib_logger.handlers[0])
ch = RichHandler(level=self._verbosity, show_path=False, show_time=False)
formatter = logging.Formatter("%(message)s")
ch.setFormatter(formatter)
scib_logger.addHandler(ch)
def jax_fix_no_kernel_image(self) -> None:
"""Fix for JAX error "No kernel image is available for execution on the device"."""
os.environ["XLA_FLAGS"] = "--xla_gpu_force_compilation_parallelism=1"
@property
def jax_preallocate_gpu_memory(self):
"""Jax GPU memory allocation settings.
If False, JAX will only allocate GPU memory as it is needed rather than preallocating.
If float in (0, 1), Jax will preallocate GPU memory to that
fraction of the GPU memory.
Returns
-------
jax_preallocate_gpu_memory: bool or float
"""
return self._jax_gpu
@jax_preallocate_gpu_memory.setter
def jax_preallocate_gpu_memory(self, value: Union[float, bool]):
# see https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html#gpu-memory-allocation
if value is False:
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
elif isinstance(value, float):
if value >= 1 or value <= 0:
raise ValueError("Need to use a value between 0 and 1")
# format is ".XX"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(value)[1:4]
else:
raise ValueError("value not understood, need bool or float in (0, 1)")
self._jax_gpu = value
settings = ScibConfig()
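# Usage sketch (illustrative values): the module-level `settings` object controls
# logging verbosity and JAX GPU memory preallocation, e.g.
#
#   >>> import logging
#   >>> import scib_metrics
#   >>> scib_metrics.settings.verbosity = logging.DEBUG
#   >>> scib_metrics.settings.jax_preallocate_gpu_memory = 0.5  # preallocate 50% of GPU memory
#   >>> scib_metrics.settings.reset_logging_handler()  # e.g. when piping output to a file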
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/benchmark/_core.py | _core.py |
import os
import warnings
from dataclasses import asdict, dataclass
from enum import Enum
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from plottable import ColumnDefinition, Table
from plottable.cmap import normed_cmap
from plottable.plots import bar
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import scib_metrics
from scib_metrics.nearest_neighbors import NeighborsOutput, pynndescent
Kwargs = Dict[str, Any]
MetricType = Union[bool, Kwargs]
_LABELS = "labels"
_BATCH = "batch"
_X_PRE = "X_pre"
_METRIC_TYPE = "Metric Type"
_AGGREGATE_SCORE = "Aggregate score"
# Mapping of metric fn names to clean DataFrame column names
metric_name_cleaner = {
"silhouette_label": "Silhouette label",
"silhouette_batch": "Silhouette batch",
"isolated_labels": "Isolated labels",
"nmi_ari_cluster_labels_leiden_nmi": "Leiden NMI",
"nmi_ari_cluster_labels_leiden_ari": "Leiden ARI",
"nmi_ari_cluster_labels_kmeans_nmi": "KMeans NMI",
"nmi_ari_cluster_labels_kmeans_ari": "KMeans ARI",
"clisi_knn": "cLISI",
"ilisi_knn": "iLISI",
"kbet_per_label": "KBET",
"graph_connectivity": "Graph connectivity",
"pcr_comparison": "PCR comparison",
}
@dataclass(frozen=True)
class BioConservation:
"""Specification of bio conservation metrics to run in the pipeline.
Metrics can be included using a boolean flag. Custom keyword args can be
used by passing a dictionary here. Keyword args should not set data-related
parameters, such as `X` or `labels`.
"""
isolated_labels: MetricType = True
nmi_ari_cluster_labels_leiden: MetricType = False
nmi_ari_cluster_labels_kmeans: MetricType = True
silhouette_label: MetricType = True
clisi_knn: MetricType = True
@dataclass(frozen=True)
class BatchCorrection:
"""Specification of which batch correction metrics to run in the pipeline.
Metrics can be included using a boolean flag. Custom keyword args can be
used by passing a dictionary here. Keyword args should not set data-related
parameters, such as `X` or `labels`.
"""
silhouette_batch: MetricType = True
ilisi_knn: MetricType = True
kbet_per_label: MetricType = True
graph_connectivity: MetricType = True
pcr_comparison: MetricType = True
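# Usage sketch (illustrative values): each field accepts either a boolean flag or
# a dict of keyword arguments forwarded to the corresponding metric function, as
# long as those kwargs match the metric's signature, e.g.
#
#   bio_metrics = BioConservation(nmi_ari_cluster_labels_leiden={"n_jobs": -1}, clisi_knn=False)
#   batch_metrics = BatchCorrection(kbet_per_label=False)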
class MetricAnnDataAPI(Enum):
"""Specification of the AnnData API for a metric."""
isolated_labels = lambda ad, fn: fn(ad.X, ad.obs[_LABELS], ad.obs[_BATCH])
nmi_ari_cluster_labels_leiden = lambda ad, fn: fn(ad.obsp["15_connectivities"], ad.obs[_LABELS])
nmi_ari_cluster_labels_kmeans = lambda ad, fn: fn(ad.X, ad.obs[_LABELS])
silhouette_label = lambda ad, fn: fn(ad.X, ad.obs[_LABELS])
clisi_knn = lambda ad, fn: fn(ad.obsp["90_distances"], ad.obs[_LABELS])
graph_connectivity = lambda ad, fn: fn(ad.obsp["15_distances"], ad.obs[_LABELS])
silhouette_batch = lambda ad, fn: fn(ad.X, ad.obs[_LABELS], ad.obs[_BATCH])
pcr_comparison = lambda ad, fn: fn(ad.obsm[_X_PRE], ad.X, ad.obs[_BATCH], categorical=True)
ilisi_knn = lambda ad, fn: fn(ad.obsp["90_distances"], ad.obs[_BATCH])
kbet_per_label = lambda ad, fn: fn(ad.obsp["50_connectivities"], ad.obs[_BATCH], ad.obs[_LABELS])
class Benchmarker:
"""Benchmarking pipeline for the single-cell integration task.
Parameters
----------
adata
AnnData object containing the raw count data and integrated embeddings as obsm keys.
batch_key
Key in `adata.obs` that contains the batch information.
label_key
Key in `adata.obs` that contains the cell type labels.
embedding_obsm_keys
List of obsm keys that contain the embeddings to be benchmarked.
bio_conservation_metrics
Specification of which bio conservation metrics to run in the pipeline.
batch_correction_metrics
Specification of which batch correction metrics to run in the pipeline.
pre_integrated_embedding_obsm_key
Obsm key containing a non-integrated embedding of the data. If `None`, the embedding will be computed
in the prepare step. See the notes below for more information.
n_jobs
Number of jobs to use for parallelization of neighbor search.
Notes
-----
`adata.X` should contain a form of the data that is not integrated, but is normalized. The `prepare` method will
use `adata.X` for PCA via :func:`~scanpy.tl.pca` with `use_highly_variable=False`, i.e., all features are used.
See further usage examples in the following tutorial:
1. :doc:`/notebooks/lung_example`
"""
def __init__(
self,
adata: AnnData,
batch_key: str,
label_key: str,
embedding_obsm_keys: List[str],
bio_conservation_metrics: Optional[BioConservation] = None,
batch_correction_metrics: Optional[BatchCorrection] = None,
pre_integrated_embedding_obsm_key: Optional[str] = None,
n_jobs: int = 1,
):
self._adata = adata
self._embedding_obsm_keys = embedding_obsm_keys
self._pre_integrated_embedding_obsm_key = pre_integrated_embedding_obsm_key
self._bio_conservation_metrics = bio_conservation_metrics if bio_conservation_metrics else BioConservation()
self._batch_correction_metrics = batch_correction_metrics if batch_correction_metrics else BatchCorrection()
self._results = pd.DataFrame(columns=list(self._embedding_obsm_keys) + [_METRIC_TYPE])
self._emb_adatas = {}
self._neighbor_values = (15, 50, 90)
self._prepared = False
self._benchmarked = False
self._batch_key = batch_key
self._label_key = label_key
self._n_jobs = n_jobs
self._metric_collection_dict = {
"Bio conservation": self._bio_conservation_metrics,
"Batch correction": self._batch_correction_metrics,
}
def prepare(self, neighbor_computer: Optional[Callable[[np.ndarray, int], NeighborsOutput]] = None) -> None:
"""Prepare the data for benchmarking.
Parameters
----------
neighbor_computer
Function that computes the neighbors of the data. If `None`, the neighbors will be computed
with :func:`~scib_metrics.utils.nearest_neighbors.pynndescent`. The function should take as input
the data and the number of neighbors to compute and return a :class:`~scib_metrics.utils.nearest_neighbors.NeighborsOutput`
object.
"""
# Compute PCA
if self._pre_integrated_embedding_obsm_key is None:
# This is how scib does it
# https://github.com/theislab/scib/blob/896f689e5fe8c57502cb012af06bed1a9b2b61d2/scib/metrics/pcr.py#L197
sc.tl.pca(self._adata, use_highly_variable=False)
self._pre_integrated_embedding_obsm_key = "X_pca"
for emb_key in self._embedding_obsm_keys:
self._emb_adatas[emb_key] = AnnData(self._adata.obsm[emb_key], obs=self._adata.obs)
self._emb_adatas[emb_key].obs[_BATCH] = np.asarray(self._adata.obs[self._batch_key].values)
self._emb_adatas[emb_key].obs[_LABELS] = np.asarray(self._adata.obs[self._label_key].values)
self._emb_adatas[emb_key].obsm[_X_PRE] = self._adata.obsm[self._pre_integrated_embedding_obsm_key]
# Compute neighbors
for ad in tqdm(self._emb_adatas.values(), desc="Computing neighbors"):
if neighbor_computer is not None:
neigh_output = neighbor_computer(ad.X, max(self._neighbor_values))
else:
neigh_output = pynndescent(
ad.X, n_neighbors=max(self._neighbor_values), random_state=0, n_jobs=self._n_jobs
)
indices, distances = neigh_output.indices, neigh_output.distances
for n in self._neighbor_values:
sp_distances, sp_conns = sc.neighbors._compute_connectivities_umap(
indices[:, :n], distances[:, :n], ad.n_obs, n_neighbors=n
)
ad.obsp[f"{n}_connectivities"] = sp_conns
ad.obsp[f"{n}_distances"] = sp_distances
self._prepared = True
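# Usage sketch for a custom `neighbor_computer` (illustrative; assumes that
# NeighborsOutput can be constructed from `indices` and `distances` arrays and
# that `bm` is an existing Benchmarker instance):
#
#   from sklearn.neighbors import NearestNeighbors
#   from scib_metrics.nearest_neighbors import NeighborsOutput
#
#   def sklearn_neighbors(X, n_neighbors):
#       nn = NearestNeighbors(n_neighbors=n_neighbors).fit(X)
#       distances, indices = nn.kneighbors(X)
#       return NeighborsOutput(indices=indices, distances=distances)
#
#   bm.prepare(neighbor_computer=sklearn_neighbors)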
def benchmark(self) -> None:
"""Run the pipeline."""
if self._benchmarked:
warnings.warn(
"The benchmark has already been run. Running it again will overwrite the previous results.",
UserWarning,
)
if not self._prepared:
self.prepare()
num_metrics = sum(
[sum([v is not False for v in asdict(met_col)]) for met_col in self._metric_collection_dict.values()]
)
for emb_key, ad in tqdm(self._emb_adatas.items(), desc="Embeddings", position=0, colour="green"):
pbar = tqdm(total=num_metrics, desc="Metrics", position=1, leave=False, colour="blue")
for metric_type, metric_collection in self._metric_collection_dict.items():
for metric_name, use_metric_or_kwargs in asdict(metric_collection).items():
if use_metric_or_kwargs:
pbar.set_postfix_str(f"{metric_type}: {metric_name}")
metric_fn = getattr(scib_metrics, metric_name)
if isinstance(use_metric_or_kwargs, dict):
# Kwargs in this case
metric_fn = partial(metric_fn, **use_metric_or_kwargs)
metric_value = getattr(MetricAnnDataAPI, metric_name)(ad, metric_fn)
# nmi/ari metrics return a dict
if isinstance(metric_value, dict):
for k, v in metric_value.items():
self._results.loc[f"{metric_name}_{k}", emb_key] = v
self._results.loc[f"{metric_name}_{k}", _METRIC_TYPE] = metric_type
else:
self._results.loc[metric_name, emb_key] = metric_value
self._results.loc[metric_name, _METRIC_TYPE] = metric_type
pbar.update(1)
self._benchmarked = True
def get_results(self, min_max_scale: bool = True, clean_names: bool = True) -> pd.DataFrame:
"""Return the benchmarking results.
Parameters
----------
min_max_scale
Whether to min max scale the results.
clean_names
Whether to clean the metric names.
Returns
-------
The benchmarking results.
"""
df = self._results.transpose()
df.index.name = "Embedding"
df = df.loc[df.index != _METRIC_TYPE]
if min_max_scale:
# Use sklearn to min max scale
df = pd.DataFrame(
MinMaxScaler().fit_transform(df),
columns=df.columns,
index=df.index,
)
if clean_names:
df = df.rename(columns=metric_name_cleaner)
df = df.transpose()
df[_METRIC_TYPE] = self._results[_METRIC_TYPE].values
# Compute scores
per_class_score = df.groupby(_METRIC_TYPE).mean().transpose()
# This is the default scIB weighting from the manuscript
per_class_score["Total"] = 0.4 * per_class_score["Batch correction"] + 0.6 * per_class_score["Bio conservation"]
df = pd.concat([df.transpose(), per_class_score], axis=1)
df.loc[_METRIC_TYPE, per_class_score.columns] = _AGGREGATE_SCORE
return df
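# Worked example of the "Total" aggregate computed above: with a batch-correction
# mean of 0.8 and a bio-conservation mean of 0.6, Total = 0.4 * 0.8 + 0.6 * 0.6 = 0.68.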
def plot_results_table(
self, min_max_scale: bool = True, show: bool = True, save_dir: Optional[str] = None
) -> Table:
"""Plot the benchmarking results.
Parameters
----------
min_max_scale
Whether to min max scale the results.
show
Whether to show the plot.
save_dir
The directory to save the plot to. If `None`, the plot is not saved.
"""
num_embeds = len(self._embedding_obsm_keys)
cmap_fn = lambda col_data: normed_cmap(col_data, cmap=matplotlib.cm.PRGn, num_stds=2.5)
df = self.get_results(min_max_scale=min_max_scale)
# Do not want to plot what kind of metric it is
plot_df = df.drop(_METRIC_TYPE, axis=0)
# Sort by total score
plot_df = plot_df.sort_values(by="Total", ascending=False).astype(np.float64)
plot_df["Method"] = plot_df.index
# Split columns by metric type, using df as it doesn't have the new method col
score_cols = df.columns[df.loc[_METRIC_TYPE] == _AGGREGATE_SCORE]
other_cols = df.columns[df.loc[_METRIC_TYPE] != _AGGREGATE_SCORE]
column_definitions = [
ColumnDefinition("Method", width=1.5, textprops={"ha": "left", "weight": "bold"}),
]
# Circles for the metric values
column_definitions += [
ColumnDefinition(
col,
title=col.replace(" ", "\n", 1),
width=1,
textprops={
"ha": "center",
"bbox": {"boxstyle": "circle", "pad": 0.25},
},
cmap=cmap_fn(plot_df[col]),
group=df.loc[_METRIC_TYPE, col],
formatter="{:.2f}",
)
for i, col in enumerate(other_cols)
]
# Bars for the aggregate scores
column_definitions += [
ColumnDefinition(
col,
width=1,
title=col.replace(" ", "\n", 1),
plot_fn=bar,
plot_kw={
"cmap": matplotlib.cm.YlGnBu,
"plot_bg_bar": False,
"annotate": True,
"height": 0.9,
"formatter": "{:.2f}",
},
group=df.loc[_METRIC_TYPE, col],
border="left" if i == 0 else None,
)
for i, col in enumerate(score_cols)
]
# Allow to manipulate text post-hoc (in illustrator)
with matplotlib.rc_context({"svg.fonttype": "none"}):
fig, ax = plt.subplots(figsize=(len(df.columns) * 1.25, 3 + 0.3 * num_embeds))
tab = Table(
plot_df,
cell_kw={
"linewidth": 0,
"edgecolor": "k",
},
column_definitions=column_definitions,
ax=ax,
row_dividers=True,
footer_divider=True,
textprops={"fontsize": 10, "ha": "center"},
row_divider_kw={"linewidth": 1, "linestyle": (0, (1, 5))},
col_label_divider_kw={"linewidth": 1, "linestyle": "-"},
column_border_kw={"linewidth": 1, "linestyle": "-"},
index_col="Method",
).autoset_fontcolors(colnames=plot_df.columns)
if show:
plt.show()
if save_dir is not None:
fig.savefig(os.path.join(save_dir, "scib_results.svg"), facecolor=ax.get_facecolor(), dpi=300)
return tab
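# Minimal end-to-end sketch on synthetic data (illustrative only; real use starts
# from a preprocessed AnnData with integrated embeddings stored in `.obsm`).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_cells = 200
    adata = AnnData(X=rng.poisson(1.0, size=(n_cells, 50)).astype(np.float32))
    adata.obs["batch"] = rng.choice(["b1", "b2"], size=n_cells)
    adata.obs["cell_type"] = rng.choice(["t1", "t2", "t3"], size=n_cells)
    adata.obsm["X_method1"] = rng.normal(size=(n_cells, 10))
    adata.obsm["X_method2"] = rng.normal(size=(n_cells, 10))
    bm = Benchmarker(
        adata,
        batch_key="batch",
        label_key="cell_type",
        embedding_obsm_keys=["X_method1", "X_method2"],
        batch_correction_metrics=BatchCorrection(kbet_per_label=False),
    )
    bm.benchmark()
    print(bm.get_results(min_max_scale=False))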
| scib-metrics | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_pcr.py | _pcr.py |
from typing import Optional
import jax
import jax.numpy as jnp
import numpy as np
import pandas as pd
from jax import jit
from scib_metrics._types import NdArray
from ._pca import pca
from ._utils import one_hot
def principal_component_regression(
X: NdArray,
covariate: NdArray,
categorical: bool = False,
n_components: Optional[int] = None,
) -> float:
"""Principal component regression (PCR) :cite:p:`buttner2018`.
Parameters
----------
X
Array of shape (n_cells, n_features).
covariate
Array of shape (n_cells,) or (n_cells, 1) representing batch/covariate values.
categorical
If True, batch will be treated as categorical and one-hot encoded.
n_components
Number of components to compute, passed into :func:`~scib_metrics.utils.pca`.
If None, all components are used.
Returns
-------
pcr: float
Principal component regression using the first n_components principal components.
"""
if len(X.shape) != 2:
raise ValueError("Dimension mismatch: X must be 2-dimensional.")
if X.shape[0] != covariate.shape[0]:
raise ValueError("Dimension mismatch: X and batch must have the same number of samples.")
if categorical:
covariate = np.asarray(pd.Categorical(covariate).codes)
else:
covariate = np.asarray(covariate)
covariate = one_hot(covariate) if categorical else covariate.reshape((covariate.shape[0], 1))
pca_results = pca(X, n_components=n_components)
# Center inputs for no intercept
covariate = covariate - jnp.mean(covariate, axis=0)
pcr = _pcr(pca_results.coordinates, covariate, pca_results.variance)
return float(pcr)
@jit
def _pcr(
X_pca: NdArray,
covariate: NdArray,
var: NdArray,
) -> NdArray:
"""Principal component regression.
Parameters
----------
X_pca
Array of shape (n_cells, n_components) containing PCA coordinates. Must be standardized.
covariate
Array of shape (n_cells, 1) or (n_cells, n_classes) containing batch/covariate values. Must be standardized
if not categorical (one-hot).
var
Array of shape (n_components,) containing the explained variance of each PC.
"""
def r2(pc, batch):
residual_sum = jnp.linalg.lstsq(batch, pc)[1]
total_sum = jnp.sum((pc - jnp.mean(pc)) ** 2)
return jnp.maximum(0, 1 - residual_sum / total_sum)
# Index PCs on axis = 1, don't index batch
r2_ = jax.vmap(r2, in_axes=(1, None))(X_pca, covariate)
return jnp.dot(jnp.ravel(r2_), var) / jnp.sum(var)
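# Usage sketch (illustrative only, synthetic data): the returned score is the
# variance-weighted R^2 of each principal component regressed on the covariate
# (one-hot encoded here because `categorical=True`).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 30))
    batch = rng.choice(["b1", "b2", "b3"], size=200)
    print(principal_component_regression(X, batch, categorical=True))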
from functools import partial
from typing import Tuple, Union
import chex
import jax
import jax.numpy as jnp
import numpy as np
from ._utils import get_ndarray
NdArray = Union[np.ndarray, jnp.ndarray]
@chex.dataclass
class _NeighborProbabilityState:
H: float
P: chex.ArrayDevice
Hdiff: float
beta: float
betamin: float
betamax: float
tries: int
@jax.jit
def _Hbeta(knn_dists_row: jnp.ndarray, beta: float) -> Tuple[jnp.ndarray, jnp.ndarray]:
P = jnp.exp(-knn_dists_row * beta)
sumP = jnp.nansum(P)
H = jnp.where(sumP == 0, 0, jnp.log(sumP) + beta * jnp.nansum(knn_dists_row * P) / sumP)
P = jnp.where(sumP == 0, jnp.zeros_like(knn_dists_row), P / sumP)
return H, P
@jax.jit
def _get_neighbor_probability(
knn_dists_row: jnp.ndarray, perplexity: float, tol: float
) -> Tuple[jnp.ndarray, jnp.ndarray]:
beta = 1
betamin = -jnp.inf
betamax = jnp.inf
H, P = _Hbeta(knn_dists_row, beta)
Hdiff = H - jnp.log(perplexity)
def _get_neighbor_probability_step(state):
Hdiff = state.Hdiff
beta = state.beta
betamin = state.betamin
betamax = state.betamax
tries = state.tries
new_betamin = jnp.where(Hdiff > 0, beta, betamin)
new_betamax = jnp.where(Hdiff > 0, betamax, beta)
new_beta = jnp.where(
Hdiff > 0,
jnp.where(betamax == jnp.inf, beta * 2, (beta + betamax) / 2),
jnp.where(betamin == -jnp.inf, beta / 2, (beta + betamin) / 2),
)
new_H, new_P = _Hbeta(knn_dists_row, new_beta)
new_Hdiff = new_H - jnp.log(perplexity)
return _NeighborProbabilityState(
H=new_H, P=new_P, Hdiff=new_Hdiff, beta=new_beta, betamin=new_betamin, betamax=new_betamax, tries=tries + 1
)
def _get_neighbor_probability_convergence(state):
Hdiff, tries = state.Hdiff, state.tries
return jnp.logical_and(jnp.abs(Hdiff) > tol, tries < 50)
init_state = _NeighborProbabilityState(H=H, P=P, Hdiff=Hdiff, beta=beta, betamin=betamin, betamax=betamax, tries=0)
final_state = jax.lax.while_loop(_get_neighbor_probability_convergence, _get_neighbor_probability_step, init_state)
return final_state.H, final_state.P
def _compute_simpson_index_cell(
knn_dists_row: jnp.ndarray, knn_labels_row: jnp.ndarray, n_batches: int, perplexity: float, tol: float
) -> jnp.ndarray:
H, P = _get_neighbor_probability(knn_dists_row, perplexity, tol)
def _non_zero_H_simpson():
sumP = jnp.bincount(knn_labels_row, weights=P, length=n_batches)
return jnp.where(knn_labels_row.shape[0] == P.shape[0], jnp.dot(sumP, sumP), 1)
return jnp.where(H == 0, -1, _non_zero_H_simpson())
def compute_simpson_index(
knn_dists: NdArray,
knn_idx: NdArray,
labels: NdArray,
n_labels: int,
perplexity: float = 30,
tol: float = 1e-5,
) -> np.ndarray:
"""Compute the Simpson index for each cell.
Parameters
----------
knn_dists
KNN distances of size (n_cells, n_neighbors).
knn_idx
KNN indices of size (n_cells, n_neighbors) corresponding to distances.
labels
Cell labels of size (n_cells,).
n_labels
Number of labels.
perplexity
Measure of the effective number of neighbors.
tol
Tolerance for binary search.
Returns
-------
simpson_index
Simpson index of size (n_cells,).
"""
knn_dists = jnp.array(knn_dists)
knn_idx = jnp.array(knn_idx)
labels = jnp.array(labels)
knn_labels = labels[knn_idx]
simpson_fn = partial(_compute_simpson_index_cell, n_batches=n_labels, perplexity=perplexity, tol=tol)
out = jax.vmap(simpson_fn)(knn_dists, knn_labels)
return get_ndarray(out)
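# --- Illustrative usage (editor's addition, not part of the original module) -------------
# A minimal, hypothetical sketch of `compute_simpson_index` on synthetic kNN inputs.
# The array sizes and the perplexity value below are arbitrary choices for the demo.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_cells, n_neighbors, n_labels_demo = 50, 10, 3
    dists_demo = np.sort(rng.random((n_cells, n_neighbors)), axis=1)   # ascending distances
    idx_demo = rng.integers(0, n_cells, size=(n_cells, n_neighbors))   # neighbor indices
    labels_demo = rng.integers(0, n_labels_demo, size=n_cells)         # per-cell labels
    scores = compute_simpson_index(dists_demo, idx_demo, labels_demo, n_labels_demo, perplexity=5.0)
    print(scores.shape, scores[:5])  # one Simpson index per cell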
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_lisi.py
|
_lisi.py
|
from functools import partial
from typing import Tuple, Union
import chex
import jax
import jax.numpy as jnp
import numpy as np
from ._utils import get_ndarray
NdArray = Union[np.ndarray, jnp.ndarray]
@chex.dataclass
class _NeighborProbabilityState:
H: float
P: chex.ArrayDevice
Hdiff: float
beta: float
betamin: float
betamax: float
tries: int
@jax.jit
def _Hbeta(knn_dists_row: jnp.ndarray, beta: float) -> Tuple[jnp.ndarray, jnp.ndarray]:
P = jnp.exp(-knn_dists_row * beta)
sumP = jnp.nansum(P)
H = jnp.where(sumP == 0, 0, jnp.log(sumP) + beta * jnp.nansum(knn_dists_row * P) / sumP)
P = jnp.where(sumP == 0, jnp.zeros_like(knn_dists_row), P / sumP)
return H, P
@jax.jit
def _get_neighbor_probability(
knn_dists_row: jnp.ndarray, perplexity: float, tol: float
) -> Tuple[jnp.ndarray, jnp.ndarray]:
beta = 1
betamin = -jnp.inf
betamax = jnp.inf
H, P = _Hbeta(knn_dists_row, beta)
Hdiff = H - jnp.log(perplexity)
def _get_neighbor_probability_step(state):
Hdiff = state.Hdiff
beta = state.beta
betamin = state.betamin
betamax = state.betamax
tries = state.tries
new_betamin = jnp.where(Hdiff > 0, beta, betamin)
new_betamax = jnp.where(Hdiff > 0, betamax, beta)
new_beta = jnp.where(
Hdiff > 0,
jnp.where(betamax == jnp.inf, beta * 2, (beta + betamax) / 2),
jnp.where(betamin == -jnp.inf, beta / 2, (beta + betamin) / 2),
)
new_H, new_P = _Hbeta(knn_dists_row, new_beta)
new_Hdiff = new_H - jnp.log(perplexity)
return _NeighborProbabilityState(
H=new_H, P=new_P, Hdiff=new_Hdiff, beta=new_beta, betamin=new_betamin, betamax=new_betamax, tries=tries + 1
)
def _get_neighbor_probability_convergence(state):
Hdiff, tries = state.Hdiff, state.tries
return jnp.logical_and(jnp.abs(Hdiff) > tol, tries < 50)
init_state = _NeighborProbabilityState(H=H, P=P, Hdiff=Hdiff, beta=beta, betamin=betamin, betamax=betamax, tries=0)
final_state = jax.lax.while_loop(_get_neighbor_probability_convergence, _get_neighbor_probability_step, init_state)
return final_state.H, final_state.P
def _compute_simpson_index_cell(
knn_dists_row: jnp.ndarray, knn_labels_row: jnp.ndarray, n_batches: int, perplexity: float, tol: float
) -> jnp.ndarray:
H, P = _get_neighbor_probability(knn_dists_row, perplexity, tol)
def _non_zero_H_simpson():
sumP = jnp.bincount(knn_labels_row, weights=P, length=n_batches)
return jnp.where(knn_labels_row.shape[0] == P.shape[0], jnp.dot(sumP, sumP), 1)
return jnp.where(H == 0, -1, _non_zero_H_simpson())
def compute_simpson_index(
knn_dists: NdArray,
knn_idx: NdArray,
labels: NdArray,
n_labels: int,
perplexity: float = 30,
tol: float = 1e-5,
) -> np.ndarray:
"""Compute the Simpson index for each cell.
Parameters
----------
knn_dists
KNN distances of size (n_cells, n_neighbors).
knn_idx
KNN indices of size (n_cells, n_neighbors) corresponding to distances.
labels
Cell labels of size (n_cells,).
n_labels
Number of labels.
perplexity
Measure of the effective number of neighbors.
tol
Tolerance for binary search.
Returns
-------
simpson_index
Simpson index of size (n_cells,).
"""
knn_dists = jnp.array(knn_dists)
knn_idx = jnp.array(knn_idx)
labels = jnp.array(labels)
knn_labels = labels[knn_idx]
simpson_fn = partial(_compute_simpson_index_cell, n_batches=n_labels, perplexity=perplexity, tol=tol)
out = jax.vmap(simpson_fn)(knn_dists, knn_labels)
return get_ndarray(out)
| 0.935626 | 0.644267 |
from functools import partial
from typing import Literal
import jax
import jax.numpy as jnp
import numpy as np
from sklearn.utils import check_array
from scib_metrics._types import IntOrKey
from ._dist import cdist
from ._utils import get_ndarray, validate_seed
def _initialize_random(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids randomly."""
n_obs = X.shape[0]
indices = jax.random.choice(key, n_obs, (n_clusters,), replace=False)
initial_state = X[indices]
return initial_state
@partial(jax.jit, static_argnums=1)
def _initialize_plus_plus(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids with k-means++ algorithm."""
n_obs = X.shape[0]
key, subkey = jax.random.split(key)
initial_centroid_idx = jax.random.choice(subkey, n_obs, (1,), replace=False)
initial_centroid = X[initial_centroid_idx].ravel()
dist_sq = jnp.square(cdist(initial_centroid[jnp.newaxis, :], X)).ravel()
initial_state = {"min_dist_sq": dist_sq, "centroid": initial_centroid, "key": key}
n_local_trials = 2 + int(np.log(n_clusters))
def _step(state, _):
prob = state["min_dist_sq"] / jnp.sum(state["min_dist_sq"])
# note that observations already chosen as centers will have 0 probability
# and will not be chosen again
state["key"], subkey = jax.random.split(state["key"])
next_centroid_idx_candidates = jax.random.choice(subkey, n_obs, (n_local_trials,), replace=False, p=prob)
next_centroid_candidates = X[next_centroid_idx_candidates]
# candidates by observations
dist_sq_candidates = jnp.square(cdist(next_centroid_candidates, X))
dist_sq_candidates = jnp.minimum(state["min_dist_sq"][jnp.newaxis, :], dist_sq_candidates)
candidates_pot = dist_sq_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = jnp.argmin(candidates_pot)
min_dist_sq = dist_sq_candidates[best_candidate]
best_candidate = next_centroid_idx_candidates[best_candidate]
state["min_dist_sq"] = min_dist_sq.ravel()
state["centroid"] = X[best_candidate].ravel()
return state, state["centroid"]
_, centroids = jax.lax.scan(_step, initial_state, jnp.arange(n_clusters - 1))
return centroids
@jax.jit
def _get_dist_labels(X: jnp.ndarray, centroids: jnp.ndarray) -> jnp.ndarray:
"""Get the distance and labels for each observation."""
dist = cdist(X, centroids)
labels = jnp.argmin(dist, axis=1)
return dist, labels
class KMeans:
"""Jax implementation of :class:`sklearn.cluster.KMeans`.
This implementation is limited to Euclidean distance.
Parameters
----------
n_clusters
Number of clusters.
init
Cluster centroid initialization method. One of the following:
* ``'k-means++'``: Sample initial cluster centroids based on an
empirical distribution of the points' contributions to the
overall inertia.
* ``'random'``: Uniformly sample observations as initial centroids
n_init
Number of times the k-means algorithm will be initialized.
max_iter
Maximum number of iterations of the k-means algorithm for a single run.
tol
Relative tolerance with regards to inertia to declare convergence.
seed
Random seed.
"""
def __init__(
self,
n_clusters: int = 8,
init: Literal["k-means++", "random"] = "k-means++",
n_init: int = 10,
max_iter: int = 300,
tol: float = 1e-4,
seed: IntOrKey = 0,
):
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.seed: jax.random.KeyArray = validate_seed(seed)
if init not in ["k-means++", "random"]:
raise ValueError("Invalid init method, must be one of ['k-means++' or 'random'].")
if init == "k-means++":
self._initialize = _initialize_plus_plus
else:
self._initialize = _initialize_random
def fit(self, X: np.ndarray):
"""Fit the model to the data."""
X = check_array(X, dtype=np.float32, order="C")
# Subtract mean for numerical accuracy
mean = X.mean(axis=0)
X -= mean
self._fit(X)
X += mean
self.cluster_centroids_ += mean
return self
def _fit(self, X: np.ndarray):
all_centroids, all_inertias = jax.lax.map(
lambda key: self._kmeans_full_run(X, key), jax.random.split(self.seed, self.n_init)
)
i = jnp.argmin(all_inertias)
self.cluster_centroids_ = get_ndarray(all_centroids[i])
self.inertia_ = get_ndarray(all_inertias[i])
_, labels = _get_dist_labels(X, self.cluster_centroids_)
self.labels_ = get_ndarray(labels)
@partial(jax.jit, static_argnums=(0,))
def _kmeans_full_run(self, X: jnp.ndarray, key: jnp.ndarray) -> jnp.ndarray:
def _kmeans_step(state):
old_inertia = state[1]
centroids, _, _, n_iter = state
# TODO(adamgayoso): Efficiently compute argmin and min simultaneously.
dist, new_labels = _get_dist_labels(X, centroids)
# From https://colab.research.google.com/drive/1AwS4haUx6swF82w3nXr6QKhajdF8aSvA?usp=sharing
counts = (new_labels[jnp.newaxis, :] == jnp.arange(self.n_clusters)[:, jnp.newaxis]).sum(
axis=1, keepdims=True
)
counts = jnp.clip(counts, a_min=1, a_max=None)
# Sum over points in a centroid by zeroing others out
new_centroids = (
jnp.sum(
jnp.where(
# axes: (data points, clusters, data dimension)
new_labels[:, jnp.newaxis, jnp.newaxis]
== jnp.arange(self.n_clusters)[jnp.newaxis, :, jnp.newaxis],
X[:, jnp.newaxis, :],
0.0,
),
axis=0,
)
/ counts
)
new_inertia = jnp.mean(jnp.min(dist, axis=1))
n_iter = n_iter + 1
return new_centroids, new_inertia, old_inertia, n_iter
        def _kmeans_convergence(state):
            # Continue condition for jax.lax.while_loop: keep iterating while the inertia
            # has not yet converged and the iteration budget is not exhausted.
            _, new_inertia, old_inertia, n_iter = state
            not_converged = jnp.abs(old_inertia - new_inertia) >= self.tol
            within_budget = n_iter <= self.max_iter
            return jnp.logical_and(not_converged, within_budget)[0]
centroids = self._initialize(X, self.n_clusters, key)
# centroids, new_inertia, old_inertia, n_iter
state = (centroids, jnp.inf, jnp.inf, jnp.array([0.0]))
state = _kmeans_step(state)
state = jax.lax.while_loop(_kmeans_convergence, _kmeans_step, state)
return state[0], state[1]
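# --- Illustrative usage (editor's addition, not part of the original module) -------------
# A minimal, hypothetical sketch: fit the KMeans class above on two synthetic blobs and
# inspect the fitted attributes. The data and hyperparameters below are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    blob_a = rng.normal(loc=0.0, size=(50, 2))
    blob_b = rng.normal(loc=5.0, size=(50, 2))
    X_demo = np.concatenate([blob_a, blob_b]).astype(np.float32)
    km = KMeans(n_clusters=2, n_init=2, seed=0).fit(X_demo)
    print(km.cluster_centroids_)  # (2, 2) centroids, close to (0, 0) and (5, 5)
    print(km.labels_[:5])         # cluster assignment per observation
    print(km.inertia_)            # inertia of the best of the n_init runs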
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_kmeans.py
|
_kmeans.py
|
from functools import partial
from typing import Literal
import jax
import jax.numpy as jnp
import numpy as np
from sklearn.utils import check_array
from scib_metrics._types import IntOrKey
from ._dist import cdist
from ._utils import get_ndarray, validate_seed
def _initialize_random(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids randomly."""
n_obs = X.shape[0]
indices = jax.random.choice(key, n_obs, (n_clusters,), replace=False)
initial_state = X[indices]
return initial_state
@partial(jax.jit, static_argnums=1)
def _initialize_plus_plus(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids with k-means++ algorithm."""
n_obs = X.shape[0]
key, subkey = jax.random.split(key)
initial_centroid_idx = jax.random.choice(subkey, n_obs, (1,), replace=False)
initial_centroid = X[initial_centroid_idx].ravel()
dist_sq = jnp.square(cdist(initial_centroid[jnp.newaxis, :], X)).ravel()
initial_state = {"min_dist_sq": dist_sq, "centroid": initial_centroid, "key": key}
n_local_trials = 2 + int(np.log(n_clusters))
def _step(state, _):
prob = state["min_dist_sq"] / jnp.sum(state["min_dist_sq"])
# note that observations already chosen as centers will have 0 probability
# and will not be chosen again
state["key"], subkey = jax.random.split(state["key"])
next_centroid_idx_candidates = jax.random.choice(subkey, n_obs, (n_local_trials,), replace=False, p=prob)
next_centroid_candidates = X[next_centroid_idx_candidates]
# candidates by observations
dist_sq_candidates = jnp.square(cdist(next_centroid_candidates, X))
dist_sq_candidates = jnp.minimum(state["min_dist_sq"][jnp.newaxis, :], dist_sq_candidates)
candidates_pot = dist_sq_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = jnp.argmin(candidates_pot)
min_dist_sq = dist_sq_candidates[best_candidate]
best_candidate = next_centroid_idx_candidates[best_candidate]
state["min_dist_sq"] = min_dist_sq.ravel()
state["centroid"] = X[best_candidate].ravel()
return state, state["centroid"]
_, centroids = jax.lax.scan(_step, initial_state, jnp.arange(n_clusters - 1))
return centroids
@jax.jit
def _get_dist_labels(X: jnp.ndarray, centroids: jnp.ndarray) -> jnp.ndarray:
"""Get the distance and labels for each observation."""
dist = cdist(X, centroids)
labels = jnp.argmin(dist, axis=1)
return dist, labels
class KMeans:
"""Jax implementation of :class:`sklearn.cluster.KMeans`.
This implementation is limited to Euclidean distance.
Parameters
----------
n_clusters
Number of clusters.
init
Cluster centroid initialization method. One of the following:
* ``'k-means++'``: Sample initial cluster centroids based on an
empirical distribution of the points' contributions to the
overall inertia.
* ``'random'``: Uniformly sample observations as initial centroids
n_init
Number of times the k-means algorithm will be initialized.
max_iter
Maximum number of iterations of the k-means algorithm for a single run.
tol
Relative tolerance with regards to inertia to declare convergence.
seed
Random seed.
"""
def __init__(
self,
n_clusters: int = 8,
init: Literal["k-means++", "random"] = "k-means++",
n_init: int = 10,
max_iter: int = 300,
tol: float = 1e-4,
seed: IntOrKey = 0,
):
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.seed: jax.random.KeyArray = validate_seed(seed)
if init not in ["k-means++", "random"]:
raise ValueError("Invalid init method, must be one of ['k-means++' or 'random'].")
if init == "k-means++":
self._initialize = _initialize_plus_plus
else:
self._initialize = _initialize_random
def fit(self, X: np.ndarray):
"""Fit the model to the data."""
X = check_array(X, dtype=np.float32, order="C")
# Subtract mean for numerical accuracy
mean = X.mean(axis=0)
X -= mean
self._fit(X)
X += mean
self.cluster_centroids_ += mean
return self
def _fit(self, X: np.ndarray):
all_centroids, all_inertias = jax.lax.map(
lambda key: self._kmeans_full_run(X, key), jax.random.split(self.seed, self.n_init)
)
i = jnp.argmin(all_inertias)
self.cluster_centroids_ = get_ndarray(all_centroids[i])
self.inertia_ = get_ndarray(all_inertias[i])
_, labels = _get_dist_labels(X, self.cluster_centroids_)
self.labels_ = get_ndarray(labels)
@partial(jax.jit, static_argnums=(0,))
def _kmeans_full_run(self, X: jnp.ndarray, key: jnp.ndarray) -> jnp.ndarray:
def _kmeans_step(state):
old_inertia = state[1]
centroids, _, _, n_iter = state
# TODO(adamgayoso): Efficiently compute argmin and min simultaneously.
dist, new_labels = _get_dist_labels(X, centroids)
# From https://colab.research.google.com/drive/1AwS4haUx6swF82w3nXr6QKhajdF8aSvA?usp=sharing
counts = (new_labels[jnp.newaxis, :] == jnp.arange(self.n_clusters)[:, jnp.newaxis]).sum(
axis=1, keepdims=True
)
counts = jnp.clip(counts, a_min=1, a_max=None)
# Sum over points in a centroid by zeroing others out
new_centroids = (
jnp.sum(
jnp.where(
# axes: (data points, clusters, data dimension)
new_labels[:, jnp.newaxis, jnp.newaxis]
== jnp.arange(self.n_clusters)[jnp.newaxis, :, jnp.newaxis],
X[:, jnp.newaxis, :],
0.0,
),
axis=0,
)
/ counts
)
new_inertia = jnp.mean(jnp.min(dist, axis=1))
n_iter = n_iter + 1
return new_centroids, new_inertia, old_inertia, n_iter
        def _kmeans_convergence(state):
            # Continue condition for jax.lax.while_loop: keep iterating while the inertia
            # has not yet converged and the iteration budget is not exhausted.
            _, new_inertia, old_inertia, n_iter = state
            not_converged = jnp.abs(old_inertia - new_inertia) >= self.tol
            within_budget = n_iter <= self.max_iter
            return jnp.logical_and(not_converged, within_budget)[0]
centroids = self._initialize(X, self.n_clusters, key)
# centroids, new_inertia, old_inertia, n_iter
state = (centroids, jnp.inf, jnp.inf, jnp.array([0.0]))
state = _kmeans_step(state)
state = jax.lax.while_loop(_kmeans_convergence, _kmeans_step, state)
return state[0], state[1]
| 0.880026 | 0.44071 |
from typing import Optional, Tuple
import jax.numpy as jnp
from chex import dataclass
from jax import jit
from scib_metrics._types import NdArray
from ._utils import get_ndarray
@dataclass
class _SVDResult:
"""SVD result.
Attributes
----------
u
Array of shape (n_cells, n_components) containing the left singular vectors.
s
Array of shape (n_components,) containing the singular values.
v
Array of shape (n_components, n_features) containing the right singular vectors.
"""
u: NdArray
s: NdArray
v: NdArray
@dataclass
class _PCAResult:
"""PCA result.
Attributes
----------
coordinates
Array of shape (n_cells, n_components) containing the PCA coordinates.
components
Array of shape (n_components, n_features) containing the PCA components.
variance
Array of shape (n_components,) containing the explained variance of each PC.
variance_ratio
Array of shape (n_components,) containing the explained variance ratio of each PC.
svd
Dataclass containing the SVD data.
"""
coordinates: NdArray
components: NdArray
variance: NdArray
variance_ratio: NdArray
svd: Optional[_SVDResult] = None
def _svd_flip(
u: NdArray,
v: NdArray,
u_based_decision: bool = True,
):
"""Sign correction to ensure deterministic output from SVD.
Jax implementation of :func:`~sklearn.utils.extmath.svd_flip`.
Parameters
----------
u
Left singular vectors of shape (M, K).
v
Right singular vectors of shape (K, N).
u_based_decision
If True, use the columns of u as the basis for sign flipping.
"""
if u_based_decision:
max_abs_cols = jnp.argmax(jnp.abs(u), axis=0)
signs = jnp.sign(u[max_abs_cols, jnp.arange(u.shape[1])])
else:
max_abs_rows = jnp.argmax(jnp.abs(v), axis=1)
signs = jnp.sign(v[jnp.arange(v.shape[0]), max_abs_rows])
u_ = u * signs
v_ = v * signs[:, None]
return u_, v_
def pca(
X: NdArray,
n_components: Optional[int] = None,
return_svd: bool = False,
) -> _PCAResult:
"""Principal component analysis (PCA).
Parameters
----------
X
Array of shape (n_cells, n_features).
n_components
Number of components to keep. If None, all components are kept.
return_svd
If True, also return the results from SVD.
Returns
-------
results: _PCAData
"""
max_components = min(X.shape)
if n_components and n_components > max_components:
raise ValueError(f"n_components = {n_components} must be <= min(n_cells, n_features) = {max_components}")
n_components = n_components or max_components
u, s, v, variance, variance_ratio = _pca(X)
# Select n_components
coordinates = u[:, :n_components] * s[:n_components]
components = v[:n_components]
variance_ = variance[:n_components]
variance_ratio_ = variance_ratio[:n_components]
results = _PCAResult(
coordinates=get_ndarray(coordinates),
components=get_ndarray(components),
variance=get_ndarray(variance_),
variance_ratio=get_ndarray(variance_ratio_),
svd=_SVDResult(u=get_ndarray(u), s=get_ndarray(s), v=get_ndarray(v)) if return_svd else None,
)
return results
@jit
def _pca(
X: NdArray,
) -> Tuple[NdArray, NdArray, NdArray, NdArray, NdArray]:
"""Principal component analysis.
Parameters
----------
X
Array of shape (n_cells, n_features).
Returns
-------
u: NdArray
Left singular vectors of shape (M, K).
s: NdArray
Singular values of shape (K,).
v: NdArray
Right singular vectors of shape (K, N).
variance: NdArray
Array of shape (K,) containing the explained variance of each PC.
variance_ratio: NdArray
Array of shape (K,) containing the explained variance ratio of each PC.
"""
X_ = X - jnp.mean(X, axis=0)
u, s, v = jnp.linalg.svd(X_, full_matrices=False)
u, v = _svd_flip(u, v)
variance = (s**2) / (X.shape[0] - 1)
total_variance = jnp.sum(variance)
variance_ratio = variance / total_variance
return u, s, v, variance, variance_ratio
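# --- Illustrative usage (editor's addition, not part of the original module) -------------
# A minimal, hypothetical sketch: run `pca` on a small random matrix and inspect the result.
if __name__ == "__main__":
    import numpy as np  # demo-only import; the module itself only uses jax.numpy

    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(20, 6))
    res = pca(X_demo, n_components=3)
    print(res.coordinates.shape)      # (20, 3) cell coordinates
    print(res.components.shape)       # (3, 6) principal axes
    print(res.variance_ratio.sum())   # < 1.0, since only 3 of 6 components are kept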
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_pca.py
|
_pca.py
|
from typing import Optional, Tuple
import jax.numpy as jnp
from chex import dataclass
from jax import jit
from scib_metrics._types import NdArray
from ._utils import get_ndarray
@dataclass
class _SVDResult:
"""SVD result.
Attributes
----------
u
Array of shape (n_cells, n_components) containing the left singular vectors.
s
Array of shape (n_components,) containing the singular values.
v
Array of shape (n_components, n_features) containing the right singular vectors.
"""
u: NdArray
s: NdArray
v: NdArray
@dataclass
class _PCAResult:
"""PCA result.
Attributes
----------
coordinates
Array of shape (n_cells, n_components) containing the PCA coordinates.
components
Array of shape (n_components, n_features) containing the PCA components.
variance
Array of shape (n_components,) containing the explained variance of each PC.
variance_ratio
Array of shape (n_components,) containing the explained variance ratio of each PC.
svd
Dataclass containing the SVD data.
"""
coordinates: NdArray
components: NdArray
variance: NdArray
variance_ratio: NdArray
svd: Optional[_SVDResult] = None
def _svd_flip(
u: NdArray,
v: NdArray,
u_based_decision: bool = True,
):
"""Sign correction to ensure deterministic output from SVD.
Jax implementation of :func:`~sklearn.utils.extmath.svd_flip`.
Parameters
----------
u
Left singular vectors of shape (M, K).
v
Right singular vectors of shape (K, N).
u_based_decision
If True, use the columns of u as the basis for sign flipping.
"""
if u_based_decision:
max_abs_cols = jnp.argmax(jnp.abs(u), axis=0)
signs = jnp.sign(u[max_abs_cols, jnp.arange(u.shape[1])])
else:
max_abs_rows = jnp.argmax(jnp.abs(v), axis=1)
signs = jnp.sign(v[jnp.arange(v.shape[0]), max_abs_rows])
u_ = u * signs
v_ = v * signs[:, None]
return u_, v_
def pca(
X: NdArray,
n_components: Optional[int] = None,
return_svd: bool = False,
) -> _PCAResult:
"""Principal component analysis (PCA).
Parameters
----------
X
Array of shape (n_cells, n_features).
n_components
Number of components to keep. If None, all components are kept.
return_svd
If True, also return the results from SVD.
Returns
-------
results: _PCAData
"""
max_components = min(X.shape)
if n_components and n_components > max_components:
raise ValueError(f"n_components = {n_components} must be <= min(n_cells, n_features) = {max_components}")
n_components = n_components or max_components
u, s, v, variance, variance_ratio = _pca(X)
# Select n_components
coordinates = u[:, :n_components] * s[:n_components]
components = v[:n_components]
variance_ = variance[:n_components]
variance_ratio_ = variance_ratio[:n_components]
results = _PCAResult(
coordinates=get_ndarray(coordinates),
components=get_ndarray(components),
variance=get_ndarray(variance_),
variance_ratio=get_ndarray(variance_ratio_),
svd=_SVDResult(u=get_ndarray(u), s=get_ndarray(s), v=get_ndarray(v)) if return_svd else None,
)
return results
@jit
def _pca(
X: NdArray,
) -> Tuple[NdArray, NdArray, NdArray, NdArray, NdArray]:
"""Principal component analysis.
Parameters
----------
X
Array of shape (n_cells, n_features).
Returns
-------
u: NdArray
Left singular vectors of shape (M, K).
s: NdArray
Singular values of shape (K,).
v: NdArray
Right singular vectors of shape (K, N).
variance: NdArray
Array of shape (K,) containing the explained variance of each PC.
variance_ratio: NdArray
Array of shape (K,) containing the explained variance ratio of each PC.
"""
X_ = X - jnp.mean(X, axis=0)
u, s, v = jnp.linalg.svd(X_, full_matrices=False)
u, v = _svd_flip(u, v)
variance = (s**2) / (X.shape[0] - 1)
total_variance = jnp.sum(variance)
variance_ratio = variance / total_variance
return u, s, v, variance, variance_ratio
| 0.975012 | 0.662223 |
import logging
from typing import Literal
import numpy as np
import pynndescent
import scipy
from scipy.sparse import csr_matrix, issparse
logger = logging.getLogger(__name__)
def _compute_transitions(X: csr_matrix, density_normalize: bool = True):
"""Code from scanpy.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py#L899
"""
# TODO(adamgayoso): Refactor this with Jax
# density normalization as of Coifman et al. (2005)
# ensures that kernel matrix is independent of sampling density
if density_normalize:
# q[i] is an estimate for the sampling density at point i
# it's also the degree of the underlying graph
q = np.asarray(X.sum(axis=0))
if not issparse(X):
Q = np.diag(1.0 / q)
else:
Q = scipy.sparse.spdiags(1.0 / q, 0, X.shape[0], X.shape[0])
K = Q @ X @ Q
else:
K = X
# z[i] is the square root of the row sum of K
z = np.sqrt(np.asarray(K.sum(axis=0)))
if not issparse(K):
Z = np.diag(1.0 / z)
else:
Z = scipy.sparse.spdiags(1.0 / z, 0, K.shape[0], K.shape[0])
transitions_sym = Z @ K @ Z
return transitions_sym
def _compute_eigen(
transitions_sym: csr_matrix,
n_comps: int = 15,
sort: Literal["decrease", "increase"] = "decrease",
):
"""Compute eigen decomposition of transition matrix.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py
"""
# TODO(adamgayoso): Refactor this with Jax
matrix = transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0] - 1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = "LM" if sort == "decrease" else "SM"
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps, which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == "decrease":
evals = evals[::-1]
evecs = evecs[:, ::-1]
return evals, evecs
def _get_sparse_matrix_from_indices_distances_numpy(indices, distances, n_obs, n_neighbors):
"""Code from scanpy."""
n_nonzero = n_obs * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
D = csr_matrix(
(
distances.copy().ravel(), # copy the data, otherwise strange behavior here
indices.copy().ravel(),
indptr,
),
shape=(n_obs, n_obs),
)
D.eliminate_zeros()
D.sort_indices()
return D
def diffusion_nn(X: csr_matrix, k: int, n_comps: int = 100):
"""Diffusion-based neighbors.
This function generates a nearest neighbour list from a connectivities matrix.
This allows us to select a consistent number of nearest neighbors across all methods.
    This differs from the original scIB implementation by leveraging diffusion maps. Here we
    embed the data with diffusion maps, in which Euclidean distance represents the diffusion
    distance well. We then use pynndescent to find the nearest neighbours in this embedding space.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing connectivities.
k
Number of nearest neighbours to select.
n_comps
Number of components for diffusion map
Returns
-------
Neighbors graph
"""
transitions = _compute_transitions(X)
evals, evecs = _compute_eigen(transitions, n_comps=n_comps)
evals += 1e-8 # Avoid division by zero
# Multiscale such that the number of steps t gets "integrated out"
embedding = evecs
scaled_evals = np.array([e if e == 1 else e / (1 - e) for e in evals])
embedding *= scaled_evals
nn_obj = pynndescent.NNDescent(embedding, n_neighbors=k + 1)
neigh_inds, neigh_distances = nn_obj.neighbor_graph
# We purposely ignore the first neighbor as it is the cell itself
# It gets added back inside the kbet internal function
neigh_graph = _get_sparse_matrix_from_indices_distances_numpy(
neigh_inds[:, 1:], neigh_distances[:, 1:], X.shape[0], k
)
return neigh_graph
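# --- Illustrative usage (editor's addition, not part of the original module) -------------
# A minimal, hypothetical sketch: build a symmetric kNN connectivity matrix with scikit-learn
# as a stand-in for a real connectivities graph, then extract a diffusion-based neighbor
# graph from it with `diffusion_nn`.
if __name__ == "__main__":
    from sklearn.neighbors import kneighbors_graph  # demo-only import

    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(200, 10))
    conn = kneighbors_graph(X_demo, n_neighbors=15, mode="connectivity", include_self=False)
    conn = conn.maximum(conn.T).tocsr()          # symmetrize the 0/1 connectivities
    graph = diffusion_nn(conn, k=10, n_comps=20)
    print(graph.shape, graph.nnz)                # (200, 200) with roughly 200 * 10 stored distances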
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_diffusion_nn.py
|
_diffusion_nn.py
|
import logging
from typing import Literal
import numpy as np
import pynndescent
import scipy
from scipy.sparse import csr_matrix, issparse
logger = logging.getLogger(__name__)
def _compute_transitions(X: csr_matrix, density_normalize: bool = True):
"""Code from scanpy.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py#L899
"""
# TODO(adamgayoso): Refactor this with Jax
# density normalization as of Coifman et al. (2005)
# ensures that kernel matrix is independent of sampling density
if density_normalize:
# q[i] is an estimate for the sampling density at point i
# it's also the degree of the underlying graph
q = np.asarray(X.sum(axis=0))
if not issparse(X):
Q = np.diag(1.0 / q)
else:
Q = scipy.sparse.spdiags(1.0 / q, 0, X.shape[0], X.shape[0])
K = Q @ X @ Q
else:
K = X
# z[i] is the square root of the row sum of K
z = np.sqrt(np.asarray(K.sum(axis=0)))
if not issparse(K):
Z = np.diag(1.0 / z)
else:
Z = scipy.sparse.spdiags(1.0 / z, 0, K.shape[0], K.shape[0])
transitions_sym = Z @ K @ Z
return transitions_sym
def _compute_eigen(
transitions_sym: csr_matrix,
n_comps: int = 15,
sort: Literal["decrease", "increase"] = "decrease",
):
"""Compute eigen decomposition of transition matrix.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py
"""
# TODO(adamgayoso): Refactor this with Jax
matrix = transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0] - 1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = "LM" if sort == "decrease" else "SM"
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps, which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == "decrease":
evals = evals[::-1]
evecs = evecs[:, ::-1]
return evals, evecs
def _get_sparse_matrix_from_indices_distances_numpy(indices, distances, n_obs, n_neighbors):
"""Code from scanpy."""
n_nonzero = n_obs * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
D = csr_matrix(
(
distances.copy().ravel(), # copy the data, otherwise strange behavior here
indices.copy().ravel(),
indptr,
),
shape=(n_obs, n_obs),
)
D.eliminate_zeros()
D.sort_indices()
return D
def diffusion_nn(X: csr_matrix, k: int, n_comps: int = 100):
"""Diffusion-based neighbors.
This function generates a nearest neighbour list from a connectivities matrix.
This allows us to select a consistent number of nearest neighbors across all methods.
    This differs from the original scIB implementation by leveraging diffusion maps. Here we
    embed the data with diffusion maps, in which Euclidean distance represents the diffusion
    distance well. We then use pynndescent to find the nearest neighbours in this embedding space.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing connectivities.
k
Number of nearest neighbours to select.
n_comps
Number of components for diffusion map
Returns
-------
Neighbors graph
"""
transitions = _compute_transitions(X)
evals, evecs = _compute_eigen(transitions, n_comps=n_comps)
evals += 1e-8 # Avoid division by zero
# Multiscale such that the number of steps t gets "integrated out"
embedding = evecs
scaled_evals = np.array([e if e == 1 else e / (1 - e) for e in evals])
embedding *= scaled_evals
nn_obj = pynndescent.NNDescent(embedding, n_neighbors=k + 1)
neigh_inds, neigh_distances = nn_obj.neighbor_graph
# We purposely ignore the first neighbor as it is the cell itself
# It gets added back inside the kbet internal function
neigh_graph = _get_sparse_matrix_from_indices_distances_numpy(
neigh_inds[:, 1:], neigh_distances[:, 1:], X.shape[0], k
)
return neigh_graph
| 0.674587 | 0.588416 |
import warnings
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from chex import ArrayDevice
from jax import nn
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from scib_metrics._types import ArrayLike, IntOrKey, NdArray
def get_ndarray(x: ArrayDevice) -> np.ndarray:
"""Convert Jax device array to Numpy array."""
return np.array(jax.device_get(x))
def one_hot(y: NdArray, n_classes: Optional[int] = None) -> jnp.ndarray:
"""One-hot encode an array. Wrapper around :func:`~jax.nn.one_hot`.
Parameters
----------
y
Array of shape (n_cells,) or (n_cells, 1).
n_classes
Number of classes. If None, inferred from the data.
Returns
-------
one_hot: jnp.ndarray
Array of shape (n_cells, n_classes).
"""
n_classes = n_classes or jnp.max(y) + 1
return nn.one_hot(jnp.ravel(y), n_classes)
def validate_seed(seed: IntOrKey) -> jax.random.KeyArray:
"""Validate a seed and return a Jax random key."""
return jax.random.PRNGKey(seed) if isinstance(seed, int) else seed
def check_square(X: ArrayLike):
"""Check if a matrix is square."""
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
def convert_knn_graph_to_idx(X: csr_matrix) -> Tuple[np.ndarray, np.ndarray]:
"""Convert a kNN graph to indices and distances."""
check_array(X, accept_sparse="csr")
check_square(X)
n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
if len(np.unique(n_neighbors)) > 1:
raise ValueError("Each cell must have the same number of neighbors.")
n_neighbors = int(np.unique(n_neighbors)[0])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Precomputed sparse input")
nn_obj = NearestNeighbors(n_neighbors=n_neighbors, metric="precomputed").fit(X)
kneighbors = nn_obj.kneighbors(X)
return kneighbors
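# --- Illustrative usage (editor's addition, not part of the original module) -------------
# A minimal, hypothetical sketch of two helpers above: one-hot encoding a label vector and
# converting a sparse kNN distance graph back into (distances, indices) arrays.
if __name__ == "__main__":
    from sklearn.neighbors import kneighbors_graph  # demo-only import

    labels_demo = np.array([0, 1, 2, 1])
    print(one_hot(labels_demo))                    # (4, 3) one-hot matrix

    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(30, 5))
    knn_graph = kneighbors_graph(X_demo, n_neighbors=5, mode="distance")  # (30, 30) csr matrix
    dists, idx = convert_knn_graph_to_idx(knn_graph)
    print(dists.shape, idx.shape)                  # (30, 5) each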
|
scib-metrics
|
/scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_utils.py
|
_utils.py
|
import warnings
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from chex import ArrayDevice
from jax import nn
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from scib_metrics._types import ArrayLike, IntOrKey, NdArray
def get_ndarray(x: ArrayDevice) -> np.ndarray:
"""Convert Jax device array to Numpy array."""
return np.array(jax.device_get(x))
def one_hot(y: NdArray, n_classes: Optional[int] = None) -> jnp.ndarray:
"""One-hot encode an array. Wrapper around :func:`~jax.nn.one_hot`.
Parameters
----------
y
Array of shape (n_cells,) or (n_cells, 1).
n_classes
Number of classes. If None, inferred from the data.
Returns
-------
one_hot: jnp.ndarray
Array of shape (n_cells, n_classes).
"""
n_classes = n_classes or jnp.max(y) + 1
return nn.one_hot(jnp.ravel(y), n_classes)
def validate_seed(seed: IntOrKey) -> jax.random.KeyArray:
"""Validate a seed and return a Jax random key."""
return jax.random.PRNGKey(seed) if isinstance(seed, int) else seed
def check_square(X: ArrayLike):
"""Check if a matrix is square."""
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
def convert_knn_graph_to_idx(X: csr_matrix) -> Tuple[np.ndarray, np.ndarray]:
"""Convert a kNN graph to indices and distances."""
check_array(X, accept_sparse="csr")
check_square(X)
n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
if len(np.unique(n_neighbors)) > 1:
raise ValueError("Each cell must have the same number of neighbors.")
n_neighbors = int(np.unique(n_neighbors)[0])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Precomputed sparse input")
nn_obj = NearestNeighbors(n_neighbors=n_neighbors, metric="precomputed").fit(X)
kneighbors = nn_obj.kneighbors(X)
return kneighbors
| 0.918441 | 0.585012 |
[](https://github.com/theislab/scib/stargazers)
[](https://pypi.org/project/scib)
[](https://pepy.tech/project/scib)
[](https://github.com/theislab/scib/actions/workflows/test.yml)
[](https://scib.readthedocs.io/en/latest/?badge=latest)
[](https://codecov.io/gh/theislab/scib)
[](https://github.com/pre-commit/pre-commit)
# Benchmarking atlas-level data integration in single-cell genomics
This repository contains the code for the `scib` package used in our benchmarking study for data integration tools.
In [our study](https://doi.org/10.1038/s41592-021-01336-8), we benchmark 16 methods (see Tools) with 4 combinations of
preprocessing steps, leading to 68 method combinations on 85 batches of gene expression and chromatin accessibility
data.

## Resources
- The git repository of the [`scib` package](https://github.com/theislab/scib) and
its [documentation](https://scib.readthedocs.io/).
- The reusable pipeline we used in the study can be found in the
separate [scib pipeline](https://github.com/theislab/scib-pipeline.git) repository. It is reproducible and automates
  the computation of preprocessing combinations, integration methods and benchmarking metrics.
- On our [website](https://theislab.github.io/scib-reproducibility) we visualise the results of the study.
- For reproducibility and visualisation we have a dedicated
repository: [scib-reproducibility](https://github.com/theislab/scib-reproducibility).
### Please cite:
Luecken, M.D., Büttner, M., Chaichoompu, K. et al. Benchmarking atlas-level data integration in single-cell genomics.
Nat Methods 19, 41–50 (2022). [https://doi.org/10.1038/s41592-021-01336-8](https://doi.org/10.1038/s41592-021-01336-8)
## Package: scib
We created the Python package called `scib` that uses `scanpy` to streamline the integration of single-cell datasets and
evaluate the results. The package contains several modules for preprocessing an `anndata` object, running integration
methods and evaluating the results using a number of metrics. For preprocessing, `scib.preprocessing` (or `scib.pp`)
contains functions for normalising, scaling or batch-aware selection of highly variable genes. Functions for the
integration methods are in `scib.integration` (or `scib.ig` for short) and metrics are under
`scib.metrics` (or `scib.me`).
The `scib` python package is available on [PyPI](https://pypi.org/) and can be installed through
```commandline
pip install scib
```
Import `scib` in python:
```python
import scib
```
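The snippet below sketches how these modules fit together on an `anndata` object. It is meant as an illustration only: the exact function names and signatures are assumptions here, so please check the [documentation](https://scib.readthedocs.io/) for the current API.

```python
import scanpy as sc
import scib

adata = sc.read("my_dataset.h5ad")  # hypothetical input with a "batch" column in .obs

# Preprocessing (scib.pp): e.g. batch-aware highly variable gene selection.
# NOTE: the names and signatures below are assumptions, not a verified API reference.
hvgs = scib.pp.hvg_batch(adata, batch_key="batch", target_genes=2000)

# Integration (scib.ig): run one of the wrapped integration methods on the HVG subset.
adata_int = scib.ig.scanorama(adata[:, hvgs].copy(), batch="batch")
```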
### Optional Dependencies
The package contains optional dependencies that need to be installed manually if needed.
These include R dependencies (`rpy2`, `anndata2ri`) which require an installation of R integration method packages.
All optional dependencies are listed under `setup.cfg` under `[options.extras_require]` and can be installed through pip.
e.g. for installing `rpy2` and `bbknn` dependencies:
```commandline
pip install 'scib[rpy2,bbknn]'
```
Optional dependencies outside of python need to be installed separately.
For instance, in order to run kBET, install it via the following command in R:
```R
install.packages('remotes')
remotes::install_github('theislab/kBET')
```
## Metrics
We implemented different metrics for evaluating batch correction and biological conservation in the `scib.metrics`
module.
<table class="docutils align-default">
<colgroup>
<col style="width: 50%" />
<col style="width: 50%" />
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p>Biological Conservation</p></th>
<th class="head"><p>Batch Correction</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even" >
<td><ul class="simple">
<li><p>Cell type ASW</p></li>
<li><p>Cell cycle conservation</p></li>
<li><p>Graph cLISI</p></li>
<li><p>Adjusted rand index (ARI) for cell label</p></li>
<li><p>Normalised mutual information (NMI) for cell label</p></li>
<li><p>Highly variable gene conservation</p></li>
<li><p>Isolated label ASW</p></li>
<li><p>Isolated label F1</p></li>
<li><p>Trajectory conservation</p></li>
</ul></td>
<td><ul class="simple">
<li><p>Batch ASW</p></li>
<li><p>Principal component regression</p></li>
<li><p>Graph iLISI</p></li>
<li><p>Graph connectivity</p></li>
<li><p>kBET (K-nearest neighbour batch effect)</p></li>
</ul></td>
</tr>
</tbody>
</table>
For a detailed description of the metrics implemented in this package, please see our
[publication](https://doi.org/10.1038/s41592-021-01336-8) and the package [documentation](https://scib.readthedocs.io/).
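As a concrete illustration of the metrics module, the sketch below scores an integrated object with a few of the metrics listed above. The function names and signatures are assumptions based on the `scib.me` layout described earlier, so verify them against the [documentation](https://scib.readthedocs.io/) before use.

```python
import scib

# `adata` is the unintegrated object and `adata_int` the integrated one (hypothetical inputs),
# both with a "batch" column, a "celltype" label and a "cluster" assignment in .obs.
# NOTE: these calls are assumptions for illustration, not a verified API reference.
nmi = scib.me.nmi(adata_int, "cluster", "celltype")                # NMI for cell label
asw = scib.me.silhouette(adata_int, "celltype", "X_pca")           # cell type ASW
pcr = scib.me.pcr_comparison(adata, adata_int, covariate="batch")  # principal component regression
print(nmi, asw, pcr)
```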
## Integration Tools
Tools that are compared include:
- [BBKNN](https://github.com/Teichlab/bbknn) 1.3.9
- [Combat](https://scanpy.readthedocs.io/en/stable/api/scanpy.pp.combat.html) [paper](https://academic.oup.com/biostatistics/article/8/1/118/252073)
- [Conos](https://github.com/hms-dbmi/conos) 1.3.0
- [DESC](https://github.com/eleozzr/desc) 2.0.3
- [FastMNN](https://bioconductor.org/packages/batchelor/) (batchelor 1.4.0)
- [Harmony](https://github.com/immunogenomics/harmony) 1.0
- [LIGER](https://github.com/MacoskoLab/liger) 0.5.0
- [MNN](https://github.com/chriscainx/mnnpy) 0.1.9.5
- [SAUCIE](https://github.com/KrishnaswamyLab/SAUCIE)
- [Scanorama](https://github.com/brianhie/scanorama) 1.7.0
- [scANVI](https://github.com/chenlingantelope/HarmonizationSCANVI) (scVI 0.6.7)
- [scGen](https://github.com/theislab/scgen) 1.1.5
- [scVI](https://github.com/YosefLab/scVI) 0.6.7
- [Seurat v3](https://github.com/satijalab/seurat) 3.2.0 CCA (default) and RPCA
- [TrVae](https://github.com/theislab/trvae) 0.0.1
- [TrVaep](https://github.com/theislab/trvaep) 0.1.0
## Development
For developing this package, please make sure to install additional dependencies so that you can use `pytest` and
`pre-commit`.
```shell
pip install -e '.[test,dev]'
```
Please refer to the `setup.cfg` for more optional dependencies.
Install `pre-commit` to the repository for running it automatically every time you commit in git.
```shell
pre-commit install
```
|
scib
|
/scib-1.1.4.tar.gz/scib-1.1.4/README.md
|
README.md
|
pip install scib
import scib
pip install 'scib[rpy2,bbknn]'
install.packages('remotes')
remotes::install_github('theislab/kBET')
pip install -e '.[test,dev]'
pre-commit install
| 0.382833 | 0.965414 |
.. :changelog:
History
-------
0.2 (2016-09-12)
---------------------
* Renamed to scibag to avoid name collisions
0.1.4 (2016-05-24)
---------------------
* Dropped wheel dependency (#2)
* fixed version numbers throughout the project (#3)
* marked package as "inactive" to prepare for the name transition
0.1.3 (2015-04-23)
---------------------
* Added jsonschema dependency
0.1.1 (2015-04-23)
---------------------
* Added tornado dependency
0.1.0 (2015-04-23)
---------------------
* First release on PyPI.
|
scibag
|
/scibag-0.2.1.tar.gz/scibag-0.2.1/HISTORY.rst
|
HISTORY.rst
|
.. :changelog:
History
-------
0.2 (2016-09-12)
---------------------
* Renamed to scibag to avoid name collisions
0.1.4 (2016-05-24)
---------------------
* Dropped wheel dependency (#2)
* fixed version numbers throughout the project (#3)
* marked package as "inactive" to prepare for the name transition
0.1.3 (2015-04-23)
---------------------
* Added jsonschema dependency
0.1.1 (2015-04-23)
---------------------
* Added tornado dependency
0.1.0 (2015-04-23)
---------------------
* First release on PyPI.
| 0.792304 | 0.183777 |
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/javipalanca/scibag/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
scibag could always use more documentation, whether as part of the
official scibag docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/javipalanca/scibag/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `scibag` for local development.
1. Fork the `scibag` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/scibag.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv scibag
$ cd scibag/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 scibag tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, and 3.4, and for PyPy. Check
https://travis-ci.org/javipalanca/scibag/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_scibag
|
scibag
|
/scibag-0.2.1.tar.gz/scibag-0.2.1/CONTRIBUTING.rst
|
CONTRIBUTING.rst
|
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/javipalanca/scibag/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
scibag could always use more documentation, whether as part of the
official scibag docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/javipalanca/scibag/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `scibag` for local development.
1. Fork the `scibag` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/scibag.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv scibag
$ cd scibag/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 scibag tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, and 3.4, and for PyPy. Check
https://travis-ci.org/javipalanca/scibag/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_scibag
| 0.563618 | 0.42471 |
# scIBD
scIBD is a doublet detection tool for scCAS data.
scIBD is implemented entirely in Python.
The packages it depends on can be installed with the command `pip/pip3 install -r requirements.txt`.
Installation
-----
```bash
pip install -r requirements.txt
git clone git://github.com/Ying-Lab/scIBD
cd scIBD
python setup.py install
```
Running
-----
```bash
import scibd as si
KNNITER = si.KNNIter(input)
result = KNNITER.IterCall()
```
Parameters
-----
input: an AnnData object, or a count matrix (numpy or scipy)
output:
if the input is an AnnData object, the output is also an AnnData object; the obs of the returned AnnData gains two columns: obs['PredDBL'] holds the predicted results, where 1 indicates a predicted doublet and 0 indicates a singlet, and obs['DBLscore'] holds the doublet score of each droplet.
if the input is a count matrix, the outputs are the indices of the predicted doublets and the doublet scores of all droplets (see the sketch below for the AnnData case)
-----
other parameters:
exprate: The expected calling rate of doublets, default 0.1.
strategy: The KNN graphing strategy. scIBD adaptively chooses a KNN graphing strategy; users can also set it manually to 'PCoA' or 'PCA'.
core: The number of threads, defaulting to the maximum number of cores available on the machine.
sim_rate: The ratio of simulated doublets in each iteration, default 0.3.
nPC: The number of principal components used, default 5.
neighbors: The number of neighbors used to construct the KNN graph, default 40.
n_tree: The number of trees used in KNN construction, default 30.
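For the AnnData case, a minimal end-to-end sketch looks like the following. It is illustrative only and assumes the optional parameters above are passed to `KNNIter` as keyword arguments.

```python
import scanpy as sc
import scibd as si

adata = sc.read_h5ad("atac_counts.h5ad")  # hypothetical input file

knniter = si.KNNIter(adata, exprate=0.1, strategy='PCA')  # optional parameters as described above
adata = knniter.IterCall()

print(adata.obs['PredDBL'].sum())    # number of predicted doublets (1 = doublet, 0 = singlet)
print(adata.obs['DBLscore'].head())  # doublet score of each droplet
```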
|
scibd
|
/scibd-1.2.0.tar.gz/scibd-1.2.0/README.md
|
README.md
|
pip install -r requirements.txt
git clone git://github.com/Ying-Lab/scIBD
cd scIBD
python setup.py install
import scibd as si
KNNITER = si.KNNIter(input)
result = KNNITER.IterCall()
| 0.38122 | 0.827166 |
|logo|
SciBeam |Build Status| |codecov| |PyPI version|
===============================================
**SciBeam** is an open source library for analyzing time series beam
measurement data. Using pandas DataFrame and Series as its base
classes, additional time-series-related features are added for quick
analysis, such as file name matching, Gaussian fitting, peak analysis,
noise filtering, plotting, etc. The flexible method chain enables fast
data analysis on any time series data.
SciBeam was originally designed for experimental physics data analysis.
The library has been tested on daily lab data analysis and is under
active development in terms of breadth and depth of scientific
computation.
Installation
============
Dependencies
------------
SciBeam requires:
- Python( >= 3.4)
- Numpy( >= 1.8.2)
- Scipy( >= 0.13.3)
- pandas ( >= 0.23.0)
- matplotlib ( >= 1.5.1)
- re
- os
User installation
-----------------
Currently only available by downloading from GitHub; it will be
available for installation through pip soon:
Using PyPI
~~~~~~~~~~
.. code:: bash
pip install scibeam
Using source code
~~~~~~~~~~~~~~~~~
Download the source code:
.. code:: bash
   git clone https://github.com/SuperYuLu/SciBeam
Change to the package directory:
.. code:: bash
cd scibeam
Install the package:
::
python setup.py install
Release
=======
- v0.1.0: 08/19/2018 first release !
Development
===========
Under active development.
TODO:
-----
- Increase test coverage
- Add more plotting functions
-  Add config.py for global configuration
- Add AppVeyor
Contribute
----------
Coming soon…
Testing
-------
The testing part is based on unittest and can be run through setuptools:
.. code:: python
python setup.py test
or
.. code:: bash
make test
Status
------
Version 0.1.0 on `PyPI <https://pypi.org/project/scibeam/>`__
.. |logo| image:: https://raw.githubusercontent.com/SuperYuLu/SciBeam/master/img/logo.png
:target: https://github.com/SuperYuLu/SciBeam
.. |Build Status| image:: https://travis-ci.org/SuperYuLu/SciBeam.svg?branch=master
:target: https://travis-ci.org/SuperYuLu/SciBeam
.. |codecov| image:: https://codecov.io/gh/SuperYuLu/SciBeam/branch/master/graph/badge.svg
:target: https://codecov.io/gh/SuperYuLu/SciBeam
.. |PyPI version| image:: https://badge.fury.io/py/scibeam.svg
:target: https://badge.fury.io/py/scibeam
|
scibeam
|
/scibeam-0.1.1.tar.gz/scibeam-0.1.1/README.rst
|
README.rst
|
|logo|
SciBeam |Build Status| |codecov| |PyPI version|
===============================================
**SciBeam** is an open source library for analyzing time series beam
measurement data. Using pandas DataFrame and Series as its base
classes, additional time-series-related features are added for quick
analysis, such as file name matching, Gaussian fitting, peak analysis,
noise filtering, plotting, etc. The flexible method chain enables fast
data analysis on any time series data.
SciBeam was originally designed for experimental physics data analysis.
The library has been tested on daily lab data analysis and is under
active development in terms of breadth and depth of scientific
computation.
Installation
============
Dependencies
------------
SciBeam requires:
- Python( >= 3.4)
- Numpy( >= 1.8.2)
- Scipy( >= 0.13.3)
- pandas ( >= 0.23.0)
- matplotlib ( >= 1.5.1)
- re
- os
User installation
-----------------
Currently only available by downloading from GitHub; it will be
available for installation through pip soon:
Using PyPI
~~~~~~~~~~
.. code:: bash
pip install scibeam
Using source code
~~~~~~~~~~~~~~~~~
Download the source code:
.. code:: bash
   git clone https://github.com/SuperYuLu/SciBeam
Change to the package directory:
.. code:: bash
cd scibeam
Install the package:
::
python setup.py install
Release
=======
- v0.1.0: 08/19/2018 first release !
Development
===========
Under active development.
TODO:
-----
- Increase test coverage
- Add more plotting functions
-  Add config.py for global configuration
- Add AppVeyor
Contribute
----------
Coming soon…
Testing
-------
The testing part is based on unittest and can be run through setuptools:
.. code:: python
python setup.py test
or
.. code:: bash
make test
Status
------
Version 0.1.0 on `PyPI <https://pypi.org/project/scibeam/>`__
.. |logo| image:: https://raw.githubusercontent.com/SuperYuLu/SciBeam/master/img/logo.png
:target: https://github.com/SuperYuLu/SciBeam
.. |Build Status| image:: https://travis-ci.org/SuperYuLu/SciBeam.svg?branch=master
:target: https://travis-ci.org/SuperYuLu/SciBeam
.. |codecov| image:: https://codecov.io/gh/SuperYuLu/SciBeam/branch/master/graph/badge.svg
:target: https://codecov.io/gh/SuperYuLu/SciBeam
.. |PyPI version| image:: https://badge.fury.io/py/scibeam.svg
:target: https://badge.fury.io/py/scibeam
| 0.596786 | 0.612715 |
[](https://github.com/SuperYuLu/SciBeam)
# SciBeam [](https://travis-ci.org/SuperYuLu/SciBeam) [](https://codecov.io/gh/SuperYuLu/SciBeam) [](https://badge.fury.io/py/scibeam)
**SciBeam** is an open source library for analyzing time series beam measurement data. Using pandas DataFrame and Series as its base classes, additional time-series-related features are added for quick analysis, such as file name matching, Gaussian fitting, peak analysis, noise filtering, plotting, etc. The flexible method chain enables fast data analysis on any time series data.
SciBeam was originally designed for experimental physics data analysis. The library has been tested on daily lab data analysis and is under active development in terms of breadth and depth of scientific computation.
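As a rough, self-contained illustration of the kind of analysis the package automates (this example deliberately uses only pandas and SciPy rather than scibeam's own API), fitting a Gaussian to a noisy time-of-flight style peak looks like this:

```python
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit

def gaussian(t, a, t0, sigma, offset):
    return a * np.exp(-((t - t0) ** 2) / (2 * sigma ** 2)) + offset

# Hypothetical beam measurement: a noisy Gaussian peak sampled over time.
t = np.linspace(0.0, 1.0, 500)
signal = gaussian(t, 1.0, 0.4, 0.05, 0.1) + 0.02 * np.random.default_rng(0).normal(size=t.size)
series = pd.Series(signal, index=t)

# Fit the peak and report the estimated parameters.
popt, _ = curve_fit(gaussian, series.index.values, series.values, p0=[1.0, 0.5, 0.1, 0.0])
print(dict(zip(["amplitude", "center", "sigma", "offset"], popt)))
```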
# Installation
## Dependencies
SciBeam requires:
+ Python( >= 3.4)
+ Numpy( >= 1.8.2)
+ Scipy( >= 0.13.3)
+ pandas ( >= 0.23.0)
+ matplotlib ( >= 1.5.1)
+ re
+ os
## User installation
Currently only available by downloading from GitHub; it will be available for installation through pip soon:
### Using PyPI
```bash
pip install scibeam
```
### Using source code
Download the source code:
```bash
git clone https://github.com/SuperYuLu/SciBeam
```
Change to the package directory:
```bash
cd scibeam
```
Install the package:
```
python setup.py install
```
# Release
+ v0.1.0: 08/19/2018 first release !
# Development
Under active development.
## TODO:
+ Increase test coverage
+ Add more plotting functions
+ Add config.py for global configuration
+ Add AppVeyor
## Contribute
Coming soon...
## Testing
The test suite is based on unittest and can be run through setuptools:
```bash
python setup.py test
```
or
```bash
make test
```
## Status
Version 0.1.1 on [PyPI](https://pypi.org/project/scibeam/)
|
scibeam
|
/scibeam-0.1.1.tar.gz/scibeam-0.1.1/README.md
|
README.md
|
pip install scibeam
git clone https://github.com/SuperYuLu/SciBeam
cd scibeam
python setup.py install
python setup.py test
make test
| 0.566498 | 0.976061 |
# scibiomart
[](https://doi.org/10.5281/zenodo.4099048)
[](https://pypi.org/project/scibiomart/)
This is just a simple wrapper around the API from BioMart, but I found existing packages were not quite sufficient
for what I wanted to do, i.e. a CLI interface and a Python interface with a TSV API.
Here you can simply get the list of all genes and perform other biomart functions such as mapping between human and
mouse.
Have a look at the [docs](https://arianemora.github.io/scibiomart/) which explains things in more detail.
## Installation
```
pip install scibiomart
```
## Usage
For the simplest usage, use the API, which will get the latest mouse and human datasets and map gene IDs to gene names.
### Examples
```
from scibiomart import SciBiomartApi
sb = SciBiomartApi()
# Get only the default for those genes
results_df = sb.get_mouse_default({'ensembl_gene_id': 'ENSMUSG00000029844,ENSMUSG00000032446'})
# Select attributes
results_df = sb.get_mouse_default({'ensembl_gene_id': 'ENSMUSG00000020875,ENSMUSG00000038210'},
attr_list=['entrezgene_id'])
# Get all genes
results_df = sb.get_mouse_default()
# Sort the results based on TSS (takes direction into account)
results_df = sb.sort_df_on_starts(results_df)
# Get human
results_df = sb.get_human_default()
```
### Examples extended
If you are interested in more than the simple API, see the tests for all examples; you can also list the datasets
etc. and query other attributes.
#### Print marts
```
sb = SciBiomart()
marts = sb.list_marts()
print('\n'.join(marts))
```
#### Print datasets
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
err = sb.list_datasets()
```
#### List attributes
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_attributes()
```
#### List configs
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_configs()
```
#### List filters
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_filters()
```
#### Run generic query
Here we show a generic query for two genes (as a comma-separated list), with the attributes we're interested in being
'ensembl_gene_id', 'hgnc_symbol', and 'uniprotswissprot'.
Run query: `def run_query(self, filter_dict: dict, attr_list: list):`
i.e. you can pass it a filter dictionary and a list of attributes. Filtering makes the query quicker; you can also run it with an
empty filter_dict, in which case it will return all genes.
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('hsapiens_gene_ensembl')
results = sb.run_query({'ensembl_gene_id': 'ENSG00000139618,ENSG00000091483'},
['ensembl_gene_id', 'hgnc_symbol', 'uniprotswissprot'])
print(results)
```
#### Match mouse to human
Get mouse orthologs for human genes
```
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('hsapiens_gene_ensembl')
attributes = ['ensembl_gene_id', 'mmusculus_homolog_ensembl_gene', 'mmusculus_homolog_perc_id_r1']
results = sb.run_query({'ensembl_gene_id': 'ENSG00000139618,ENSG00000091483'}, attributes)
print(results)
```
### See docs for more info
|
scibiomart
|
/scibiomart-1.0.2.tar.gz/scibiomart-1.0.2/README.md
|
README.md
|
pip install scibiomart
from scibiomart import SciBiomartApi
sb = SciBiomartApi()
# Get only the default for those genes
results_df = sb.get_mouse_default({'ensembl_gene_id': 'ENSMUSG00000029844,ENSMUSG00000032446'})
# Select attributes
results_df = sb.get_mouse_default({'ensembl_gene_id': 'ENSMUSG00000020875,ENSMUSG00000038210'},
attr_list=['entrezgene_id'])
# Get all genes
results_df = sb.get_mouse_default()
# Sort the results based on TSS (takes direction into account)
results_df = sb.sort_df_on_starts(results_df)
# Get human
results_df = sb.get_human_default()
sb = SciBiomart()
marts = sb.list_marts()
print('\n'.join(marts))
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
err = sb.list_datasets()
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_attributes()
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_configs()
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('fcatus_gene_ensembl')
err = sb.list_filters()
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('hsapiens_gene_ensembl')
results = sb.run_query({'ensembl_gene_id': 'ENSG00000139618,ENSG00000091483'},
['ensembl_gene_id', 'hgnc_symbol', 'uniprotswissprot'])
print(results)
sb = SciBiomart()
sb.set_mart('ENSEMBL_MART_ENSEMBL')
sb.set_dataset('hsapiens_gene_ensembl')
attributes = ['ensembl_gene_id', 'mmusculus_homolog_ensembl_gene', 'mmusculus_homolog_perc_id_r1']
results = sb.run_query({'ensembl_gene_id': 'ENSG00000139618,ENSG00000091483'}, attributes)
print(results)
| 0.562777 | 0.924244 |
----------------------------------------------------------------------------------------
sciblox
----------------------------------------------------------------------------------------
sciblox was designed to make data science and machine learning easier.
It features modules similar to R's CARET, and draws inspiration from many other packages.
To use it:
from sciblox import *
%matplotlib notebook
What's included?
----------------------------------------------------------------------------------------
1. CARET like preprocessing (scaling, normalising, dummify, outlier removal, unskew, etc)
2. Processor modules - you can fit onto testing data!
3. LightGBM and RandomForest support - later will add more
4. More analysis methods: outlier detection, skewness methods, auto unskewing etc
5. 3D automatic plots in NEW cool analyse function!
6. BPCA, MICE, KNN, Boosting, Forest, SVD imputation
7. Easy sequential text mining and automatic text mining
8. Jupyter Notebooks integrated
What's in construction?
----------------------------------------------------------------------------------------
1. More machine learning libraries - Extra Trees, Neural Networks, SVM etc
2. Advanced data and text mining
3. CV, Auto machine learning
4. Multiprocessing support
Demonstration
----------------------------------------------------------------------------------------
If you want to see sciblox in action, please visit
https://danielhanchen.github.io/
GITHUB: https://github.com/danielhanchen/sciblox
|
sciblox
|
/sciblox-0.2.11.tar.gz/sciblox-0.2.11/README.txt
|
README.txt
|
----------------------------------------------------------------------------------------
sciblox
----------------------------------------------------------------------------------------
sciblox was designed to make data science and machine learning easier.
It features modules similar to R's CARET, and draws inspiration from many other packages.
To use it:
from sciblox import *
%matplotlib notebook
What's included?
----------------------------------------------------------------------------------------
1. CARET like preprocessing (scaling, normalising, dummify, outlier removal, unskew, etc)
2. Processor modules - you can fit onto testing data!
3. LightGBM and RandomForest support - later will add more
4. More analysis methods: outlier detection, skewness methods, auto unskewing etc
5. 3D automatic plots in NEW cool analyse function!
6. BPCA, MICE, KNN, Boosting, Forest, SVD imputation
7. Easy sequential text mining and automatic text mining
8. Jupyter Notebooks integrated
What's in construction?
----------------------------------------------------------------------------------------
1. More machine learning libraries - Extra Trees, Neural Networks, SVM etc
2. Advanced data and text mining
3. CV, Auto machine learning
4. Multiprocessing support
Demonstration
----------------------------------------------------------------------------------------
If you want to see sciblox in action, please visit
https://danielhanchen.github.io/
GITHUB: https://github.com/danielhanchen/sciblox
| 0.418697 | 0.495911 |
# SciBot
curation workflow automation and coordination
* find RRIDs in articles
* look them up in the SciCrunch resolver
* create Hypothesis annotations that anchor to the RRIDs and display lookup results
## Getting Started
* [Create a Hypothesis](https://web.hypothes.is/start/) account which will post the annotations.
* Generate an api token at https://hypothes.is/profile/developer (must be logged in to see page).
* Create a group to store the annotations at https://hypothes.is/groups/new (must be logged in to see page).
* See [Setup on amazon](#setup-on-amazon)
## Capturing the bookmarklet
Visit https://HOST:PORT/bookmarklet and follow the instructions.
## Using the bookmarklet
Visit an article that contains RRIDs, click the bookmarklet
## Checking results in the browser
The found RRIDs are logged to the JavaScript console
## Checking results on the server
The found RRIDs are logged to timestamped files, along with the text and html of the article that was scanned for RRIDs
## Setup on amazon
0. ssh in to the host that will serve the script
1. `sudo yum install gcc libxml2 libxml2-devel libxslt libxslt-devel python36 python36-devel python36-pip`
2. `sudo alternatives --set python /usr/bin/python3.6`
3. `sudo pip install pipenv`
4. `git clone https://github.com/SciCrunch/scibot.git`
5. `cd scibot && python3.6 setup.py wheel && pipenv install dist/*.whl`
6. `export SCIBOT_USERNAME=someusername`
7. `export SCIBOT_GROUP=somegroupname`
8. `unset HISTFILE`
9. `export SCIBOT_API_TOKEN=sometoken`
10. `export SCIBOT_SYNC=somerandomnumber` (e.g. run `head -c 100 /dev/urandom | tr -dc 'a-zA-Z0-9'` every time)
11. create a screen session
12. in the screen session run `pipenv run scibot-server`; you should create a link to the log files folder in ~/scibot/
13. get letsencrypt certs using certbot, follow directions [here](https://certbot.eff.org/docs/using.html) (prefer standalone)
14. alternately if using a cert from another registrar you may need to bundle your certs `cat my-cert.crt existing-bundle.crt > scicrunch.io.crt` (see https://gist.github.com/bradmontgomery/6487319 for details)
15. before or after starting gunicorn you need to run `sudo yum install nginx && sudo cp ~/scibot/nginx.conf /etc/nginx/nginx.conf && sudo service nginx start`
16. run `pipenv run scibot-sync` in another screen (if run in a terminal with a different environment you need to run step 10 again first)
|
scibot
|
/scibot-0.0.1.tar.gz/scibot-0.0.1/README.md
|
README.md
|
# SciBot
curation workflow automation and coordination
* find RRIDs in articles
* look them up in the SciCrunch resolver
* create Hypothesis annotations that anchor to the RRIDs and display lookup results
## Getting Started
* [Create a Hypothesis](https://web.hypothes.is/start/) account which will post the annotations.
* Generate an api token at https://hypothes.is/profile/developer (must be logged in to see page).
* Create a group to store the annotations at https://hypothes.is/groups/new (must be logged in to see page).
* See [Setup on amazon](#setup-on-amazon)
## Capturing the bookmarklet
Visit https://HOST:PORT/bookmarklet and follow the instructions.
## Using the bookmarklet
Visit an article that contains RRIDs, click the bookmarklet
## Checking results in the browser
The found RRIDs are logged to the JavaScript console
## Checking results on the server
The found RRIDs are logged to timestamped files, along with the text and html of the article that was scanned for RRIDs
## Setup on amazon
0. ssh in to the host that will serve the script
1. `sudo yum install gcc libxml2 libxml2-devel libxslt libxslt-devel python36 python36-devel python36-pip`
2. `sudo alternatives --set python /usr/bin/python3.6`
3. `sudo pip install pipenv`
4. `git clone https://github.com/SciCrunch/scibot.git`
5. `cd scibot && python3.6 setup.py wheel && pipenv install dist/*.whl`
6. `export SCIBOT_USERNAME=someusername`
7. `export SCIBOT_GROUP=somegroupname`
8. `unset HISTFILE`
9. `export SCIBOT_API_TOKEN=sometoken`
10. `export SCIBOT_SYNC=somerandomnumber` (e.g. run `head -c 100 /dev/urandom | tr -dc 'a-zA-Z0-9'` every time)
11. create a screen session
12. in the screen session run `pipenv run scibot-server`; you should create a link to the log files folder in ~/scibot/
13. get letsencrypt certs using certbot, follow directions [here](https://certbot.eff.org/docs/using.html) (prefer standalone)
14. alternately if using a cert from another registrar you may need to bundle your certs `cat my-cert.crt existing-bundle.crt > scicrunch.io.crt` (see https://gist.github.com/bradmontgomery/6487319 for details)
15. before or after starting gunicorn you need to run `sudo yum install nginx && sudo cp ~/scibot/nginx.conf /etc/nginx/nginx.conf && sudo service nginx start`
16. run `pipenv run scibot-sync` in another screen (if run in a terminal with a different environment you need to run step 10 again first)
| 0.639624 | 0.661055 |
# SciCamera
---
Consistent and reliable imaging for scientific applications.
## Why _SciCamera_?
Scientific imaging applications often require minimal post-processing pipelines,
precise capture timing, near-gapless sequential frames, and easily
configurable settings like gain, resolution, bit-depth, and exposure
length.
This project, which began as fork of the webcam/video-focused [`picamera2`][picamera2]
library, aims to make it easy to configure and use cameras for scientific applications,
with a focus on _performance, reliability, code quality, and maintainability_.
### Why not _SciCamera_?
SciCamera currently focuses on high-quality, timing-sensitive, minimally-processed
_still images_. For low-bandwidth, real-time image and video streaming, we recommend
the [`picamera2`][picamera2] library.
## Platform support
_SciCamera_ supports
- Raspberry Pi OS (Bullseye or later), 64-bit.
- x86 Ubuntu
Other debian flavors are likely to be supported. We welcome pull requests to extend
the testing toolchains to cover your platform.
## Installation
_SciCamera_ is a pure python package, but relies on the python
c++ wrapper of _libcamera_.
_SciCamera_ can be installed simply with:
```
pip install scicamera
```
### Installing libcamera + python bindings
Import and use of the above package requires that `libcamera` be built
with the Python bindings enabled. On Raspberry Pi OS, this is accomplished by
installing the `libcamera` package from apt. On x86 it must be built
using something like the following:
```bash
git clone https://github.com/Exclosure/libcamera.git
cd libcamera
git checkout v0.0.4
meson setup build -D pycamera=enabled
ninja -C build
sudo ninja -C build install
```
## Bugs/Contributing
Open an issue or PR to discuss your bug or feature. Once a course of action
has been identified, open a PR and discuss the changes.
Feature creep is not of interest, but we would be happy
to help you build your more complicated project on top of this.
If we like the changes and the tests pass, we will merge them.
CI requires code has been processed `isort` and `black` toolchains.
Doing this is pretty easy:
```
isort .
black .
```
Great work.
## Publishing to PYPI
Should be added to a GitHub Action later.
1. Add your pypi token
```sh
$ poetry config pypi-token.pypi my-token
```
2. Cut a new tag
```sh
$ git tag -a v0.1.0 -m "Version 0.1.0"
$ git push origin v0.1.0
```
3. Publish
```sh
$ poetry publish --build
```
[picamera2]:https://github.com/raspberrypi/picamera2
|
scicamera
|
/scicamera-0.2.1.tar.gz/scicamera-0.2.1/README.md
|
README.md
|
pip install scicamera
git clone https://github.com/Exclosure/libcamera.git
cd libcamera
git checkout v0.0.4
meson setup build -D pycamera=enabled
ninja -C build
sudo ninja -C build install
isort .
black .
| 0.567457 | 0.854642 |
sci
===
.. image:: https://img.shields.io/pypi/v/sci.svg
:target: https://pypi.python.org/pypi/sci
:alt: Latest PyPI version
.. image:: https://gitlab.com/marcos_felt/sci/badges/master/pipeline.svg
:target: https://gitlab.com/marcos_felt/sci/commits/master
:alt: Gitlab CI/CD Pipeline
Design, automate and share any science experiment.
Usage
-----
Installation
------------
Requirements
^^^^^^^^^^^^
Compatibility
-------------
Licence
-------
Authors
-------
`sci` was written by `scici <[email protected]>`_.
|
scici
|
/scici-0.1.0.tar.gz/scici-0.1.0/README.rst
|
README.rst
|
sci
===
.. image:: https://img.shields.io/pypi/v/sci.svg
:target: https://pypi.python.org/pypi/sci
:alt: Latest PyPI version
.. image:: https://gitlab.com/marcos_felt/sci/badges/master/pipeline.svg
:target: https://gitlab.com/marcos_felt/sci/commits/master
:alt: Gitlab CI/CD Pipeline
Design, automate and share any science experiment.
Usage
-----
Installation
------------
Requirements
^^^^^^^^^^^^
Compatibility
-------------
Licence
-------
Authors
-------
`sci` was written by `scici <[email protected]>`_.
| 0.799677 | 0.356335 |
from pint.quantity import _Quantity
from sci import units
from pint.errors import UndefinedUnitError
def check_units(value, dimension: str):
""" Check if units are of a certain dimension
Parameters
----------
value: `pint.quantity._Quantity`
The pint :class:`pint.quantity._Quantity` to check
dimension: `str`
Desired dimensionality of value
Returns
-------
result: `bool`
If the units are of the desired dimension, returns True.
Raises
------
ValueError
Raised if the unit dimensions are incorrect or the
value is not a pint unit quantity.
Examples
--------
>>> check_units(100 * units.milliliters, '[length]^3')
True
Notes
-----
See the pint_ documentation for more examples on dimensionality.
.. _pint: https://pint.readthedocs.io/en/latest/wrapping.html#checking-dimensionality
"""
try:
if value.check(dimension):
return True
else:
raise ValueError(f'{value} must contain pint units of dimension {dimension}.')
except AttributeError:
raise ValueError(f'{value} does not contain pint units (must be of dimension {dimension}).')
def filter_dict_values(input: dict, filter):
''' Filter dictionary values through a function called filter
This function will look recursively through nested dictionaries
and call filter(value) on all dictionary values.
Parameters
----------
input: `dict`
Input dictionary to filter
filter: `callable`
Function for filtering dictionary values.
This is called in form filter(value)
Returns
-------
filtered: `dict`
Returns filtered dictionary
'''
for k, v in input.items():
if isinstance(v, dict):
input[k] = filter_dict_values(v, filter)
else:
input[k] = filter(v)
return input
def stringify(input):
'''Convert pint quantities into strings
Parameters
----------
input: `pint.quantity._Quantity`
Pint unit quantity
Returns
-------
output: `str`
input as a string
'''
if isinstance(input, _Quantity):
return str(input)
else:
return input
def pintify(input: str):
''' Convert strings into pint quantities
Parameters
----------
input: `str`
String to be converted to pint quantity
Returns
-------
result: `pint.quantity._Quantity`
input as a pint quantity
'''
try:
return units(input)
except UndefinedUnitError:
return input
def check_kwargs(key, caller, **kwargs):
''' Check if kwargs has a needed field
Parameters
----------
key: `str`
keyword to look for in kwargs
Returns
-------
value
The value of the kwargs[key]
params: `dict`
The params dictionary (without the returned key/value pair)
Raises
------
ValueError
Raised if the key does not exist in kwargs
'''
if not kwargs.get(key):
raise ValueError('''{} needs to be an argument when instantiating a {}.'''
.format(key, caller))
else:
value = kwargs.pop(key)
return value, kwargs
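# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module). Round-trip a nested parameter dictionary between pint quantities
# and strings with the helpers defined above:
#
#   params = {'liquid_volume': units('10 milliliters'),
#             'nested': {'flow_rate': units('1 milliliter / minute')}}
#   as_strings = filter_dict_values(params, stringify)   # quantities -> strings
#   as_pint = filter_dict_values(as_strings, pintify)    # strings -> quantities
#   check_units(as_pint['liquid_volume'], '[length]^3')  # returns True
# ---------------------------------------------------------------------------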
|
scici
|
/scici-0.1.0.tar.gz/scici-0.1.0/sci/utils.py
|
utils.py
|
from pint.quantity import _Quantity
from sci import units
from pint.errors import UndefinedUnitError
def check_units(value, dimension: str):
""" Check if units are of a certain dimension
Parameters
----------
value: `pint.quantity._Quantity`
The pint :class:`pint.quantity._Quantity` to check
dimension: `str`
Desired dimensionality of value
Returns
-------
result: `bool`
If the units are of the desired dimension, returns True.
Raises
------
ValueError
Raised if the unit dimensions are incorrect or the
value is not a pint unit quantity.
Examples
--------
>>> check_units(100 * units.milliliters, '[length]^3')
True
Notes
-----
See the pint_ documentation for more examples on dimensionality.
.. _pint: https://pint.readthedocs.io/en/latest/wrapping.html#checking-dimensionality
"""
try:
if value.check(dimension):
return True
else:
raise ValueError(f'{value} must contain pint units of dimension {dimension}.')
except AttributeError:
raise ValueError(f'{value} does not contain pint units (must be of dimension {dimension}).')
def filter_dict_values(input: dict, filter):
''' Filter dictionary values through a function called filter
This function will look recursively through nested dictionaries
and call filter(value) on all dictionary values.
Parameters
----------
input: `dict`
Input dictionary to filter
filter: `callable`
Function for filtering dictionary values.
This is called in form filter(value)
Returns
-------
filtered: `dict`
Returns filtered dictionary
'''
for k, v in input.items():
if isinstance(v, dict):
input[k] = filter_dict_values(v, filter)
else:
input[k] = filter(v)
return input
def stringify(input):
'''Convert pint quantities into strings
Parameters
----------
input: `pint.quantity._Quantity`
Pint unit quantity
Returns
-------
output: `str`
input as a string
'''
if isinstance(input, _Quantity):
return str(input)
else:
return input
def pintify(input: str):
''' Convert strings into pint quantities
Parameters
----------
input: `str`
String to be converted to pint quantity
Returns
-------
result: `pint.quantity._Quantity`
input as a pint quantity
'''
try:
return units(input)
except UndefinedUnitError:
return input
def check_kwargs(key, caller, **kwargs):
''' Check if kwargs has a needed field
Parameters
----------
key: `str`
keyword to look for in kwargs
Returns
-------
value
The value of the kwargs[key]
params: `dict`
The params dictionary (without the returned key/value pair)
Raises
------
ValueError
Raised if the key does not exist in kwargs
'''
if not kwargs.get(key):
raise ValueError('''{} needs to be an argument when instantiating a {}.'''
.format(key, caller))
else:
value = kwargs.pop(key)
return value, kwargs
| 0.895451 | 0.620507 |
from sci import units
from sci.utils import check_units, filter_dict_values, stringify, check_kwargs, pintify
from pint.quantity import _Quantity
from interface import implements, Interface
from typing import Type, Union, List
class _Ref:
''' Base Class for Refs
Refs are physical containers (e.g., syringes, microplates).
This class should not be used directly. Instead, it should be inherited
by another class.
Parameters
----------
name: `str`
Reference name for the ref (e.g., 0.5M NaOH solution)
**params
The type parameter must be passed in as a keyword argument to all refs.
- ``type``: Ref type
'''
def __init__(self, name: str, **params):
self.type, self.params = check_kwargs('type', 'Ref', **dict(params))
self.name = name
def to_dict(self):
''' Convert ref to a dictionary ready for json serialization
'''
str_params = filter_dict_values(self.params, stringify)
return {"type": self.type, "name": self.name, "params": str_params}
def __repr__(self):
return f"{self.name} ({self.type.lower()})"
#Create interface for refs
_RefInterface = Interface.from_class(_Ref, ['__init__'])
ref_type = Type[_Ref]
def ref_from_dict(input: dict):
''' Create an instance of a ref from a dictionary
Parameters
----------
input: `dict`
Input dictionary for the ref
Returns
-------
ref: `_Ref`
One of the subclasses of ref (e.g., Syringe)
Raises
------
ValueError
Raised if the "type" field is not passed in input or
if the passed type is not a valid ref class
Examples
--------
>>> input = {'type': 'Syringe', 'name': '0.5M Citric Acid', 'params': {'liquid_volume': '10 milliliters'}}
>>> my_syringe = ref_from_dict(input)
See also
--------
_Ref.to_dict
'''
#Check if "type" field in input
if "type" not in input:
raise ValueError(f"The 'type' field was not passed, which is required.")
#Error handling when checking issubclass
def check_subclass(subclass, superclass):
try:
if issubclass(subclass, superclass): return True
except TypeError:
return False
#Find subclasses of _Ref
subclasses = [cls.__name__ for key, cls
in list(globals().items())
if check_subclass(cls, _Ref)]
subclasses.remove(_Ref.__name__)
#Convert dimensional values to pint quantities
params = filter_dict_values(input["params"], pintify)
#Create instance of class
ref_type = input.get("type")
ref_name = input.pop("name")
if ref_type in subclasses:
ref = globals()[ref_type]
new_ref = ref(name=ref_name, **params)
return new_ref
else:
raise ValueError(f"sci saying hi: {ref_type} is not one of the available refs.")
class Syringe(_Ref, implements(_RefInterface),):
''' Ref for syringes
Parameters
----------
name: `str`
Reference name for the syringe (e.g., 0.5M NaOH solution)
**kwargs
- ``liquid_volume``: Volume of liquid in the syringe, not the total volume of syringe (`pint.quantity._Quantity`)
'''
def __init__(self, name: str, **params):
#Make sure liquid volume is keyword arg and that units are correct
liquid_volume, _ = check_kwargs('liquid_volume', 'Syringe', **params)
check_units(liquid_volume, '[length]^3')
#Add type to params dictionary
params.update({'type': 'Syringe'})
#Inherit superclass __init__ method
super().__init__(name, **params)
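# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module). Create a Syringe, serialize it, and rebuild it from the dictionary:
#
#   acid = Syringe('0.5M Citric Acid', liquid_volume=units('10 milliliters'))
#   as_dict = acid.to_dict()        # {'type': 'Syringe', 'name': ..., 'params': {...}}
#   rebuilt = ref_from_dict(as_dict)
#   print(rebuilt)                  # 0.5M Citric Acid (syringe)
# ---------------------------------------------------------------------------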
|
scici
|
/scici-0.1.0.tar.gz/scici-0.1.0/sci/refs.py
|
refs.py
|
from sci import units
from sci.utils import check_units, filter_dict_values, stringify, check_kwargs, pintify
from pint.quantity import _Quantity
from interface import implements, Interface
from typing import Type, Union, List
class _Ref:
''' Base Class for Refs
Refs are physical containers (e.g., syringes, microplates).
This class should not be used directly. Instead, it should be inherited
by another class.
Parameters
----------
name: `str`
Reference name for the ref (e.g., 0.5M NaOH solution)
**params
The type parameter must be passed in as a keyword argument to all refs.
- ``type``: Ref type
'''
def __init__(self, name: str, **params):
self.type, self.params = check_kwargs('type', 'Ref', **dict(params))
self.name = name
def to_dict(self):
''' Convert ref to a dictionary ready for json serialization
'''
str_params = filter_dict_values(self.params, stringify)
return {"type": self.type, "name": self.name, "params": str_params}
def __repr__(self):
return f"{self.name} ({self.type.lower()})"
#Create interface for refs
_RefInterface = Interface.from_class(_Ref, ['__init__'])
ref_type = Type[_Ref]
def ref_from_dict(input: dict):
''' Create an instance of a ref from a dictionary
Parameters
----------
input: `dict`
Input dictionary for the ref
Returns
-------
ref: `_Ref`
One of the subclasses of ref (e.g., Syringe)
Raises
------
ValueError
Raised if the "type" field is not passed in input or
if the passed type is not a valid ref class
Examples
--------
>>> input = {'type': 'Syringe', 'name': '0.5M Citric Acid', 'params': {'liquid_volume': '10 milliliters'}}
>>> my_syringe = ref_from_dict(input)
See also
--------
_Ref.to_dict
'''
#Check if "type" field in input
if "type" not in input:
raise ValueError(f"The 'type' field was not passed, which is required.")
#Error handling when checking issubclass
def check_subclass(subclass, superclass):
try:
if issubclass(subclass, superclass): return True
except TypeError:
return False
#Find subclasses of _Ref
subclasses = [cls.__name__ for key, cls
in list(globals().items())
if check_subclass(cls, _Ref)]
subclasses.remove(_Ref.__name__)
#Convert dimensional values to pint quantities
params = filter_dict_values(input["params"], pintify)
#Create instance of class
ref_type = input.get("type")
ref_name = input.pop("name")
if ref_type in subclasses:
ref = globals()[ref_type]
new_ref = ref(name=ref_name, **params)
return new_ref
else:
raise ValueError(f"sci saying hi: {ref_type} is not one of the available refs.")
class Syringe(_Ref, implements(_RefInterface),):
''' Ref for syringes
Parameters
----------
name: `str`
Reference name for the syringe (e.g., 0.5M NaOH solution)
**kwargs
- ``liquid_volume``: Volume of liquid in the syringe, not the total volume of syringe (`pint.quantity._Quantity`)
'''
def __init__(self, name: str, **params):
#Make sure liquid volume is keyword arg and that units are correct
liquid_volume, _ = check_kwargs('liquid_volume', 'Syringe', **params)
check_units(liquid_volume, '[length]^3')
#Add type to params dictionary
params.update({'type': 'Syringe'})
#Inherit superclass __init__ method
super().__init__(name, **params)
| 0.881538 | 0.32118 |
`Science VM <http://www.scivm.com>`_ is a scicloud-computing platform that integrates into the Python Programming Language. It enables you to leverage the computing power of your datacenter and/or your choice of scicloud providers without having to manage, maintain, or configure virtual servers.
When using this Python library known as *scicloud*, Science VM will integrate seamlessly into your existing code base. To offload the execution of a function to our servers, all you must do is pass your desired function into the *scicloud* library. ScienceVM will run the function on its high-performance cluster. As you run more functions, our cluster auto-scales to meet your computational needs.
Before using this package, you will need to sign up a `Science VM <http://www.scivm.com>`_ account.
The *scicloud* library also features a simulator, which can be used without a Science VM account. The simulator uses the `multiprocessing <http://docs.python.org/library/multiprocessing.html>`_ library to create a stripped down version of the Science VM service. This simulated service can then run jobs locally across all CPU cores.
Quick command-line example::
>>> import scicloud
>>> def square(x):
... return x*x
...
>>> jid = scicloud.call(square,3) #square(3) evaluated on Science VM
>>> scicloud.result(jid)
9
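Jobs can also be mapped over many inputs. As a sketch, reusing the *square* function above (see docs.scivm.com for the exact signature and options)::
>>> jids = scicloud.map(square, range(5))
>>> scicloud.result(jids)
[0, 1, 4, 9, 16]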
Full package documentation is available at http://docs.scivm.com. Some dependencies may be required depending on your platform and Python version; see INSTALL for more information.
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/README.txt
|
README.txt
|
`Science VM <http://www.scivm.com>`_ is a scicloud-computing platform that integrates into the Python Programming Language. It enables you to leverage the computing power of your datacenter and/or your choice of scicloud providers without having to manage, maintain, or configure virtual servers.
When using this Python library known as *scicloud*, Science VM will integrate seamlessly into your existing code base. To offload the execution of a function to our servers, all you must do is pass your desired function into the *scicloud* library. ScienceVM will run the function on its high-performance cluster. As you run more functions, our cluster auto-scales to meet your computational needs.
Before using this package, you will need to sign up a `Science VM <http://www.scivm.com>`_ account.
The *scicloud* library also features a simulator, which can be used without a Science VM account. The simulator uses the `multiprocessing <http://docs.python.org/library/multiprocessing.html>`_ library to create a stripped down version of the Science VM service. This simulated service can then run jobs locally across all CPU cores.
Quick command-line example::
>>> import scicloud
>>> def square(x):
... return x*x
...
>>> jid = scicloud.call(square,3) #square(3) evaluated on Science VM
>>> scicloud.result(jid)
9
Full package documentation is available at http://docs.scivm.com. Some dependencies may be required depending on your platform and Python version; see INSTALL for more information.
| 0.881755 | 0.683829 |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2011 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import scicloud as cloud
import types
from scicloud.util import fix_time_element
import logging, datetime
_request_query = 'realtime/request/'
_release_query = 'realtime/release/'
_list_query = 'realtime/list/'
_change_max_duration_query = 'realtime/change_max_duration/'
"""
Real time requests management
"""
def list(request_id=""):
"""Returns a list of dictionaries describing realtime core requests.
If *request_id* is specified, only show realtime core request with that request_id
The keys within each returned dictionary are:
* request_id: numeric ID associated with the request
* type: Type of computation resource this request grants
* cores: Number of (type) cores this request grants
* start_time: Time when real time request was satisfied; None if still pending"""
if request_id != "":
try:
int(request_id)
except ValueError:
raise TypeError('Optional parameter to list_rt_cores must be a numeric request_id')
conn = cloud._getcloudnetconnection()
rt_list = conn.send_request(_list_query, {'rid': str(request_id)})
return [fix_time_element(rt,'start_time') for rt in rt_list['requests']]
def request(type, cores, max_duration=None):
"""Request a number of *cores* of a certain compute resource *type*
Returns a dictionary describing the newly created realtime request, with the same format
as the requests returned by list_rt_cores.
If specified, request will terminate after being active for *max_duration* hours
"""
if max_duration != None:
if not isinstance(max_duration, (int, long)):
raise TypeError('Optional parameter max_duration should be an integer value > 0')
if max_duration <= 0:
raise TypeError('Optional parameter max_duration should be an integer value > 0')
conn = cloud._getcloudnetconnection()
return fix_time_element(conn.send_request(_request_query,
{'cores': cores,
'type' : type,
'cap_duration': max_duration if max_duration else 0}),
'start_time')
def release(request_id):
"""Release the realtime core request associated with *request_id*.
Request must have been satisfied to terminate."""
try:
int(request_id)
except ValueError:
raise TypeError('release_rt_cores requires a numeric request_id')
conn = cloud._getcloudnetconnection()
conn.send_request(_release_query, {'rid': str(request_id)})
def change_max_duration(request_id, new_max_duration=None):
try:
int(request_id)
except ValueError:
raise TypeError('change_max_duration requires a numeric request_id')
if new_max_duration != None:
if not isinstance(new_max_duration, (int, long)):
raise TypeError('Optional parameter max_duration should be an integer value > 0')
if new_max_duration <= 0:
raise TypeError('Optional parameter max_duration should be an integer value > 0')
conn = cloud._getcloudnetconnection()
conn.send_request(_change_max_duration_query, {'rid': str(request_id), 'cap_duration':new_max_duration})
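# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module). Core type names and availability are account-specific:
#
#   req = request('c2', 2, max_duration=1)   # ask for 2 cores, capped at 1 hour
#   for r in list():                          # note: list() here shadows the builtin
#       print r['request_id'], r['type'], r['cores'], r['start_time']
#   release(req['request_id'])
# ---------------------------------------------------------------------------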
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/realtime.py
|
realtime.py
|
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2011 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import scicloud as cloud
import types
from scicloud.util import fix_time_element
import logging, datetime
_request_query = 'realtime/request/'
_release_query = 'realtime/release/'
_list_query = 'realtime/list/'
_change_max_duration_query = 'realtime/change_max_duration/'
"""
Real time requests management
"""
def list(request_id=""):
"""Returns a list of dictionaries describing realtime core requests.
If *request_id* is specified, only show realtime core request with that request_id
The keys within each returned dictionary are:
* request_id: numeric ID associated with the request
* type: Type of computation resource this request grants
* cores: Number of (type) cores this request grants
* start_time: Time when real time request was satisfied; None if still pending"""
if request_id != "":
try:
int(request_id)
except ValueError:
raise TypeError('Optional parameter to list_rt_cores must be a numeric request_id')
conn = cloud._getcloudnetconnection()
rt_list = conn.send_request(_list_query, {'rid': str(request_id)})
return [fix_time_element(rt,'start_time') for rt in rt_list['requests']]
def request(type, cores, max_duration=None):
"""Request a number of *cores* of a certain compute resource *type*
Returns a dictionary describing the newly created realtime request, with the same format
as the requests returned by list_rt_cores.
If specified, request will terminate after being active for *max_duration* hours
"""
if max_duration != None:
if not isinstance(max_duration, (int, long)):
raise TypeError('Optional parameter max_duration should be an integer value > 0')
if max_duration <= 0:
raise TypeError('Optional parameter max_duration should be an integer value > 0')
conn = cloud._getcloudnetconnection()
return fix_time_element(conn.send_request(_request_query,
{'cores': cores,
'type' : type,
'cap_duration': max_duration if max_duration else 0}),
'start_time')
def release(request_id):
"""Release the realtime core request associated with *request_id*.
Request must have been satisfied to terminate."""
try:
int(request_id)
except ValueError:
raise TypeError('release_rt_cores requires a numeric request_id')
conn = cloud._getcloudnetconnection()
conn.send_request(_release_query, {'rid': str(request_id)})
def change_max_duration(request_id, new_max_duration=None):
try:
int(request_id)
except ValueError:
raise TypeError('change_max_duration requires a numeric request_id')
if new_max_duration != None:
if not isinstance(new_max_duration, (int, long)):
raise TypeError('Optional parameter max_duration should be an integer value > 0')
if new_max_duration <= 0:
raise TypeError('Optional parameter max_duration should be an integer value > 0')
conn = cloud._getcloudnetconnection()
conn.send_request(_change_max_duration_query, {'rid': str(request_id), 'cap_duration':new_max_duration})
| 0.806319 | 0.101679 |
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
from scicloud import CloudTimeoutError
from . import _getcloud
import multiprocessing
class AsyncResult(object):
"""Result object that emulates multiprocessing.pool.AsyncResult"""
_jid = None #internal - jid (or jid list) associated with this result
def __init__(self, jid):
self._jid = jid
def get(self, timeout=None):
"""
Return result when it arrives.
If timeout is not None and none arrives,
raise multiprocessing.TimeoutError in *timeout* seconds
"""
return _getcloud().result(self._jid)
def wait(self, timeout=None):
"""
Wait until result is available or *timeout* seconds pass
"""
try:
_getcloud().join(self._jid)
except CloudTimeoutError:
pass
def ready(self):
"""Returns true if the job finished (done or errored)"""
c = _getcloud()
status = c.status(self._jid)
if not hasattr(status, '__iter__'):
return status in c.finished_statuses
else:
for s in status:
if s not in c.finished_statuses:
return False
return True
def successful(self):
"""Returns true if job finished successfully.
Asserts that job has finished"""
assert(self.ready())
status = _getcloud().status(self._jid)
if not hasattr(status, '__iter__'):
return status == 'done'
else:
for s in status:
if s != 'done':
return False
return True
def apply(func, args=()):
"""
Equivalent to Multiprocessing apply.
keyword arguments are not supported
"""
c = _getcloud()
jid = c.call(func, *args)
return c.result(jid)
def apply_async(func, args=(), callback=None):
"""
Equivalent to Multiprocessing apply_async
keyword arguments are not supported
callback is a list of functions that should be run on the callee's computer once this job finishes successfully.
Each callback will be invoked with one argument - the jid of the complete job
"""
c = _getcloud()
jid = c.call(func, _callback = callback, *args)
return AsyncResult(jid)
def map(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.result(jids)
def map_async(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map_async
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return AsyncResult(jids)
def imap(func, iterable, chunksize = None):
"""
Equivalent to Multiprocessing imap
chunksize is used only to control the cloud.iresult stage
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.iresult(jids,chunksize)
def imap_unordered(func, iterable, chunksize = None):
"""
Same as imap
"""
return imap(func, iterable, chunksize)
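# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module). The functions above mirror multiprocessing.Pool, so usage looks
# like the following, assuming Science VM credentials (or the simulator) are
# configured:
#
#   def square(x):
#       return x * x
#
#   print apply(square, (3,))            # runs the job and blocks -> 9
#   res = map_async(square, range(5))    # AsyncResult wrapping the job ids
#   res.wait()
#   if res.successful():
#       print res.get()                  # [0, 1, 4, 9, 16]
# ---------------------------------------------------------------------------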
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/pool_interface.py
|
pool_interface.py
|
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
from scicloud import CloudTimeoutError
from . import _getcloud
import multiprocessing
class AsyncResult(object):
"""Result object that emulates multiprocessing.pool.AsyncResult"""
_jid = None #internal - jid (or jid list) associated with this result
def __init__(self, jid):
self._jid = jid
def get(self, timeout=None):
"""
Return result when it arrives.
If timeout is not None and none arrives,
raise multiprocessing.TimeoutError in *timeout* seconds
"""
return _getcloud().result(self._jid)
def wait(self, timeout=None):
"""
Wait until result is available or *timeout* seconds pass
"""
try:
_getcloud().join(self._jid)
except CloudTimeoutError:
pass
def ready(self):
"""Returns true if the job finished (done or errored)"""
c = _getcloud()
status = c.status(self._jid)
if not hasattr(status, '__iter__'):
return status in c.finished_statuses
else:
for s in status:
if s not in c.finished_statuses:
return False
return True
def successful(self):
"""Returns true if job finished successfully.
Asserts that job has finished"""
assert(self.ready())
status = _getcloud().status(self._jid)
if not hasattr(status, '__iter__'):
return status == 'done'
else:
for s in status:
if s != 'done':
return False
return True
def apply(func, args=()):
"""
Equivalent to Multiprocessing apply.
keyword arguments are not supported
"""
c = _getcloud()
jid = c.call(func, *args)
return c.result(jid)
def apply_async(func, args=(), callback=None):
"""
Equivalent to Multiprocessing apply_async
keyword arguments are not supported
callback is a list of functions that should be run on the callee's computer once this job finishes successfully.
Each callback will be invoked with one argument - the jid of the complete job
"""
c = _getcloud()
jid = c.call(func, _callback = callback, *args)
return AsyncResult(jid)
def map(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.result(jids)
def map_async(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map_async
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return AsyncResult(jids)
def imap(func, iterable, chunksize = None):
"""
Equivalent to Multiprocessing imap
chunksize is used only to control the cloud.iresult stage
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.iresult(jids,chunksize)
def imap_unordered(func, iterable, chunksize = None):
"""
Same as imap
"""
return imap(func, iterable, chunksize)
| 0.803212 | 0.152001 |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2012 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
try:
import json
except:
# Python 2.5 compatibility
import simplejson as json
import logging
import os
import platform
import time
import scicloud as cloud
from .cloudlog import stdout as print_stdout, stderr as print_stderr
from .util import common
cloudLog = logging.getLogger('Cloud.volume')
plat = platform.system()
_urls = {'list': 'volume/list/',
'create': 'volume/create/',
'mkdir': 'volume/mkdir/',
'sync_initiate': 'volume/sync_initiate/',
'sync_terminate': 'volume/sync_terminate/',
'delete': 'volume/delete/',
'check_release': 'volume/check_release/',
'ls': 'volume/ls/',
'rm': 'volume/rm/'
}
_volume_path_delimiter = ':'
_SYNC_READY = 'ready'
_SYNC_NOVACANCY = 'novacancy'
_SYNC_ERROR = 'error'
_RELEASE_DONE = 'done'
_RELEASE_IN_PROGRESS = 'waiting'
_RELEASE_ERROR = 'error'
def _send_vol_request(request_type, data, jsonize_values=True):
type_url = _urls.get(request_type)
if type_url is None:
raise LookupError('Invalid vol request type %s' % request_type)
return common._send_request(type_url, data, jsonize_values)
"""
volume management
"""
def get_list(name=None, desc=False):
"""Returns a list of dictionaries describing user's volumes.
If *name* is specified, only shows info for the volume with that name.
If *desc* is True (default=False), then the description is also displayed.
Volume information is returned as list of dictionaries. The keys within
each returned dictionary are:
* name:
name of the volume
* desc:
description of the volume (if desc option is True)
* mnt_path:
filesystem path where volume contents can be accessed by a job
* created:
time when the volume was created
"""
v_list = _send_vol_request('list', {'name': name, 'desc': desc})
return [common._fix_time_element(v, 'created') for v in v_list['volumes']]
def create(name, mount_path, desc=None):
"""Creates a new cloud volume.
* name:
name of the new volume (max 64 chars)
* mount_path:
If an absolute path is specified, that path is where this volume
will be mounted when jobs are run specifying access to this volume,
i.e. mount point where jobs can access the contents of this volume.
If a relative path is specified, then the mount point is the
specified path relative to /home/scivm, which is the directory
where all jobs initially start.
* desc:
(optional) description of the volume (max 1024 chars)
"""
if len(name) < 2:
raise cloud.CloudException('Volume name must be at least 2 characters.')
_send_vol_request('create',
{'name': name, 'mnt_path': mount_path,
'desc': desc or ''})
cloudLog.debug('created volume %s', name)
def mkdir(volume_path, parents=False):
"""Creates directory(ies) at volume_path, if they don't already exist.
* volume_path:
A cloud volume path spec or a list of specs, that indicates the
directory(ies) to create.
* parents:
If True, does not error if the directory already exists, and makes any
necessary parent directories.
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('mkdir',
{'name': vol_name, 'paths': vol_paths,
'parents': parents})
if res.get('modified'):
_wait_for_release(vol_name)
msg = 'created %s in volume %s' % (', '.join(vol_paths), vol_name)
cloudLog.debug(msg)
print_stdout(msg)
def sync(source, dest, delete=False):
"""Syncs data between a cloud volumes and the local filesystem.
Either *source* or *dest* should specify a cloud volume path, but not both.
A cloud volume path is of the format:
volume_name:[path-within-volume]
where path-within-volume cannot be an absolute path (There is no concept of
the root of the filesystem in a volume: All path specifications are relative
to the top level of the volume). Note that the colon is what indicates this
is a volume path specification. Local paths should point to a local
directory or file. If the local path is a directory, whether the
directory itself or the contents of the directory are synced depends on the
presence of a trailing slash. A trailing slash indicates that the contents
should be synced, while its absence would lead to the directory itself being
synced to the volume. *source* can be a list of paths, all of which should
either be local paths, or volume paths in the same cloud volume.
Example::
sync('~/dataset1', 'myvolume1:')
will ensure that a directory named 'dataset1' will exist at the top level
of the cloud volume 'myvolume1', that contains all the contents of
'dataset1'. On the other hand,
sync('~/dataset1/', 'myvolume1:')
will copy all the contents of 'dataset1' to the top level of 'myvolume1'.
This behavior mirrors the file-copying tool 'rsync'.
If *delete* is True, files that exist in *dest* but not in *source* will be
deleted. By default, such files will not be removed.
"""
conn = cloud._getcloudnetconnection()
retry_attempts = conn.retry_attempts
dest_is_local = common.is_local_path(dest)
l_paths, r_paths = (dest, source) if dest_is_local else (source, dest)
local_paths = common.parse_local_paths(l_paths)
vol_name, vol_paths = common.parse_remote_paths(r_paths)
for vol_path in vol_paths:
if os.path.isabs(vol_path):
raise cloud.CloudException('Volume path cannot be absolute')
# acquire syncslot and syncserver info to complete the real remote paths
success = release = False
exit_code = -1
syncserver, syncslot = _acquire_syncslot(vol_name)
try:
cloudLog.debug('Acquired syncslot %s on server %s', syncslot, syncserver)
r_base = '%s@%s:volume/' % (syncslot, syncserver)
r_paths = ' '.join(['%s%s' % (r_base, v_path) for v_path in vol_paths])
l_paths = ' '.join(local_paths)
sync_args = (r_paths, l_paths) if dest_is_local else (l_paths, r_paths)
for attempt in xrange(retry_attempts):
exit_code, stdout, stderr = common.rsync_session(*sync_args,
delete=delete)
if not exit_code:
break
cloudLog.error('sync attempt failed:\n%s', stderr)
print_stdout(str(stderr))
print_stdout('Retrying volume sync...')
else:
raise Exception('sync failed multiple attempts... '
'Please contact PiCloud support')
except KeyboardInterrupt:
cloudLog.error('Sync interrupted by keyboard')
print 'Sync interrupted by keyboard'
except Exception as e:
cloudLog.error('Sync errored with:\n%s', e)
print e
finally:
print_stdout('Cleanup...')
success = not exit_code
release = success and not dest_is_local
_send_vol_request('sync_terminate', {'name': vol_name,
'syncslot': syncslot,
'syncserver': syncserver,
'release': release})
if release:
print_stdout('Ensuring redundancy...')
_wait_for_release(vol_name)
if success:
print_stdout('Sync successfully completed.')
else:
raise cloud.CloudException('Volume sync failed with error code %s. '
'See cloud.log' % exit_code)
def delete(name):
"""Deletes the scivm volume identified by *name*."""
_send_vol_request('delete', {'name': name})
cloudLog.debug('deleted volume %s', name)
def ls(volume_path, extended_info=False):
"""Lists the contents at *volume_path*.
* volume_path:
A cloud volume path spec or a list of specs, whose contents are to be
returned.
* extended_info:
If True, in addition to the names of files and directories comprising
the contents of the volume_path, the size (in bytes) and the modified
times are returned. (Default is False)
Returns a list of tuples, one for each volume path specified. The first
element of the tuple is the volume path spec, and the second element of the
tuple is a list of dictionaries for each file or directory present in the
volume path.
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('ls',
{'name': vol_name, 'paths': vol_paths,
'extended_info': extended_info})
fixed_listings = []
for v_path, listings in res.get('listings'):
v_path = '%s:%s' % (vol_name, v_path)
if extended_info:
listings = [common._fix_time_element(v, 'modified') for v in listings]
fixed_listings.append((v_path, listings))
return fixed_listings
def rm(volume_path, recursive=False):
"""Removes contents at *volume_path*.
* volume_path:
A cloud volume path spec or a list of specs, whose contents are to be
removed.
* recursive:
If True, will remove the contents at *volume_path* recursively, if it
is a directory. If *recursive* is False, and *volume_path* points to
a non-empty directory, it is an error. (Default is False)
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('rm',
{'name': vol_name,
'paths': vol_paths,
'recursive': recursive})
if res.get('modified'):
_wait_for_release(vol_name)
cloudLog.debug('removed %s from volume %s', ', '.join(vol_paths), vol_name)
def _acquire_syncslot(volume_name):
"""Requests syncslot from PiCloud. Current behavior is to try 12 times,
waiting 5 seconds between failed attempts."""
num_retries = 12
wait_time = 5 # seconds
print_stdout('Connecting with PiCloud to initiate sync', False)
while num_retries:
print_stdout('.', False)
res = _send_vol_request('sync_initiate', {'name': volume_name})
status = res.get('status')
if status == _SYNC_NOVACANCY:
num_retries -= 1
time.sleep(wait_time)
continue
if status not in [_SYNC_READY, _SYNC_ERROR]:
status = _SYNC_ERROR
break
print_stdout('')
if status == _SYNC_NOVACANCY:
cloudLog.error('No available syncslot')
raise cloud.CloudException('Volume sync is unavailable at the moment. '
'Please try again in a few minutes. '
'We apologize for the inconvenience.')
if status == _SYNC_ERROR:
cloudLog.error('Error acquiring syncslot')
raise cloud.CloudException('Could not complete volume sync. '
'Please contact PiCloud support.')
return res.get('syncserver'), res.get('syncslot')
def _wait_for_release(volume_name, wait_interval=3):
"""Polls volume's status until it's no longer waiting release."""
while True:
res = _send_vol_request('check_release', {'name': volume_name})
status = res['status']
if status == _RELEASE_ERROR:
raise cloud.CloudException('Sync failed on volume %s' % volume_name)
if status == _RELEASE_DONE:
break
        time.sleep(wait_interval)
| scicloud | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/volume.py | volume.py |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2012 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
try:
import json
except:
# Python 2.5 compatibility
import simplejson as json
import logging
import os
import platform
import time
import scicloud as cloud
from .cloudlog import stdout as print_stdout, stderr as print_stderr
from .util import common
cloudLog = logging.getLogger('Cloud.volume')
plat = platform.system()
_urls = {'list': 'volume/list/',
'create': 'volume/create/',
'mkdir': 'volume/mkdir/',
'sync_initiate': 'volume/sync_initiate/',
'sync_terminate': 'volume/sync_terminate/',
'delete': 'volume/delete/',
'check_release': 'volume/check_release/',
'ls': 'volume/ls/',
'rm': 'volume/rm/'
}
_volume_path_delimiter = ':'
_SYNC_READY = 'ready'
_SYNC_NOVACANCY = 'novacancy'
_SYNC_ERROR = 'error'
_RELEASE_DONE = 'done'
_RELEASE_IN_PROGRESS = 'waiting'
_RELEASE_ERROR = 'error'
def _send_vol_request(request_type, data, jsonize_values=True):
type_url = _urls.get(request_type)
if type_url is None:
raise LookupError('Invalid vol request type %s' % request_type)
return common._send_request(type_url, data, jsonize_values)
"""
volume management
"""
def get_list(name=None, desc=False):
"""Returns a list of dictionaries describing user's volumes.
If *name* is specified, only shows info for the volume with that name.
If *desc* is True (default=False), then the description is also displayed.
Volume information is returned as list of dictionaries. The keys within
each returned dictionary are:
* name:
name of the volume
* desc:
description of the volume (if desc option is True)
* mnt_path:
filesystem path where volume contents can be accessed by a job
* created:
time when the volume was created
"""
v_list = _send_vol_request('list', {'name': name, 'desc': desc})
return [common._fix_time_element(v, 'created') for v in v_list['volumes']]
def create(name, mount_path, desc=None):
"""Creates a new cloud volume.
* name:
name of the new volume (max 64 chars)
* mount_path:
If an absolute path is specified, that path is where this volume
will be mounted when jobs are run specifying access to this volume,
i.e. mount point where jobs can access the contents of this volume.
If a relative path is specified, then the mount point is the
specified path relative to /home/scivm, which is the directory
where all jobs initially start.
* desc:
(optional) description of the volume (max 1024 chars)
"""
if len(name) < 2:
raise cloud.CloudException('Volume name must be at least 2 characters.')
_send_vol_request('create',
{'name': name, 'mnt_path': mount_path,
'desc': desc or ''})
cloudLog.debug('created volume %s', name)
def mkdir(volume_path, parents=False):
"""Creates directory(ies) at volume_path, if they don't already exist.
* volume_path:
A cloud volume path spec or a list of specs, that indicates the
directory(ies) to create.
* parents:
If True, does not error if the directory already exists, and makes any
necessary parent directories.
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('mkdir',
{'name': vol_name, 'paths': vol_paths,
'parents': parents})
if res.get('modified'):
_wait_for_release(vol_name)
msg = 'created %s in volume %s' % (', '.join(vol_paths), vol_name)
cloudLog.debug(msg)
print_stdout(msg)
def sync(source, dest, delete=False):
"""Syncs data between a cloud volumes and the local filesystem.
Either *source* or *dest* should specify a cloud volume path, but not both.
A cloud volume path is of the format:
volume_name:[path-within-volume]
where path-within-volume cannot be an absolute path (There is no concept of
the root of the filesystem in a volume: All path specifications are relative
to the top level of the volume). Note that the colon is what indicates this
is a volume path specification. Local paths should point to a local
directory or file. If the local path is a directory, whether the
directory itself or the contents of the directory are synced depends on the
presence of a trailing slash. A trailing slash indicates that the contents
should be synced, while its absence would lead to the directory itself being
synced to the volume. *source* can be a list of paths, all of which should
either be local paths, or volume paths in the same cloud volume.
Example::
sync('~/dataset1', 'myvolume1:')
will ensure that a directory named 'dataset1' will exist at the top level
of the cloud volume 'myvolume1', that contains all the contents of
'dataset1'. On the other hand,
sync('~/dataset1/', 'myvolume1:')
will copy all the contents of 'dataset1' to the top level of 'myvolume1'.
This behavior mirrors the file-copying tool 'rsync'.
If *delete* is True, files that exist in *dest* but not in *source* will be
deleted. By default, such files will not be removed.
"""
conn = cloud._getcloudnetconnection()
retry_attempts = conn.retry_attempts
dest_is_local = common.is_local_path(dest)
l_paths, r_paths = (dest, source) if dest_is_local else (source, dest)
local_paths = common.parse_local_paths(l_paths)
vol_name, vol_paths = common.parse_remote_paths(r_paths)
for vol_path in vol_paths:
if os.path.isabs(vol_path):
raise cloud.CloudException('Volume path cannot be absolute')
# acquire syncslot and syncserver info to complete the real remote paths
success = release = False
exit_code = -1
syncserver, syncslot = _acquire_syncslot(vol_name)
try:
cloudLog.debug('Acquired syncslot %s on server %s', syncslot, syncserver)
r_base = '%s@%s:volume/' % (syncslot, syncserver)
r_paths = ' '.join(['%s%s' % (r_base, v_path) for v_path in vol_paths])
l_paths = ' '.join(local_paths)
sync_args = (r_paths, l_paths) if dest_is_local else (l_paths, r_paths)
for attempt in xrange(retry_attempts):
exit_code, stdout, stderr = common.rsync_session(*sync_args,
delete=delete)
if not exit_code:
break
cloudLog.error('sync attempt failed:\n%s', stderr)
print_stdout(str(stderr))
print_stdout('Retrying volume sync...')
else:
            raise Exception('sync failed after multiple attempts... '
'Please contact PiCloud support')
except KeyboardInterrupt:
cloudLog.error('Sync interrupted by keyboard')
print 'Sync interrupted by keyboard'
except Exception as e:
cloudLog.error('Sync errored with:\n%s', e)
print e
finally:
print_stdout('Cleanup...')
success = not exit_code
release = success and not dest_is_local
_send_vol_request('sync_terminate', {'name': vol_name,
'syncslot': syncslot,
'syncserver': syncserver,
'release': release})
if release:
print_stdout('Ensuring redundancy...')
_wait_for_release(vol_name)
if success:
print_stdout('Sync successfully completed.')
else:
raise cloud.CloudException('Volume sync failed with error code %s. '
'See cloud.log' % exit_code)
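# Illustrative usage sketch (not part of the original module): the
# trailing-slash semantics described in the sync() docstring above. The volume
# name and local paths are hypothetical.
#
#   sync('~/dataset1', 'myvolume1:')      # creates 'dataset1' at the volume top level
#   sync('~/dataset1/', 'myvolume1:')     # copies the contents of 'dataset1' instead
#   sync('myvolume1:results/', '~/results/', delete=True)   # pull back, pruning stale files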
def delete(name):
"""Deletes the scivm volume identified by *name*."""
_send_vol_request('delete', {'name': name})
cloudLog.debug('deleted volume %s', name)
def ls(volume_path, extended_info=False):
"""Lists the contents at *volume_path*.
* volume_path:
A cloud volume path spec or a list of specs, whose contents are to be
returned.
* extended_info:
If True, in addition to the names of files and directories comprising
the contents of the volume_path, the size (in bytes) and the modified
times are returned. (Default is False)
Returns a list of tuples, one for each volume path specified. The first
element of the tuple is the volume path spec, and the second element of the
tuple is a list of dictionaries for each file or directory present in the
volume path.
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('ls',
{'name': vol_name, 'paths': vol_paths,
'extended_info': extended_info})
fixed_listings = []
for v_path, listings in res.get('listings'):
v_path = '%s:%s' % (vol_name, v_path)
if extended_info:
listings = [common._fix_time_element(v, 'modified') for v in listings]
fixed_listings.append((v_path, listings))
return fixed_listings
def rm(volume_path, recursive=False):
"""Removes contents at *volume_path*.
* volume_path:
A cloud volume path spec or a list of specs, whose contents are to be
removed.
* recursive:
If True, will remove the contents at *volume_path* recursively, if it
is a directory. If *recursive* is False, and *volume_path* points to
a non-empty directory, it is an error. (Default is False)
"""
vol_name, vol_paths = common.parse_remote_paths(volume_path)
res = _send_vol_request('rm',
{'name': vol_name,
'paths': vol_paths,
'recursive': recursive})
if res.get('modified'):
_wait_for_release(vol_name)
cloudLog.debug('removed %s from volume %s', ', '.join(vol_paths), vol_name)
def _acquire_syncslot(volume_name):
"""Requests syncslot from PiCloud. Current behavior is to try 12 times,
waiting 5 seconds between failed attempts."""
num_retries = 12
wait_time = 5 # seconds
print_stdout('Connecting with PiCloud to initiate sync', False)
while num_retries:
print_stdout('.', False)
res = _send_vol_request('sync_initiate', {'name': volume_name})
status = res.get('status')
if status == _SYNC_NOVACANCY:
num_retries -= 1
time.sleep(wait_time)
continue
if status not in [_SYNC_READY, _SYNC_ERROR]:
status = _SYNC_ERROR
break
print_stdout('')
if status == _SYNC_NOVACANCY:
cloudLog.error('No available syncslot')
raise cloud.CloudException('Volume sync is unavailable at the moment. '
'Please try again in a few minutes. '
                                   'We apologize for the inconvenience.')
if status == _SYNC_ERROR:
cloudLog.error('Error acquiring syncslot')
raise cloud.CloudException('Could not complete volume sync. '
'Please contact PiCloud support.')
return res.get('syncserver'), res.get('syncslot')
def _wait_for_release(volume_name, wait_interval=3):
"""Polls volume's status until it's no longer waiting release."""
while True:
res = _send_vol_request('check_release', {'name': volume_name})
status = res['status']
if status == _RELEASE_ERROR:
raise cloud.CloudException('Sync failed on volume %s' % volume_name)
if status == _RELEASE_DONE:
break
        time.sleep(wait_interval)
| 0.690559 | 0.138258 |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2013 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
try:
import json
except:
# Python 2.5 compatibility
import simplejson as json
import logging
import platform
import random
import re
import sys
import string
import time
import scicloud as cloud
from .cloudlog import stdout as print_stdout, stderr as print_stderr
from .util import credentials
from .util import common
cloudLog = logging.getLogger('Cloud.environment')
plat = platform.system()
_urls = {'list': 'environment/list/',
'list_bases': 'environment/list_bases/',
'create': 'environment/create/',
'edit_info': 'environment/edit_info/',
'modify': 'environment/modify/',
'save': 'environment/save/',
'save_shutdown': 'environment/save_shutdown/',
'shutdown': 'environment/shutdown/',
'clone': 'environment/clone/',
'delete': 'environment/delete/',
}
# environment status types
_STATUS_CREATING = 'new'
_STATUS_READY = 'ready'
_STATUS_ERROR = 'error'
# environment action types
_ACTION_IDLE = 'idle'
_ACTION_SETUP = 'setup'
_ACTION_EDIT = 'edit'
_ACTION_SAVE = 'save'
_ACTION_SETUP_ERROR = 'setup_error'
_ACTION_SAVE_ERROR = 'save_error'
def _send_env_request(request_type, data, jsonize_values=True):
type_url = _urls.get(request_type)
if type_url is None:
raise LookupError('Invalid env request type %s' % request_type)
return common._send_request(type_url, data, jsonize_values)
"""
environment management
"""
def list_envs(name=None):
"""Returns a list of dictionaries describing user's environments.
If *name* is given, only shows info for the environment with that name.
Environment information is returned as list of dictionaries. The keys
within each returned dictionary are:
* name: name of the environment
* status: status of the environment
* action: the action state of the environment (e.g. under edit)
* created: time when the environment was created
    * last_modified: last time a modification was saved
* hostname: hostname of setup server if being modified
* setup_time: time setup server has been up if being modified
"""
resp = _send_env_request('list', {'env_name': name})
return [common._fix_time_element(env, ['created', 'last_modified'])
for env in resp['envs_list']]
def list_bases():
"""Returns a list of dictionaries describing available bases. The keys
within each returned dictionary are:
* id: id of the base (to be used when referencing bases in other functions)
* name: brief descriptive name of the base
"""
resp = _send_env_request('list_bases', {})
return resp['bases_list']
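# Illustrative sketch (not part of the original module): inspecting the bases
# returned by list_bases() before picking one for create(). The keys follow
# the docstring above.
#
#   bases = list_bases()
#   print [(b['id'], b['name']) for b in bases]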
def create(name, base, desc=None):
"""Creates a new cloud environment.
* name: name of the new environment (max 30 chars)
* base: name of the base OS to use for the environment (use list_bases to
see list of bases and their names)
* desc: Optional description of the environment (max 2000 chars)
Returns the hostname of the setup server where the newly created
environment can be modified.
"""
pattern = '^[a-zA-Z0-9_-]*$'
if not name:
raise cloud.CloudException('No environment name given')
elif len(name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
elif not re.match(pattern, name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if desc and len(desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('create',
{'env_name': name, 'base_name': base,
'env_desc': desc or ''})
cloudLog.debug('created environment %s', resp['env_name'])
return get_setup_hostname(name)
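# Illustrative sketch (not part of the original module): a typical
# create/modify/save cycle using the functions in this module. The environment
# name, the base name 'precise' and the installed package are hypothetical;
# use list_bases() to find real base names.
#
#   hostname = create('my_env', 'precise', desc='custom deps')
#   ssh('my_env', 'sudo apt-get install -y libfoo-dev')
#   save_shutdown('my_env')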
def edit_info(name, new_name=None, new_desc=None):
"""Edits name and description of an existing environment.
* name: current name of the environment
* new_name: Optional new name of the environment (max 30 chars)
* new_desc: Optional new description of the environment (max 2000 chars)
"""
if new_name is None and new_desc is None:
return
pattern = '^[a-zA-Z0-9_-]*$'
if not name:
raise cloud.CloudException('No environment name given')
if new_name is not None:
if len(new_name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
        elif not re.match(pattern, new_name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if new_desc is not None and len(new_desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('edit_info',
{'old_env_name': name, 'new_name': new_name,
'new_desc': new_desc})
cloudLog.debug('edited info for environment %s', resp['env_name'])
def modify(name):
"""Modifies an existing environment.
* name: name of environment to modify
Returns the hostname of the setup server where environment can be modified.
"""
resp = _send_env_request('modify', {'env_name': name})
cloudLog.debug('modify requested for env %s', resp['env_name'])
return get_setup_hostname(name)
def save(name):
"""Saves the current modified version of the environment, without tearing
down the setup server.
* name: name of the environment to save
This is a blocking function. When it returns without errors, the new
version of the environment is available for use by all workers.
"""
resp = _send_env_request('save', {'env_name': name})
cloudLog.debug('save requested for env %s', resp['env_name'])
wait_for_edit(name)
def save_shutdown(name):
"""Saves the current modified version of the environment, and tears down
the setup server when saving is done.
* name: name of the environment to save
This is a blocking function. When it returns without errors, the new
version of the environment is available for use by all workers.
"""
resp = _send_env_request('save_shutdown', {'env_name': name})
cloudLog.debug('save_shutdown requested for env %s', resp['env_name'])
wait_for_idle(name)
def shutdown(name):
"""Tears down the setup server without saving the environment modification.
    * name: name of the environment whose setup server should be shut down
"""
resp = _send_env_request('shutdown', {'env_name': name})
cloudLog.debug('shutdown requested for env %s', resp['env_name'])
wait_for_idle(name)
def clone(parent_name, new_name=None, new_desc=None):
"""Creates a new cloud environment by cloning an existing one.
* parent_name: name of the existing environment to clone
* new_name: new name of the environment. default is
parent_name + "_clone". (max 30 chars)
* new_desc: Optional description of the environment if different from
parent environment description. (max 2000 chars)
"""
pattern = '^[a-zA-Z0-9_-]*$'
new_name = new_name or (parent_name + '_clone')
if len(new_name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
elif not re.match(pattern, new_name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if new_desc and len(new_desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('create',
{'parent_env_name': parent_name,
'env_name': new_name,
'env_desc': new_desc})
cloudLog.debug('created environment %s', resp['env_name'])
wait_for_idle(new_name)
def delete(name):
"""Deletes and existing environment.
* name: Name of the environment to save
"""
resp = _send_env_request('delete', {'env_name': name})
cloudLog.debug('delete requested for env %s', resp['env_name'])
def get_setup_hostname(name):
"""Returns the hostname of the setup server where environment can be
    modified. Raises an exception if the environment does not have a setup server
already launched.
* name: name of the environment whose setup server hostname is desired
"""
env_info = wait_for_edit(name, _ACTION_IDLE)
if env_info is None:
raise cloud.CloudException('Environment is not being modified')
return env_info['hostname']
def get_key_path():
"""Return the key file path for sshing into setup server."""
api_key = cloud.connection_info().get('api_key')
return credentials.get_sshkey_path(api_key)
def ssh(name, cmd=None):
"""Creates an ssh session to the environment setup server.
* name: Name of the environment to make an ssh connection
* cmd: By default, this function creates an interactive ssh session.
If cmd is given, however, it executes the cmd on the setup server
and returns the output of the command execution.
"""
hostname = get_setup_hostname(name)
key_path = get_key_path()
status, stdout, stderr = common.ssh_session('scivm', hostname, key_path,
run_cmd=cmd)
if status:
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
sys.exit(status)
if cmd and stdout:
return stdout
def rsync(src_path, dest_path, delete=False, pipe_output=False):
"""Syncs data between a custom environment and the local filesystem. A
setup server for the environment must already be launched. Also, keep in
mind that the scivm user account (which is used for the rsync operation)
has write permissions only to the home directory and /tmp on the setup
server. If additional permissions are required, consider doing the rsync
manually from the setup server using sudo, or rsync to the home directory
then do a subsequent move using sudo.
Either *src_path* or *dest_path* should specify an environment path, but
not both. An environment path is of the format:
env_name:[path-within-environment]
Note that the colon is what indicates this is an environment path
specification.
*src_path* can be a list of paths, all of which should either be local
paths, or environment paths. If *src_path* is a directory, a trailing slash
    indicates that its contents should be rsynced, while omission of the slash
would lead to the directory itself being rsynced to the environment.
Example::
rsync('~/dataset1', 'my_env:')
will ensure that a directory named 'dataset1' will exist in the user
scivm's home directory of environment 'my_env'. On the other hand,
    rsync('~/dataset1/', 'my_env:')
will copy all the contents of 'dataset1' to the home directory of user
scivm. See rsync manual for more information.
If *delete* is True, files that exist in *dest_path* but not in *src_path*
will be deleted. By default, such files will not be removed.
"""
conn = cloud._getcloudnetconnection()
retry_attempts = conn.retry_attempts
dest_is_local = common.is_local_path(dest_path)
l_paths, r_paths = ((dest_path, src_path) if dest_is_local else
(src_path, dest_path))
local_paths = common.parse_local_paths(l_paths)
env_name, env_paths = common.parse_remote_paths(r_paths)
hostname = get_setup_hostname(env_name)
try:
r_base = 'scivm@%s:' % hostname
r_paths = ' '.join(['%s%s' % (r_base, path) for path in env_paths])
l_paths = ' '.join(local_paths)
sync_args = (r_paths, l_paths) if dest_is_local else (l_paths, r_paths)
for attempt in xrange(retry_attempts):
exit_code, _, _ = common.rsync_session(*sync_args, delete=delete,
pipe_output=pipe_output)
if not exit_code:
break
print_stderr('Retrying environment rsync...')
else:
            raise Exception('rsync failed after multiple attempts... '
'Please contact PiCloud support')
except Exception as e:
cloudLog.error('Environment rsync errored with:\n%s', e)
print e
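# Illustrative sketch (not part of the original module): environment-path and
# trailing-slash usage for rsync(). The environment name and local paths are
# hypothetical.
#
#   rsync('~/dataset1', 'my_env:')         # creates ~/dataset1 on the setup server
#   rsync('~/dataset1/', 'my_env:data/')   # copies the contents into ~/data
#   rsync('my_env:results/', '~/results/', delete=True)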
def run_script(name, filename):
"""Runs a script on the environment setup server, and returns the output.
* name: Environment whose setup server should run the script
    * filename: local path where the script to be run can be found
"""
POPU = string.ascii_letters + string.digits
dest_file = ''.join(random.sample(POPU, 16))
try:
rsync(filename, '%s:%s' % (name, dest_file), pipe_output=True)
run = "chmod 700 {0}; ./{0} &> {0}.out; cat {0}.out".format(dest_file)
output = ssh(name, run)
except Exception as e:
cloudLog.error('script could not be run: %s', str(e))
print 'Script could not be run on the setup server.'
print e
else:
return output
finally:
ssh(name, "rm -rf %s*" % dest_file)
def wait_for_idle(name, invalid_actions=None):
"""Waits for environment to be in idle action state."""
return _wait_for(name=name, action=_ACTION_IDLE,
invalid_actions=invalid_actions)
def wait_for_edit(name, invalid_actions=None):
"""Waits for environment to be in edit action state."""
return _wait_for(name=name, action=_ACTION_EDIT,
invalid_actions=invalid_actions)
def _wait_for(name, action, invalid_actions=None, poll_frequency=2,
max_poll_duration=1800):
"""Generic wait function for polling until environment reaches the
specified action state. Raises exception if the environment ever falls into
an error status or action state.
"""
invalid_actions = invalid_actions or []
if not hasattr(invalid_actions, '__iter__'):
invalid_actions = [invalid_actions]
for _ in xrange(max_poll_duration / poll_frequency):
resp = list_envs(name)
if len(resp) == 0:
raise cloud.CloudException('No matching environment found.')
elif len(resp) != 1:
cloudLog.error('single env query returned %s results', len(resp))
raise cloud.CloudException('Unexpected result from PiCloud. '
'Please contact PiCloud support.')
env_info = resp.pop()
resp_status = env_info['status']
resp_action = env_info['action']
if resp_status == _STATUS_ERROR:
raise cloud.CloudException('Environment creation failed. '
'Please contact PiCloud support.')
elif resp_status == _STATUS_READY:
if resp_action == _ACTION_SETUP_ERROR:
raise cloud.CloudException('Setup server launch failed. '
'Please contact PiCloud support.')
elif resp_action == _ACTION_SAVE_ERROR:
raise cloud.CloudException('Environment save failed. '
'Please contact PiCloud support.')
elif resp_action in invalid_actions:
return None
elif resp_status == _STATUS_READY and action == resp_action:
return env_info
elif resp_status == _STATUS_CREATING:
pass
time.sleep(poll_frequency)
raise cloud.CloudException('Environment operation timed out. '
'Please contact PiCloud support.')
| scicloud | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/environment.py | environment.py |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2013 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
try:
import json
except:
# Python 2.5 compatibility
import simplejson as json
import logging
import platform
import random
import re
import sys
import string
import time
import scicloud as cloud
from .cloudlog import stdout as print_stdout, stderr as print_stderr
from .util import credentials
from .util import common
cloudLog = logging.getLogger('Cloud.environment')
plat = platform.system()
_urls = {'list': 'environment/list/',
'list_bases': 'environment/list_bases/',
'create': 'environment/create/',
'edit_info': 'environment/edit_info/',
'modify': 'environment/modify/',
'save': 'environment/save/',
'save_shutdown': 'environment/save_shutdown/',
'shutdown': 'environment/shutdown/',
'clone': 'environment/clone/',
'delete': 'environment/delete/',
}
# environment status types
_STATUS_CREATING = 'new'
_STATUS_READY = 'ready'
_STATUS_ERROR = 'error'
# environment action types
_ACTION_IDLE = 'idle'
_ACTION_SETUP = 'setup'
_ACTION_EDIT = 'edit'
_ACTION_SAVE = 'save'
_ACTION_SETUP_ERROR = 'setup_error'
_ACTION_SAVE_ERROR = 'save_error'
def _send_env_request(request_type, data, jsonize_values=True):
type_url = _urls.get(request_type)
if type_url is None:
raise LookupError('Invalid env request type %s' % request_type)
return common._send_request(type_url, data, jsonize_values)
"""
environment management
"""
def list_envs(name=None):
"""Returns a list of dictionaries describing user's environments.
If *name* is given, only shows info for the environment with that name.
Environment information is returned as list of dictionaries. The keys
within each returned dictionary are:
* name: name of the environment
* status: status of the environment
* action: the action state of the environment (e.g. under edit)
* created: time when the environment was created
    * last_modified: last time a modification was saved
* hostname: hostname of setup server if being modified
* setup_time: time setup server has been up if being modified
"""
resp = _send_env_request('list', {'env_name': name})
return [common._fix_time_element(env, ['created', 'last_modified'])
for env in resp['envs_list']]
def list_bases():
"""Returns a list of dictionaries describing available bases. The keys
within each returned dictionary are:
* id: id of the base (to be used when referencing bases in other functions)
* name: brief descriptive name of the base
"""
resp = _send_env_request('list_bases', {})
return resp['bases_list']
def create(name, base, desc=None):
"""Creates a new cloud environment.
* name: name of the new environment (max 30 chars)
* base: name of the base OS to use for the environment (use list_bases to
see list of bases and their names)
* desc: Optional description of the environment (max 2000 chars)
Returns the hostname of the setup server where the newly created
environment can be modified.
"""
pattern = '^[a-zA-Z0-9_-]*$'
if not name:
raise cloud.CloudException('No environment name given')
elif len(name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
elif not re.match(pattern, name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if desc and len(desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('create',
{'env_name': name, 'base_name': base,
'env_desc': desc or ''})
cloudLog.debug('created environment %s', resp['env_name'])
return get_setup_hostname(name)
def edit_info(name, new_name=None, new_desc=None):
"""Edits name and description of an existing environment.
* name: current name of the environment
* new_name: Optional new name of the environment (max 30 chars)
* new_desc: Optional new description of the environment (max 2000 chars)
"""
if new_name is None and new_desc is None:
return
pattern = '^[a-zA-Z0-9_-]*$'
if not name:
raise cloud.CloudException('No environment name given')
if new_name is not None:
if len(new_name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
        elif not re.match(pattern, new_name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if new_desc is not None and len(new_desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('edit_info',
{'old_env_name': name, 'new_name': new_name,
'new_desc': new_desc})
cloudLog.debug('edited info for environment %s', resp['env_name'])
def modify(name):
"""Modifies an existing environment.
* name: name of environment to modify
Returns the hostname of the setup server where environment can be modified.
"""
resp = _send_env_request('modify', {'env_name': name})
cloudLog.debug('modify requested for env %s', resp['env_name'])
return get_setup_hostname(name)
def save(name):
"""Saves the current modified version of the environment, without tearing
down the setup server.
* name: name of the environment to save
This is a blocking function. When it returns without errors, the new
version of the environment is available for use by all workers.
"""
resp = _send_env_request('save', {'env_name': name})
cloudLog.debug('save requested for env %s', resp['env_name'])
wait_for_edit(name)
def save_shutdown(name):
"""Saves the current modified version of the environment, and tears down
the setup server when saving is done.
* name: name of the environment to save
This is a blocking function. When it returns without errors, the new
version of the environment is available for use by all workers.
"""
resp = _send_env_request('save_shutdown', {'env_name': name})
cloudLog.debug('save_shutdown requested for env %s', resp['env_name'])
wait_for_idle(name)
def shutdown(name):
"""Tears down the setup server without saving the environment modification.
    * name: name of the environment whose setup server should be shut down
"""
resp = _send_env_request('shutdown', {'env_name': name})
cloudLog.debug('shutdown requested for env %s', resp['env_name'])
wait_for_idle(name)
def clone(parent_name, new_name=None, new_desc=None):
"""Creates a new cloud environment by cloning an existing one.
* parent_name: name of the existing environment to clone
* new_name: new name of the environment. default is
parent_name + "_clone". (max 30 chars)
* new_desc: Optional description of the environment if different from
parent environment description. (max 2000 chars)
"""
pattern = '^[a-zA-Z0-9_-]*$'
new_name = new_name or (parent_name + '_clone')
if len(new_name) > 30:
raise cloud.CloudException('Environment name cannot be more than 30'
' characters')
elif not re.match(pattern, new_name):
raise cloud.CloudException('Environment name must consist of letters,'
' numbers, underscores, or hyphens')
if new_desc and len(new_desc) > 2000:
raise cloud.CloudException('Environment description cannot be more'
' than 2000 characters')
resp = _send_env_request('create',
{'parent_env_name': parent_name,
'env_name': new_name,
'env_desc': new_desc})
cloudLog.debug('created environment %s', resp['env_name'])
wait_for_idle(new_name)
def delete(name):
"""Deletes and existing environment.
* name: Name of the environment to save
"""
resp = _send_env_request('delete', {'env_name': name})
cloudLog.debug('delete requested for env %s', resp['env_name'])
def get_setup_hostname(name):
"""Returns the hostname of the setup server where environment can be
    modified. Raises an exception if the environment does not have a setup server
already launched.
* name: name of the environment whose setup server hostname is desired
"""
env_info = wait_for_edit(name, _ACTION_IDLE)
if env_info is None:
raise cloud.CloudException('Environment is not being modified')
return env_info['hostname']
def get_key_path():
"""Return the key file path for sshing into setup server."""
api_key = cloud.connection_info().get('api_key')
return credentials.get_sshkey_path(api_key)
def ssh(name, cmd=None):
"""Creates an ssh session to the environment setup server.
* name: Name of the environment to make an ssh connection
* cmd: By default, this function creates an interactive ssh session.
If cmd is given, however, it executes the cmd on the setup server
and returns the output of the command execution.
"""
hostname = get_setup_hostname(name)
key_path = get_key_path()
status, stdout, stderr = common.ssh_session('scivm', hostname, key_path,
run_cmd=cmd)
if status:
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
sys.exit(status)
if cmd and stdout:
return stdout
def rsync(src_path, dest_path, delete=False, pipe_output=False):
"""Syncs data between a custom environment and the local filesystem. A
setup server for the environment must already be launched. Also, keep in
mind that the scivm user account (which is used for the rsync operation)
has write permissions only to the home directory and /tmp on the setup
server. If additional permissions are required, consider doing the rsync
manually from the setup server using sudo, or rsync to the home directory
then do a subsequent move using sudo.
Either *src_path* or *dest_path* should specify an environment path, but
not both. An environment path is of the format:
env_name:[path-within-environment]
Note that the colon is what indicates this is an environment path
specification.
*src_path* can be a list of paths, all of which should either be local
paths, or environment paths. If *src_path* is a directory, a trailing slash
    indicates that its contents should be rsynced, while omission of the slash
would lead to the directory itself being rsynced to the environment.
Example::
rsync('~/dataset1', 'my_env:')
will ensure that a directory named 'dataset1' will exist in the user
scivm's home directory of environment 'my_env'. On the other hand,
    rsync('~/dataset1/', 'my_env:')
will copy all the contents of 'dataset1' to the home directory of user
scivm. See rsync manual for more information.
If *delete* is True, files that exist in *dest_path* but not in *src_path*
will be deleted. By default, such files will not be removed.
"""
conn = cloud._getcloudnetconnection()
retry_attempts = conn.retry_attempts
dest_is_local = common.is_local_path(dest_path)
l_paths, r_paths = ((dest_path, src_path) if dest_is_local else
(src_path, dest_path))
local_paths = common.parse_local_paths(l_paths)
env_name, env_paths = common.parse_remote_paths(r_paths)
hostname = get_setup_hostname(env_name)
try:
r_base = 'scivm@%s:' % hostname
r_paths = ' '.join(['%s%s' % (r_base, path) for path in env_paths])
l_paths = ' '.join(local_paths)
sync_args = (r_paths, l_paths) if dest_is_local else (l_paths, r_paths)
for attempt in xrange(retry_attempts):
exit_code, _, _ = common.rsync_session(*sync_args, delete=delete,
pipe_output=pipe_output)
if not exit_code:
break
print_stderr('Retrying environment rsync...')
else:
            raise Exception('rsync failed after multiple attempts... '
'Please contact PiCloud support')
except Exception as e:
cloudLog.error('Environment rsync errored with:\n%s', e)
print e
def run_script(name, filename):
"""Runs a script on the environment setup server, and returns the output.
* name: Environment whose setup server should run the script
    * filename: local path where the script to be run can be found
"""
POPU = string.ascii_letters + string.digits
dest_file = ''.join(random.sample(POPU, 16))
try:
rsync(filename, '%s:%s' % (name, dest_file), pipe_output=True)
run = "chmod 700 {0}; ./{0} &> {0}.out; cat {0}.out".format(dest_file)
output = ssh(name, run)
except Exception as e:
cloudLog.error('script could not be run: %s', str(e))
print 'Script could not be run on the setup server.'
print e
else:
return output
finally:
ssh(name, "rm -rf %s*" % dest_file)
def wait_for_idle(name, invalid_actions=None):
"""Waits for environment to be in idle action state."""
return _wait_for(name=name, action=_ACTION_IDLE,
invalid_actions=invalid_actions)
def wait_for_edit(name, invalid_actions=None):
"""Waits for environment to be in edit action state."""
return _wait_for(name=name, action=_ACTION_EDIT,
invalid_actions=invalid_actions)
def _wait_for(name, action, invalid_actions=None, poll_frequency=2,
max_poll_duration=1800):
"""Generic wait function for polling until environment reaches the
specified action state. Raises exception if the environment ever falls into
an error status or action state.
"""
invalid_actions = invalid_actions or []
if not hasattr(invalid_actions, '__iter__'):
invalid_actions = [invalid_actions]
for _ in xrange(max_poll_duration / poll_frequency):
resp = list_envs(name)
if len(resp) == 0:
raise cloud.CloudException('No matching environment found.')
elif len(resp) != 1:
cloudLog.error('single env query returned %s results', len(resp))
raise cloud.CloudException('Unexpected result from PiCloud. '
'Please contact PiCloud support.')
env_info = resp.pop()
resp_status = env_info['status']
resp_action = env_info['action']
if resp_status == _STATUS_ERROR:
raise cloud.CloudException('Environment creation failed. '
'Please contact PiCloud support.')
elif resp_status == _STATUS_READY:
if resp_action == _ACTION_SETUP_ERROR:
raise cloud.CloudException('Setup server launch failed. '
'Please contact PiCloud support.')
elif resp_action == _ACTION_SAVE_ERROR:
raise cloud.CloudException('Environment save failed. '
'Please contact PiCloud support.')
elif resp_action in invalid_actions:
return None
elif resp_status == _STATUS_READY and action == resp_action:
return env_info
elif resp_status == _STATUS_CREATING:
pass
time.sleep(poll_frequency)
raise cloud.CloudException('Environment operation timed out. '
'Please contact PiCloud support.')
| 0.660282 | 0.083404 |
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2012 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import time as _time
from .cloud import CloudException as _CloudException
from .cloud import CloudTimeoutError as _CloudTimeoutError
from . import _getcloud
def _wait_for_test(jid, test_func, timeout=None, timeout_msg='Job wait timed out'):
"""Keep testing job until test_func(jid) returns a true value
Return return value of test_func"""
poll_interval = 1.0
abort_time = _time.time() + timeout if timeout else None
while True:
retval = test_func(jid)
if retval:
return retval
if abort_time and _time.time() > abort_time:
raise _CloudTimeoutError(timeout_msg, jid=jid)
_time.sleep(poll_interval)
def _checkint(var, name=''):
if not isinstance(var, (int, long)):
raise TypeError('%s must be a single integer' % name)
# all possible status transitions
_status_transitions = {'waiting' : ['stalled', 'killed', 'queued'],
'queued' : ['killed', 'processing'],
'processing' : ['killed', 'error', 'done'],
'stalled' : [],
'killed' : [],
'error' : [],
'done' : []
}
# all possible future states (could autogenerate from above)
_possible_future_statuses = {'waiting' : ['stalled', 'killed', 'queued', 'processing', 'done', 'error'],
'queued' : ['killed', 'processing', 'done', 'error'],
'processing' : ['killed', 'error', 'done'],
'stalled' : [],
'killed' : [],
'error' : [],
'done' : []
}
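# Sketch (not part of the original module): as the comment above notes, the
# future-status table could be derived from _status_transitions by taking the
# transitive closure of the transition graph, e.g.:
#
#   def _future_statuses(start):
#       seen, stack = set(), list(_status_transitions[start])
#       while stack:
#           s = stack.pop()
#           if s not in seen:
#               seen.add(s)
#               stack.extend(_status_transitions[s])
#       return seen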
def _status_test_wrapper(test_status):
    """Builds a test function that checks whether a job has reached *test_status*"""
    def status_test(jid):
        cl = _getcloud()
        cur_status = cl.status(jid)
        if test_status == cur_status:
            return cur_status
        if test_status not in _possible_future_statuses[cur_status]:
            raise _CloudException('Job has status %s. Will never (again) be %s' % (cur_status, test_status),
                                  jid=jid, status=cur_status)
        return False
    return status_test
def status(jid, test_status, timeout=None):
    """Wait until job's status is ``test_status``
    Raise CloudException if no longer possible to reach status
    Returns job's current status (which will be equal to test_status)
    """
    _checkint(jid, 'jid')
    return _wait_for_test(jid, _status_test_wrapper(test_status), timeout=timeout,
                          timeout_msg='Job did not reach status %s before timeout' % test_status)
def port(jid, port, protocol='tcp', timeout=None):
"""Wait until job has opened ``port`` (under protocol ``protocol``)
for listening.
Returns port translation dictionary.
See docstring for :func:`cloud.shortcuts.get_connection_info` for description
of returned dictionary
"""
_checkint(jid, 'jid')
_checkint(port, 'port')
cl = _getcloud()
processing_poll_interval = 1.0 # polling on status wait
port_poll_interval = 0.7 # polling on port wait
abort_time = _time.time() + timeout if timeout else None
status = None
while True:
jid_info = cl.info(jid, ['ports','status'])[jid]
status = jid_info['status']
port_info = jid_info.get('ports')
if status in _possible_future_statuses['processing']:
raise _CloudException('Job is already finished with status %s' % status,
jid=jid, status=status)
elif not port_info:
if cl.is_simulated() and status == 'processing':
return {'address' : '127.0.0.1', 'port' : port}
elif abort_time and _time.time() > abort_time:
raise _CloudTimeoutError('Job did not start processing before timeout',
jid=jid, status=status)
_time.sleep(processing_poll_interval)
continue
port_proto_info = port_info[protocol]
if port not in port_proto_info:
if abort_time and _time.time() > abort_time:
raise _CloudTimeoutError('Job did not open port %s before timeout' % port,
jid=jid)
_time.sleep(port_poll_interval)
continue
return port_proto_info[port]
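# Illustrative usage sketch (not part of the original module): waiting for a
# job to start processing and then for a listening port to open. The import
# path, the job function and the port number are assumptions; the dictionary
# keys shown follow the simulated return value above.
#
#   import scicloud as cloud
#   from scicloud import wait_for
#   jid = cloud.call(start_server)            # start_server is a hypothetical function
#   wait_for.status(jid, 'processing')
#   conn = wait_for.port(jid, 8000, timeout=120)
#   print conn['address'], conn['port']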
| scicloud | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/wait_for.py | wait_for.py |
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2012 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import time as _time
from .cloud import CloudException as _CloudException
from .cloud import CloudTimeoutError as _CloudTimeoutError
from . import _getcloud
def _wait_for_test(jid, test_func, timeout=None, timeout_msg='Job wait timed out'):
"""Keep testing job until test_func(jid) returns a true value
Return return value of test_func"""
poll_interval = 1.0
abort_time = _time.time() + timeout if timeout else None
while True:
retval = test_func(jid)
if retval:
return retval
if abort_time and _time.time() > abort_time:
raise _CloudTimeoutError(timeout_msg, jid=jid)
_time.sleep(poll_interval)
def _checkint(var, name=''):
if not isinstance(var, (int, long)):
raise TypeError('%s must be a single integer' % name)
# all possible status transitions
_status_transitions = {'waiting' : ['stalled', 'killed', 'queued'],
'queued' : ['killed', 'processing'],
'processing' : ['killed', 'error', 'done'],
'stalled' : [],
'killed' : [],
'error' : [],
'done' : []
}
# all possible future states (could autogenerate from above)
_possible_future_statuses = {'waiting' : ['stalled', 'killed', 'queued', 'processing', 'done', 'error'],
'queued' : ['killed', 'processing', 'done', 'error'],
'processing' : ['killed', 'error', 'done'],
'stalled' : [],
'killed' : [],
'error' : [],
'done' : []
}
def _status_test_wrapper(test_status):
    """Builds a test function that checks whether a job has reached *test_status*"""
    def status_test(jid):
        cl = _getcloud()
        cur_status = cl.status(jid)
        if test_status == cur_status:
            return cur_status
        if test_status not in _possible_future_statuses[cur_status]:
            raise _CloudException('Job has status %s. Will never (again) be %s' % (cur_status, test_status),
                                  jid=jid, status=cur_status)
        return False
    return status_test
def status(jid, test_status, timeout=None):
    """Wait until job's status is ``test_status``
    Raise CloudException if no longer possible to reach status
    Returns job's current status (which will be equal to test_status)
    """
    _checkint(jid, 'jid')
    return _wait_for_test(jid, _status_test_wrapper(test_status), timeout=timeout,
                          timeout_msg='Job did not reach status %s before timeout' % test_status)
def port(jid, port, protocol='tcp', timeout=None):
"""Wait until job has opened ``port`` (under protocol ``protocol``)
for listening.
Returns port translation dictionary.
See docstring for :func:`cloud.shortcuts.get_connection_info` for description
of returned dictionary
"""
_checkint(jid, 'jid')
_checkint(port, 'port')
cl = _getcloud()
processing_poll_interval = 1.0 # polling on status wait
port_poll_interval = 0.7 # polling on port wait
abort_time = _time.time() + timeout if timeout else None
status = None
while True:
jid_info = cl.info(jid, ['ports','status'])[jid]
status = jid_info['status']
port_info = jid_info.get('ports')
if status in _possible_future_statuses['processing']:
raise _CloudException('Job is already finished with status %s' % status,
jid=jid, status=status)
elif not port_info:
if cl.is_simulated() and status == 'processing':
return {'address' : '127.0.0.1', 'port' : port}
elif abort_time and _time.time() > abort_time:
raise _CloudTimeoutError('Job did not start processing before timeout',
jid=jid, status=status)
_time.sleep(processing_poll_interval)
continue
port_proto_info = port_info[protocol]
if port not in port_proto_info:
if abort_time and _time.time() > abort_time:
raise _CloudTimeoutError('Job did not open port %s before timeout' % port,
jid=jid)
_time.sleep(port_poll_interval)
continue
return port_proto_info[port]
| 0.720663 | 0.146118 |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2011 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import scicloud as cloud
_key_list = 'key/list/'
_key_get = 'key/%s/'
_key_activate = 'key/%s/activate/'
_key_deactivate = 'key/%s/deactivate/'
_key_create = 'key/'
def list_keys(username, password, active_only=False):
"""Returns a list of all api keys. If *active_only* is True, only
active keys are returned. *username* and *password* should be your
PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_list,
{},
get_values={'active_only': active_only},
auth=(username, password))
return resp['api_keys']
def get_key(username, password, api_key):
"""Returns information including api_secretkey, active status, and
note for the specified *api_key*. *username* and *password* should
be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_get % api_key,
{},
auth=(username, password))
return resp['key']
def activate_key(username, password, api_key):
"""Activates the specified *api_key*. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_activate % api_key,
{},
auth=(username, password))
return True
def deactivate_key(username, password, api_key):
"""Deactivates the specified *api_key*. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_deactivate % api_key,
{},
auth=(username, password))
return True
def create_key(username, password):
"""Creates a new api_key. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_create,
{},
auth=(username, password))
return resp['key']
def get_key_by_key(api_key, api_secretkey):
"""
    Similar to *get_key*, but accesses information via api_key credentials
(api_key and api_secretkey).
"""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_get % api_key,
{},
auth=(api_key, api_secretkey))
return resp['key']
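# Illustrative usage sketch (not part of the original module): a typical key
# management flow. The credentials are placeholders and the dictionary keys
# shown are assumptions based on the get_key docstring above.
#
#   keys = list_keys('user@example.com', 'password', active_only=True)
#   new_key = create_key('user@example.com', 'password')
#   print new_key.get('api_key'), new_key.get('api_secretkey')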
| scicloud | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/account.py | account.py |
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2011 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import scicloud as cloud
_key_list = 'key/list/'
_key_get = 'key/%s/'
_key_activate = 'key/%s/activate/'
_key_deactivate = 'key/%s/deactivate/'
_key_create = 'key/'
def list_keys(username, password, active_only=False):
"""Returns a list of all api keys. If *active_only* is True, only
active keys are returned. *username* and *password* should be your
PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_list,
{},
get_values={'active_only': active_only},
auth=(username, password))
return resp['api_keys']
def get_key(username, password, api_key):
"""Returns information including api_secretkey, active status, and
note for the specified *api_key*. *username* and *password* should
be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_get % api_key,
{},
auth=(username, password))
return resp['key']
def activate_key(username, password, api_key):
"""Activates the specified *api_key*. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_activate % api_key,
{},
auth=(username, password))
return True
def deactivate_key(username, password, api_key):
"""Deactivates the specified *api_key*. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_deactivate % api_key,
{},
auth=(username, password))
return True
def create_key(username, password):
"""Creates a new api_key. *username* and *password*
should be your PiCloud login information."""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_create,
{},
auth=(username, password))
return resp['key']
def get_key_by_key(api_key, api_secretkey):
"""
    Similar to *get_key*, but accesses information via api_key credentials
(api_key and api_secretkey).
"""
conn = cloud._getcloudnetconnection()
resp = conn.send_request(_key_get % api_key,
{},
auth=(api_key, api_secretkey))
return resp['key']
| 0.735737 | 0.062217 |
from __future__ import with_statement
from __future__ import absolute_import
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2011 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import os
import random
import re
import sys
import tempfile
from subprocess import Popen, PIPE
try:
from json import dumps as json_serialize
except ImportError: #If python version < 2.6, we need to use simplejson
from simplejson import dumps as json_serialize
from .util import template
from .rest import _low_level_publish
from .cron import _low_level_register
from .cloud import CloudException
from . import _getcloud
def _get_cloud_and_params(command, kwargs, ignore = []):
for kwd in kwargs:
if not kwd.startswith('_'):
raise ValueError('wildcard kwargs must be cloud kwd')
cloud = _getcloud()
cloud._checkOpen()
params = cloud._getJobParameters(None, kwargs, ignore)
params['func_name'] = command
    params['fast_serialization'] = 2  # guaranteed to pass
params['language'] = 'shell'
return cloud, params
def execute(command, argdict, return_file=None, ignore_exit_status=False, cwd=None, **kwargs):
"""Execute (possibly) templated *command*. Returns Job IDentifier (jid)
* argdict - Dictionary mapping template parameters to values
* return_file: Contents of this file will be result of job. result is stdout if not provided
* ignore_exit_status: if true, a non-zero exit code will not result in job erroring
* cwd: Current working directory to execute command within
* kwargs: See cloud.call underscored keyword arguments
"""
template.validate_command_args(command, argdict)
_handle_args_upload(argdict)
cloud, params = _get_cloud_and_params(command, kwargs)
jid = cloud.adapter.job_call(params, _wrap_execute_program(command, return_file, ignore_exit_status, cwd = cwd),
(), argdict)
return jid
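# Usage sketch for execute() (the template placeholder syntax shown is
# illustrative -- see cloud.util.template for the exact syntax; the '@' prefix
# marks an argument as a local file to upload, per _handle_args_upload below):
#
#   import scicloud as cloud
#   jid = cloud.shell.execute('wc -l {input}', {'input': '@/tmp/data.txt'})
#   print cloud.result(jid)   # the command's stdout, since no return_file was given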
def execute_map(command, common_argdict, map_argdict, return_file=None,
ignore_exit_status=False, cwd=None, **kwargs):
"""Execute templated command in parallel. Return list of Job Identifiers (jids). See cloud.map
for more information about mapping. Arguments to this are:
* common_argdict - Dictionary mapping template parameters to values for ALL map jobs
* map_argdict - Dictionary mapping template parameters to a list of values
The nth mapjob will have its template parameter substituted by the nth value in the list
Note that all elements of map_argdict.values() must have the same length;
The number of mapjobs produced will be equal to that length
* return_file: Contents of this file will be result of job. result is stdout if not provided
* ignore_exit_status: if true, a non-zero exit code will not result in job erroring
* cwd: Current working directory to execute command within
* kwargs: See cloud.map underscored keyword arguments
"""
#print 'c/m', common_argdict, map_argdict
combined_dct = {}
combined_dct.update(common_argdict)
combined_dct.update(map_argdict)
template.validate_command_args(command, combined_dct)
_handle_args_upload(common_argdict)
    # Convert map_argdict into a list of dicts
num_args = None
map_dct_iters = {}
# Error handling
for key, val_list in map_argdict.items():
if not num_args:
num_args = len(val_list)
if not val_list:
raise ValueError('Key %s must map to a non-empty argument list' % key)
elif num_args != len(val_list):
raise ValueError('Key %s had %s args. Expected %s to conform to other keys' % (key, len(val_list), num_args))
map_dct_iters[key] = iter(val_list)
map_template_lists = [] # will be list of template dictionaries
if not num_args:
raise ValueError('At least one element must be provided in map_argdict')
for _ in xrange(num_args):
map_template = {}
for key, dct_iter in map_dct_iters.items():
nxtval = next(dct_iter)
map_template[key] = nxtval
_handle_args_upload(map_template)
map_template_lists.append(map_template)
cloud, params = _get_cloud_and_params(command, kwargs)
jids = cloud.adapter.jobs_map(params,
_wrap_execute_program(command, return_file,
ignore_exit_status, common_argdict, cwd=cwd),
None, map_template_lists)
return jids
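# Usage sketch for execute_map() (same caveats as the execute() sketch above;
# one map job is produced per value in the map_argdict lists):
#
#   jids = cloud.shell.execute_map('grep {pattern} {input}',
#                                  {'input': '@/tmp/data.txt'},          # common args
#                                  {'pattern': ['foo', 'bar', 'baz']})   # 3 map jobs
#   print cloud.result(jids)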
def rest_publish(command, label, return_file=None,
ignore_exit_status=False, **kwargs):
"""Publish shell *command* to PiCloud so it can be invoked through the PiCloud REST API
The published function will be managed in the future by a unique (URL encoded) *label*.
Returns url of published function. See cloud.rest.publish
    See cloud.shell.execute for a description of the other arguments
See cloud.rest.publish for description of **kwargs
"""
if not label:
raise ValueError('label must be provided')
    m = re.match(r'^[A-Z0-9a-z_+.-]+$', label)
    if not m:
        raise TypeError('Label can only consist of valid URI characters (alphanumeric or one of _ + - .)')
try:
label = label.decode('ascii').encode('ascii')
except (UnicodeDecodeError, UnicodeEncodeError): #should not be possible
raise TypeError('label must be an ASCII string')
cloud, params = _get_cloud_and_params(command, kwargs,
ignore=['_label', '_depends_on', '_depends_on_errors'] )
# shell argspecs are dictionaries
cmd_params = template.extract_vars(command)
argspec = {'prms' : cmd_params,
'cmd' : command}
argspec_serialized = json_serialize(argspec)
if len(argspec_serialized) >= 255: #won't fit in db - clear command
        del argspec['cmd']
argspec_serialized = json_serialize(argspec)
if len(argspec_serialized) >= 255: #commands too large; cannot type check
argspec_serialized = json_serialize({})
params['argspec'] = argspec_serialized
return _low_level_publish(_wrap_execute_program(command, return_file, ignore_exit_status),
label, 'raw', 'actiondct',
params, func_desc='command invoked in shell')['uri']
def cron_register(command, label, schedule, return_file = None,
ignore_exit_status=False, **kwargs):
"""Register shell *command* to be run periodically on PiCloud according to *schedule*
The cron can be managed in the future by the specified *label*.
Flags only relevant if you call cloud.result() on the cron job:
return_file: Contents of this file will be result of job created by REST invoke.
result is stdout if not provided
ignore_exit_status: if true, a non-zero exit code will not result in job erroring
"""
cloud, params = _get_cloud_and_params(command, kwargs,
ignore=['_label', '_depends_on', '_depends_on_errors'] )
func = _wrap_execute_program(command, return_file, ignore_exit_status)
return _low_level_register(func, label, schedule, params)
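# Usage sketch for rest_publish() and cron_register() (label names are
# illustrative; the schedule string is assumed to follow the crontab format
# used by cloud.cron.register):
#
#   url = cloud.shell.rest_publish('date', 'print-date')
#   cloud.shell.cron_register('date >> /tmp/dates.log', 'log-date', '0 * * * *')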
"""execution logic"""
def _execute_shell_program(command, return_file, ignore_exit_status, template_args, cwd = None):
"""Executes a shell program on the cloud"""
_handle_args_download(template_args, cwd)
templated_cmd = template.generate_command(command, template_args)
    if not return_file: # must save command's stdout to a file
stdout_handle = PIPE
else:
stdout_handle = sys.stdout
    # ensure /home/scivm/ is on PYTHONPATH if any python interpreter is launched
env = os.environ
cur_path = env.get('PYTHONPATH','')
if cur_path:
cur_path = ':%s' % cur_path
env['PYTHONPATH'] = '/home/scivm/' + cur_path
#p = Popen(templated_cmd, shell=True, stdout=stdout_handle, stderr=PIPE, cwd=cwd, env=env)
# execute in context of BASH for environment variables
p = Popen(["/bin/bash", "-ic", templated_cmd], stdout=stdout_handle, stderr=PIPE, cwd=cwd, env=env)
if stdout_handle == PIPE:
# attach tee to direct stdout to file
return_file = tempfile.mktemp('shellcmd_stdout')
tee_cmd = 'tee %s' % return_file
p_out = p.stdout
tout = Popen(tee_cmd, shell=True, stdin=p_out, stdout=sys.stdout, stderr=sys.stderr, cwd=cwd)
else:
tout = None
# capture stderr for exceptions
stderr_file = tempfile.mktemp('shellcmd_stderr')
tee_cmd = 'tee %s' % stderr_file
p_err = p.stderr
terr = Popen(tee_cmd, shell=True, stdin=p_err, stdout=sys.stderr, stderr=sys.stderr, cwd=cwd)
retcode = p.wait()
    # give the tee processes time to flush their output
terr.wait()
if tout:
tout.wait()
if retcode:
msg = 'command terminated with nonzero return code %s' % retcode
if ignore_exit_status:
print >> sys.stderr, msg
else:
msg += '\nstderr follows:\n'
with open(stderr_file) as ferr:
# ensure don't exceed storage limits
ferr.seek(0,2)
ferr_size = ferr.tell()
ferr.seek(max(0,ferr_size - 15000000), 0)
msg += ferr.read()
raise CloudException(msg)
if cwd and not cwd.endswith('/'):
cwd = cwd + '/'
return_path = cwd + return_file if cwd and not return_file.startswith('/') else return_file
try:
with open(return_path,'rb') as f: # If this raises an exception, return file could not be read
retval = f.read()
except (IOError, OSError), e:
if len(e.args) == 2:
e.args = (e.args[0], e.args[1] + '\nCannot read return file!')
raise
if stdout_handle == PIPE:
os.remove(return_file)
os.remove(stderr_file)
return retval
def _execute_program_unwrapper(command, return_file, ignore_exit_status, wrapped_args, template_args, cwd = None):
"""unwraps closure generated in _wrap_execute_program._execute_program_unwrapper_closure"""
args = template_args
if wrapped_args:
args.update(wrapped_args)
return _execute_shell_program(command, return_file, ignore_exit_status, args, cwd)
def _wrap_execute_program(command, return_file, ignore_exit_status, wrapped_args=None, cwd = None):
"""Used to put common arguments inside the stored function itself
close over these arguments
At execution, template_args are merged with wrapped_args
"""
def _execute_program_unwrapper_closure(**template_args):
"""
minimal function to avoid opcode differences between python2.6 and python2.7
Code of this function is stored in pickle object; _execute_program_unwrapper is a global environment reference
"""
return _execute_program_unwrapper(command, return_file, ignore_exit_status,
wrapped_args, template_args, cwd)
return _execute_program_unwrapper_closure
"""helper functions"""
"""File uploading logic
There are some inefficiencies here.
By closing over the file data in a function, we lose the ability to stream it from disk
In practical usage, this probably won't matter and can always be changed later
by using a rest invoke interface
"""
action_default = 'action_default'
action_upload = 'action_upload'
def _encode_upload_action(file_name):
# upload file by binding it to a closure
f = open(file_name,'rb')
contents = f.read()
f.close()
base_name = os.path.basename(file_name)
return {'action' : action_upload,
'filename' : base_name,
'contents' : contents}
def _encode_default_action(arg):
return {'action' : action_default,
'value' : arg}
def _handle_args_upload(arg_dct):
""""
arg_dct is a dictionary describing a job
Each key is parameter that maps to its argument value
If an argument is a file, it is automatically replaced by a function
that handles file unpacking
"""
for param, arg in arg_dct.items():
if arg.startswith('@'): # defines a file
arg_dct[param] = _encode_upload_action(arg[1:])
else:
arg_dct[param] = _encode_default_action(arg)
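# Example of the encoding this performs (file contents elided; the exact dict
# layout matches _encode_upload_action/_encode_default_action above):
#
#   args = {'input': '@/tmp/data.txt', 'n': '5'}
#   _handle_args_upload(args)
#   # args is now roughly:
#   #   {'input': {'action': 'action_upload', 'filename': 'data.txt', 'contents': '...'},
#   #    'n':     {'action': 'action_default', 'value': '5'}}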
"""downloading"""
def _decode_upload_action(action_dct, cwd):
"""place data in the current directory
file name is name
if name already exists, append random integers to name until it doesn't
"""
name = action_dct['filename']
contents = action_dct['contents']
cloud = _getcloud()
if not cloud.running_on_cloud(): # simulation
name = tempfile.mktemp(suffix=name)
started = False
while os.path.exists(name):
if not started:
name+='-'
started = True
name += str(random.randint(0,9))
if cwd and not cwd.endswith('/'):
cwd = cwd + '/'
fullpath = cwd + name if cwd else name
# Write user-uploaded file to local storage. (Can fail due to permission issues)
    # Be sure it has executable permissions in case it is a shell script
f = os.fdopen(os.open(fullpath,os.O_CREAT|os.O_RDWR,0777),'wb')
f.write(contents)
f.close()
return name # use local name to fill in template
def _decode_default_action(action_dct, cwd):
return action_dct['value']
def _handle_args_download(arg_dct, cwd):
decode_map = {
action_upload : _decode_upload_action,
action_default : _decode_default_action
}
for param, action_dct in arg_dct.items():
arg_dct[param] = decode_map[action_dct['action']](action_dct, cwd)
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/shell.py
|
shell.py
|
| 0.50415 | 0.095687 |
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cloudpickle
import pickledebug
class Serializer(object):
serializedObject = None
_exception = None
def __init__(self, obj):
"""Serialize a python object"""
self.obj = obj
def set_os_env_vars(self, os_env_vars):
raise ValueError('Cannot transport os_env_vars on default serializer')
def getexception(self):
return self._exception
def run_serialization(self, min_size_to_save=0):
#min_size_to_save handled by subclass
try:
self.serializedObject = pickle.dumps(self.obj, protocol = 2)
return self.serializedObject
except pickle.PickleError, e:
self._exception = e
raise
def get_module_dependencies(self): #can't resolve here..
return []
class CloudSerializer(Serializer):
"""Use clould pickler"""
_pickler = None
_pickler_class = cloudpickle.CloudPickler
os_env_vars = []
def set_os_env_vars(self, os_env_vars):
self.os_env_vars = os_env_vars
def run_serialization(self, min_size_to_save=0):
f = StringIO()
self._pickler = self._pickler_class(f, protocol =2)
self._pickler.os_env_vars = self.os_env_vars
self.set_logged_object_minsize(min_size_to_save)
try:
self._pickler.dump(self.obj)
self.serializedObject = f.getvalue()
return self.serializedObject
except pickle.PickleError, e:
self._exception = e
raise
def set_logged_object_minsize(self, minsize):
#implemented by subclass
pass
def get_module_dependencies(self):
return self._pickler.modules
class DebugSerializer(CloudSerializer):
_pickler_class = pickledebug.CloudDebugPickler
def write_debug_report(self, outfile,hideHeader=False):
self._pickler.write_report(self.obj, outfile,hideHeader=hideHeader)
def str_debug_report(self,hideHeader=False):
"""Get debug report as string"""
strfile = StringIO()
self._pickler.write_report(self.obj, strfile,hideHeader=hideHeader)
return strfile.getvalue()
def set_report_minsize(self, minsize):
self._pickler.printingMinSize = minsize
def set_logged_object_minsize(self, minsize):
self._pickler.min_size_to_save = minsize
class Deserializer(object):
deserializedObj = None
    def __init__(self, pickled_str):
        """Expects a python string holding a pickled object, which is deserialized immediately"""
        self.deserializedObj = pickle.loads(pickled_str)
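# Round-trip sketch (assumes cloudpickle can serialize the closure below, which
# is its purpose; DebugSerializer works the same way but can additionally emit
# an XML report via str_debug_report()):
#
#   s = CloudSerializer(lambda x: x + 1)
#   payload = s.run_serialization()
#   restored = Deserializer(payload).deserializedObj
#   print restored(41)   # -> 42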
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/serialization/serializationhandlers.py
|
serializationhandlers.py
|
| 0.442637 | 0.050799 |
from __future__ import with_statement
"""
This module is responsible for managing and writing serialization reports
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
import errno, os, datetime, stat, time, threading
import shutil
import distutils
import distutils.dir_util
from . import pickledebug
from .serializationhandlers import DebugSerializer
from .. import cloudconfig as cc
from ..cloudlog import cloudLog, purgeDays
from ..util import fix_sudo_path
from pickledebug import DebugPicklingError
class SerializationReport():
c = """Path to save object serialization meta-data.
This path is relative to ~/.scivm/"""
serializeLoggingPath = \
cc.logging_configurable('serialize_logging_path',
default='datalogs/',
comment=c)
#k = __import__('f')
#p = __builtins__.__import__('g')
pid = None #process identifier
cntLock = None
def __init__(self, subdir = ""):
"""
Create logging directory with proper path if subdir is set
"""
if subdir:
logpath = os.path.expanduser(os.path.join(cc.baselocation,self.serializeLoggingPath,subdir))
self.purge_old_logs(logpath)
#uses pidgin's log path format
date = str(datetime.datetime.today().date())
date = date.replace(':','-')
time = str(datetime.datetime.today().time())[:8]
time = time.replace(':','')
timestamp = date + '.' + time
logpath = os.path.join(logpath,timestamp)
try_limit = 10000
ctr = 0
basepath = logpath
while True:
try:
if not distutils.dir_util.mkpath(logpath):
raise distutils.errors.DistutilsFileError('retry')
except distutils.errors.DistutilsFileError, e:
if ctr >= try_limit:
raise IOError("can't make file %s. Error is %s" % (logpath,str(e)))
ctr+=1
logpath = basepath + '-%d' % ctr
else:
break
cloudLog.info("Serialization reports will be written to %s " % logpath)
fix_sudo_path(logpath)
self.logPath = logpath
self.pickleCount = {}
self.cntLock = threading.Lock()
def purge_old_logs(self, logpath):
"""Remove subdirectories with modified time older than purgeDays days"""
try:
subdirs = os.listdir(logpath)
except OSError, e:
if e.errno != errno.ENOENT:
cloudLog.debug('Could not purge %s due to %s', logpath, str(e))
return
now = time.time()
allowed_difference = purgeDays * 24 * 3600 #purge days in seconds
for s in subdirs: #walk through log subdirectories
new_dir = os.path.join(logpath,s)
try:
stat_result = os.stat(new_dir)
except OSError:
cloudLog.warn('Could not stat %s', new_dir, exc_info = True)
continue
if stat.S_ISDIR(stat_result.st_mode) and (now - stat_result.st_mtime) > allowed_difference:
                cloudLog.debug('Deleting %s (%s days old)', new_dir, (now - stat_result.st_mtime)/(24*3600))
try:
shutil.rmtree(new_dir)
except OSError:
cloudLog.warn('Could not delete %s', new_dir, exc_info = True)
def update_counter(self, baselogname):
baselogname = baselogname.replace('<','').replace('>','')
with self.cntLock:
cnt = self.pickleCount.get(baselogname,0)
cnt+=1
self.pickleCount[baselogname] = cnt
return cnt
def get_report_file(self, logname, ext, cnt = None, pid = None):
"""Returns the name of a report file with cnt and pid filled in"""
logname = logname.replace('<','').replace('>','')
mid = ''
if pid:
mid += 'P%d.' % pid
if cnt:
mid += '%d.' % cnt
logname = logname % mid
logname+= ext
return os.path.join(self.logPath,logname)
def open_report_file(self, logname, ext, cnt = None, pid = None):
"""Open an arbitrary report file with cnt and pid filled in"""
return file(self.get_report_file(logname, ext, cnt, pid),'w')
"""Reporting"""
def save_report(self, dbgserializer, logname, cnt = None, pid = ''):
if not hasattr(dbgserializer,'write_debug_report'):
#due to serialization level being cloud.call argument, we might not have
# a write_debug_report in active serializer, even though this object exists
return
#HACK for default detection
if type(pid) == str:
pid = self.pid
reportf = self.open_report_file(logname, '.xml', cnt, pid)
dbgserializer.write_debug_report(reportf)
reportf.close()
return reportf.name
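# Usage sketch (the subdir and log names are illustrative; *dbg_serializer*
# stands for a DebugSerializer from serializationhandlers, and the '%s' in the
# log name is where get_report_file() fills in the pid/counter):
#
#   report = SerializationReport('my_session')
#   cnt = report.update_counter('job_args')
#   path = report.save_report(dbg_serializer, 'job_args.%s', cnt)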
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/serialization/report.py
|
report.py
|
| 0.434341 | 0.058239 |
from ..cloud import CloudException
class CloudConnection(object):
"""Abstract connection class to deal with low-level communication of cloud adapter"""
_isopen = False
_adapter = None
@property
def opened(self):
"""Returns whether the connection is open"""
return self._isopen
def open(self):
"""called when this connection is to be used"""
if self._adapter and not self._adapter.opened:
self._adapter.open()
self._isopen = True
def close(self):
"""called when this connection is no longer needed"""
if not self.opened:
raise CloudException("%s: Cannot close a closed connection", str(self))
self._isopen = False
@property
def adapter(self):
return self._adapter
def needs_restart(self, **kwargs):
"""Called to determine if the cloud must be restarted due to different connection parameters"""
return False
def job_add(self, params, logdata = None):
raise NotImplementedError
def jobs_join(self, jids, timeout = None):
"""
Allows connection to manage joining
If connection manages joining, it should return a list of statuses
describing the finished job
Else, return False
"""
return False
def jobs_map(self, params, mapargs, mapkwargs = None, logdata = None):
raise NotImplementedError
def jobs_result(self, jids):
raise NotImplementedError
def jobs_kill(self, jids):
raise NotImplementedError
def jobs_delete(self, jids):
raise NotImplementedError
def jobs_info(self, jids, info_requested):
raise NotImplementedError
def is_simulated(self):
raise NotImplementedError
def connection_info(self):
return {'opened': self.opened, 'connection_type' :None}
def modules_check(self, modules):
pass
def modules_add(self, modules):
pass
def packages_list(self):
"""
Get list of packages from server
"""
return []
def force_adapter_report(self):
"""
Should the SerializationReport for the SerializationAdapter be coerced to be instantiated?
"""
return False
def report_name(self):
raise NotImplementedError
def get_report_dir(self):
raise TypeError('get_report_dir is only valid on connection hooks')
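# Minimal sketch of a concrete subclass (illustrative only; the real adapters
# implement the full job_* interface and report real connection_info):
#
#   class EchoConnection(CloudConnection):
#       def job_add(self, params, logdata=None):
#           print 'would submit job', params.get('func_name')
#           return 0
#       def is_simulated(self):
#           return True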
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/transport/connection.py
|
connection.py
|
| 0.743447 | 0.153042 |
import os
import sys
class NoOptionError(Exception):
"""A requested option was not found."""
def __init__(self, option):
Exception.__init__(self, "No key %r" % option)
self.option = option
extraInfo = {
'Account': 'PiCloud account information. This is the only section that you need to worry about.',
'Logging': 'Control what should be logged and where',
'Transport': 'PiCloud information transfer',
'Multiprocessing': 'Options that control running the cloud locally',
'Simulation': 'Options for simulation mode that override Multiprocessing and Logging options'
}
class ConfigManager(object):
backend = None
hiddenSets = []
@staticmethod
def getCommentStr(section, option):
return option.lower()
def __init__(self, defaults=None):
self.sections = {}
self.optioncomment = {}
def read(self, fname):
"""Return True on successful read"""
import os
import sys
dir = os.path.dirname(fname)
conf = os.path.basename(fname)
pyfile = os.path.splitext(conf)[0]
addedEntry = False
try:
if dir not in sys.path:
sys.path.append(dir)
addedEntry = True
if not os.path.exists(fname):
try:
os.unlink("".join([dir, os.sep, pyfile, '.pyc'])) #force recompilation
except OSError:
pass
import types
self.backend = types.ModuleType('cloudconf')
return False #force rewrite
else:
try:
if pyfile in sys.modules:
self.backend = sys.modules[pyfile]
else:
self.backend = __import__(pyfile)
except ImportError, e:
import types
sys.stderr.write('CLOUD ERROR: Malformed cloudconf.py:\n %s\nUsing default settings.\n' % str(e))
self.backend = types.ModuleType('cloudconf')
finally:
if addedEntry:
sys.path.remove(dir)
return True
def get(self, section, option, comment = None):
if not hasattr(self.backend, option):
raise NoOptionError(option)
value = getattr(self.backend, option)
self.sections.setdefault(section, {})[option] = value
if comment:
self.optioncomment[self.getCommentStr(section, option)] = comment
return value
def hiddenset(self, *args):
"""Defer set commands"""
self.hiddenSets.append(args)
def showHidden(self):
"""Do all deferred (hidden) sets -- not thread safe"""
for hiddenSet in self.hiddenSets:
self.set(*hiddenSet)
self.hiddenSets = []
def set(self, section, option, value, comment = None):
self.sections.setdefault(section, {})[option] = value
if comment:
self.optioncomment[self.getCommentStr(section, option)] = comment
#print 'setting backend %s to %s' % (option, value)
setattr(self.backend,option,value)
def write(self, fp):
"""Write configuration file with defaults
Include any comments"""
#hack to ensure account comes first:
sections = self.sections.keys()
sections.sort()
for section in sections:
cmt = '"' * 3
fp.write('%s\n%s\n' % (cmt, section))
ei = extraInfo.get(section)
if ei:
fp.write('%s\n%s\n' % (ei, cmt))
else:
fp.write('%s\n' % cmt)
started = False
for (key, value) in self.sections[section].items():
if key != "__name__":
comment = self.optioncomment.get(self.getCommentStr(section, key))
if comment:
if started:
fp.write('\n')
for cel in comment.split('\n'):
fp.write('# %s\n' % cel.strip())
#print 'write %s=%s with type %s'% (key, repr(value), type(value))
fp.write("%s = %s\n" %
(key, repr(value).replace('\n', '\n\t')))
started = True
fp.write("\n\n")
class ConfigSettings(object):
"""This object provides the ability to programmatically edit the cloud configuration (found in cloudconf.py).
``commit()`` must be called to update the cloud module with new settings - and restart all active clouds
"""
@staticmethod
def _loader(path,prefix, do_reload):
"""Bind """
files = os.listdir(path)
delayed = []
for f in files:
if f.endswith('.py'):
endname = f[:-3]
if endname == 'cloudconfig' or endname == 'configmanager' or endname == 'setup' or endname == 'writeconfig' or endname == 'cli':
continue
if endname == '__init__':
delayed.append(prefix[:-1]) #do not load __init__ until submodules reloaded
continue
elif endname == 'mp':
modname = prefix + endname
delayed.append(modname)
else:
modname = prefix + endname
#print modname #LOG ME
if do_reload:
if modname in sys.modules:
try:
reload(sys.modules[modname])
except ImportError:
pass
else:
try:
__import__(modname)
except ImportError:
pass
elif os.path.isdir(path + f):
newpath = path + f + os.sep
ConfigSettings._loader(newpath,prefix + f + '.',do_reload)
if delayed:
if '__init__' in delayed: #must come last
delayed.remove('__init__')
delayed.append('__init__')
for delay_mod in delayed:
if do_reload:
if delay_mod in sys.modules:
try:
reload(sys.modules[delay_mod])
except ImportError:
pass
else:
try:
__import__(delay_mod)
except ImportError:
pass
delayed = []
def _showhidden(self):
"""Show hidden variables"""
self.__confmanager.showHidden()
self.__init__(self.__confmanager) #restart
def commit(self):
"""Update cloud with new settings.
.. warning::
This will restart any active cloud instances, wiping mp/simulated jobs and setkey information
"""
import scicloud as cloud
setattr(cloud,'__immutable', False)
cloud.cloudinterface._setcloud(cloud, type=None)
if hasattr(cloud,'mp'):
setattr(cloud.mp,'__immutable', False)
cloud.cloudinterface._setcloud(cloud.mp, type=None)
#Reload cloud modules in correct order
mods = cloud._modHook.mods[:]
for modstr in mods:
mod = sys.modules.get(modstr)
if mod and modstr not in ['cloud.util.configmanager', 'cloud.cloudconfig']:
try:
reload(mod)
except ImportError:
pass
reload(cloud)
cloud._modHook.mods = mods #restore mods after it is wiped
def __init__(self, confmanager, do_reload=False):
backend = confmanager.backend
self.__confmanager = confmanager
def _set_prop(item):
if hasattr(backend, item):
                typ = type(getattr(backend, item))
if typ is type(None):
typ = None
else:
typ = None
#print 'item %s has type %s' % (item, typ)
def __inner__(self, value):
if typ:
try:
k = typ(value)
setattr(backend,item, k)
except ValueError, e:
                        raise ValueError('Configuration option %s must have type %s.' % (item, typ.__name__))
return __inner__
def _get_prop(item):
def __inner__(self):
return getattr(backend, item)
return __inner__
import scicloud as cloud
ConfigSettings._loader(cloud.__path__[0] + os.sep ,'scicloud.',do_reload)
for options in confmanager.sections.values():
for option in options:
prop = property(_get_prop(option), _set_prop(option), None, confmanager.optioncomment.get(ConfigManager.getCommentStr("",option)))
setattr(self.__class__, option, prop)
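# Usage sketch for ConfigManager (section/option names are illustrative; in the
# package they come from the cloudconfig defaults, and the file written is the
# user's cloudconf.py):
#
#   cm = ConfigManager()
#   cm.read('/tmp/cloudconf.py')                  # returns False if the file is missing
#   cm.set('Logging', 'verbose', True, comment='Enable verbose logging')
#   with open('/tmp/cloudconf.py', 'w') as fp:
#       cm.write(fp)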
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/util/configmanager.py
|
configmanager.py
|
| 0.276202 | 0.066691 |
from __future__ import with_statement
'''
Provides for storage and retrieval of PiCloud credentials
Current credentials include:
- cloudauth: key/secretkey
- ssh private keys (environments/volumes)
'''
import distutils
import os
from ConfigParser import RawConfigParser
from .. import cloudconfig as cc
import logging
cloudLog = logging.getLogger('Cloud.credentials')
credentials_dir = os.path.expanduser(os.path.join(cc.baselocation,'credentials'))
"""general"""
key_cache = {} # dictionary mapping key to all authentication information
def save_keydef(key_def, api_key=None):
"""Save key definition to necessary files. Overwrite existing credential
If *api_key* not None, verify it matches key_def
"""
key_def['api_key'] = int(key_def['api_key'])
if not api_key:
api_key = key_def['api_key']
else:
assert (key_def['api_key'] == int(api_key))
key_cache[api_key] = key_def
write_cloudauth(key_def) #flush authorization
write_sshkey(key_def) #flush ssh key
def download_key_by_key(api_key, api_secretkey):
"""Download and cache key"""
api_key = int(api_key)
from ..account import get_key_by_key
key_def = get_key_by_key(api_key, api_secretkey)
cloudLog.debug('Saving key for api_key %s' % api_key)
save_keydef(key_def, api_key)
return key_def
def download_key_by_login(api_key, username, password):
"""Download and cache key by using PiCloud login information"""
api_key = int(api_key)
from ..account import get_key
key_def = get_key(username, password, api_key)
save_keydef(key_def, api_key)
return key_def
def verify_key(api_key):
"""Return true if we have valid sshkey and cloudauth for this key.
False if any information is missing"""
key_def = key_cache.get(api_key, {})
if 'api_secretkey' not in key_def:
if not resolve_secretkey(api_key):
cloudLog.debug('verify_key failed: could not find secretkey for %s', api_key)
return False
if not 'private_key' in key_def:
res = verify_sshkey(api_key)
if not res:
cloudLog.debug('verify_key failed: could not find sshkey for %s', api_key)
return res
def get_credentials_path(api_key):
"""Resolve directory where credentials are stored for a given api_key
Create directory if it does not exist"""
path = os.path.join(credentials_dir, str(api_key))
try:
distutils.dir_util.mkpath(path)
except distutils.errors.DistutilsFileError:
cloudLog.exception('Could not generate credentials path %s' % path)
return path
""" Api keys"""
#constants:
api_key_section = 'ApiKey'
def get_cloudauth_path(api_key):
"""Locate cloudauth path"""
base_path = get_credentials_path(api_key)
return os.path.join(base_path, 'cloudauth')
def read_cloudauth(api_key):
"""Load cloudauth for api_key"""
path = get_cloudauth_path(api_key)
if not os.path.exists(path):
raise IOError('path %s not found' % path)
config = RawConfigParser()
config.read(path)
key_def = key_cache.get(api_key, {})
key = config.getint(api_key_section, 'key')
if key != api_key:
raise ValueError('Cloudauth Credentials do not match. Expected key %s, found key %s' % (api_key, key))
key_def['api_key'] = key
key_def['api_secretkey'] = config.get(api_key_section, 'secretkey')
key_cache[int(api_key)] = key_def
return key_def
def get_saved_secretkey(api_key):
"""Resolve the secret key for this api_key from the saved cloudauth credentials"""
api_key = int(api_key)
key_def = key_cache.get(api_key)
if not key_def:
key_def = read_cloudauth(api_key)
return key_def['api_secretkey']
def write_cloudauth(key_def):
"""Write key/secret key information defined by key_def into cloudauth"""
api_key = str(key_def['api_key'])
api_secretkey = key_def['api_secretkey']
path = get_cloudauth_path(api_key)
config = RawConfigParser()
config.add_section(api_key_section)
config.set(api_key_section, 'key', api_key)
config.set(api_key_section, 'secretkey', api_secretkey)
try:
with open(path, 'wb') as configfile:
config.write(configfile)
except IOError, e:
cloudLog.exception('Could not save cloudauth credentials to %s' % path)
try:
os.chmod(path, 0600)
except:
cloudLog.exception('Could not set permissions on %s' % path)
def resolve_secretkey(api_key):
"""Find secretkey for this api_key
Return None if key cannot be found
"""
try:
secretkey = get_saved_secretkey(api_key)
except Exception, e:
if not isinstance(e, IOError):
cloudLog.exception('Unexpected error reading credentials for api_key %s' % api_key)
return None
else:
return secretkey
""" SSH private keys
These private keys are used to connect to PiCloud
"""
def get_sshkey_path(api_key):
"""Locate where SSH key is stored"""
base_path = get_credentials_path(api_key)
return os.path.join(base_path,'id_rsa')
def read_sshkey(api_key):
"""Read sshkey from file.
Save to cache and return key_def. key will be in key_def['private_key']"""
path = get_sshkey_path(api_key)
with open(path, 'rb') as f:
private_key = f.read()
key_def = key_cache.get(api_key, {})
key_def['api_key'] = api_key
key_def['private_key'] = private_key
key_cache[int(api_key)] = key_def
return key_def
def verify_sshkey(api_key):
"""Verify sshkey presence
Todo: Actually validate key
"""
path = get_sshkey_path(api_key)
if os.path.exists(path):
try:
os.chmod(path, 0600)
except:
cloudLog.exception('Could not set permissions on %s' % path)
return True
return False
def write_sshkey(key_def):
"""Save key_def['private_key'] to sshkey_path"""
private_key = key_def['private_key']
api_key = key_def['api_key']
path = get_sshkey_path(api_key)
try:
with open(path, 'wb') as f:
f.write(private_key)
except IOError, e:
cloudLog.exception('Could not save ssh private key to %s' % path)
else:
try:
os.chmod(path, 0600)
except:
cloudLog.exception('Could not set permissions on %s' % path)
def test(key, secretkey):
key_has = verify_key(key)
print 'have key already? %s' % key_has
if not key_has:
print 'downloading'
download_key_by_key(key, secretkey)
key_has = verify_key(key)
print 'have key now? %s' % key_has
secretkey = resolve_secretkey(key)
print 'your key is %s' % secretkey
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/util/credentials.py
|
credentials.py
|
| 0.351089 | 0.07521 |
import re
from collections import defaultdict
variable_extract = re.compile(r'(?:[^\$\\]|\A){(\w+?)}')
def extract_vars(command_str):
"""Extract variables from a command string"""
matches = variable_extract.findall(command_str)
return list(set(matches))
variable_extract_dup = re.compile(r'([^\$\\]|\A){{(\w+?)}}') # matches vars in duplicate curlies
def generate_command(command_str, var_dct, skip_validate = False):
"""Fill in variables in command_str with ones from var_dct"""
if not skip_validate:
validate_command_args(command_str, var_dct)
# first duplicate all curlies
command_str = command_str.replace('{', '{{')
command_str = command_str.replace('}', '}}')
#print command_str
# now un-duplicate template variables
command_str = variable_extract_dup.sub('\\1{\\2}', command_str)
#print command_str
formatted_cmd = command_str.format(**var_dct)
# replace escaped items
formatted_cmd = formatted_cmd.replace('\\{', '{')
formatted_cmd = formatted_cmd.replace('\\}', '}')
return formatted_cmd
def validate_command_args(command_str, var_dct):
command_vars = extract_vars(command_str)
for var in command_vars:
if var not in var_dct:
raise ValueError('Parameter %s in command "%s" was not bound' % (var, command_str))
for var in var_dct:
if var and var not in command_vars:
raise ValueError('Argument named %s is not defined in command "%s"' % (var, command_str))
def _var_format_error(item):
return ValueError('%s: Incorrect format. Variables must be formatted as name=value' % item)
def extract_args(arg_list, allow_map = False):
"""Returns dictionary mapping keyword to list of arguments.
every list should be of length one if allow_map is false
"""
kwds = {}
if not arg_list:
return kwds
for arg in arg_list:
parts = arg.split('=', 1)
if len(parts) != 2:
raise _var_format_error(arg)
key, value = parts
if not key or not value:
raise _var_format_error(arg)
if key in kwds:
raise ValueError('key %s is multiply defined' % key)
if not allow_map:
kwds[key] = value
else:
kwd_values = []
# split string on non-escaped ','
buf_str = ''
while True:
idx = value.find(',')
if idx == -1:
break
if value[idx - 1] == '\\': #escaped
buf_str = buf_str + value[:idx+1]
else:
kwd_values.append(buf_str + value[:idx])
buf_str = ''
value = value[idx+1:]
if buf_str or value:
kwd_values.append(buf_str + value)
kwds[key] = kwd_values
return kwds
if __name__ == '__main__':
cmdstr = 'base'
print generate_command(cmdstr, {})
cmdstr = 'bash {} ${env} {1..2}'
print generate_command(cmdstr, {})
cmdstr = '{hello} bash {} ${{env_sub}} {1..2} {bye}'
print generate_command(cmdstr, {'hello' : 'HELLO',
'env_sub' : 'ENV_SUB',
'bye' : 'BYE'})
cmdstr = '{hello} bash {} ${{env_sub}} {1..2} \{bye\}'
print generate_command(cmdstr, {'hello' : 'HELLO',
'env_sub' : 'ENV_SUB'})
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/util/template.py
|
template.py
|
| 0.154983 | 0.143788 |
import sys
import types
import cPickle
import inspect
import datetime
import os
from functools import partial
from warnings import warn
def islambda(func):
return getattr(func,'func_name') == '<lambda>'
def funcname(func):
"""Return name of a callable (function, class, partial, etc.)"""
module = ""
if hasattr(func,'__module__'):
module = (func.__module__ if func.__module__ else '__main__')
"""Return a human readable name associated with a function"""
if inspect.ismethod(func):
nme = '.'.join([module,func.im_class.__name__,func.__name__])
elif inspect.isfunction(func):
nme = '.'.join([module,func.__name__])
elif inspect.isbuiltin(func):
return '.'.join([module,func.__name__])
elif isinstance(func,partial):
return 'partial_of_' + funcname(func.func)
elif inspect.isclass(func):
nme = '.'.join([module,func.__name__])
if hasattr(func, '__init__') and inspect.ismethod(func.__init__):
func = func.__init__
else:
return nme #can't extract more info for classes
else:
nme = 'type %s' % type(func)
if hasattr(func, '__name__'):
nme = '%s of %s' % (func.__name__, type(func))
return nme
nme += ' at ' + ':'.join([func.func_code.co_filename,str(func.func_code.co_firstlineno)])
return nme
def min_args(func):
"""Return minimum (required) number args this function has"""
if inspect.isfunction(func):
op_args = len(func.func_defaults) if func.func_defaults else 0
return func.func_code.co_argcount - op_args
elif inspect.ismethod(func):
return min_args(func.im_func) - 1
elif inspect.isclass(func):
if hasattr(func, '__init__'): #check class constructor
return min_args(func.__init__)
else:
return 0
raise TypeError('cannot deal with type: %s' % type(func))
def max_args(func):
"""Return maximum (required + default) number of arguments callable can take"""
if inspect.isfunction(func):
return func.func_code.co_argcount
elif inspect.ismethod(func):
return max_args(func.im_func) - 1
elif inspect.isclass(func): #check class constructor
if hasattr(func, '__init__'): #check class constructor
return max_args(func.__init__)
else:
return 0
raise TypeError('cannot deal with type: %s' % type(func))
def getargspec(func):
"""Returns an argspec or None if it can't be resolved
Our argspec is similar to inspect's except the name & if it is a method is appended as the first argument
Returns (name, is_method, args, *args, **kwargs, defaults)
"""
try:
argspec = inspect.getargspec(func)
except TypeError:
return None
out_list = [func.__name__, int(inspect.ismethod(func))]
out_list.extend(argspec)
return out_list
def validate_func_arguments(func, test_args, test_kwargs):
"""First pass validation to see if args/kwargs are compatible with the argspec
Probably doesn't catch everything that will error
Known to miss:
Validate that anonymous tuple params receive tuples
This is only valid for python 2.x
Returns true if validation passed; false if validation not supported
Exception raised if validation fails
"""
try:
argspec = inspect.getargspec(func)
except TypeError: #we can't check non-functions
return False
return validate_func_arguments_from_spec( (func.__name__, int(inspect.ismethod(func))) + argspec,
test_args,
test_kwargs.keys())
def validate_func_arguments_from_spec(argspec, test_args, test_kwargs_keys):
name, is_method, args, varargs, varkw, defaults = argspec
if defaults == None:
defaults = []
else:
defaults = list(defaults)
if is_method: #ignore self/cls
args = args[1:]
name += '()' #conform to python error reporting
test_args_len = len(test_args)
#kwd exist?
if not varkw:
for kw in test_kwargs_keys:
if kw not in args:
raise TypeError("%s got an unexpected keyword argument '%s'" % (name, kw))
#kwd not already bound by passed arg?
kwd_bound = args[test_args_len:] #These must all be default or bound to kwds
if not varkw:
for kw in test_kwargs_keys:
if kw not in kwd_bound:
raise TypeError("%s got multiple values for keyword argument '%s'" % (name, kw))
#verify argument count
firstdefault = len(args) - len(defaults)
nondefargs = args[:firstdefault]
defaults_injected = 0
for kw in test_kwargs_keys:
if kw in nondefargs:
defaults.append(None) #pretend another default is there for counting
defaults_injected += 1
min = len(args) - len(defaults)
max = len(args)
#correct for default injection
min+=defaults_injected
max+=defaults_injected
test_args_len += defaults_injected
if varargs:
max = sys.maxint
if min < 0:
min = 0
if test_args_len < min or max < test_args_len:
err_msg = '%s takes %s arguments (%d given)'
if min == max:
arg_c_msg = 'exactly %s' % min
elif test_args_len < min:
arg_c_msg = 'at least %s' % min
else:
arg_c_msg = 'at most %s' % max
raise TypeError(err_msg % (name, arg_c_msg, test_args_len))
return True
def fix_time_element(dct, key):
"""Fix time elements in dictionaries coming off the wire"""
item = dct.get(key)
if item == 'None': #returned by web instead of a NoneType None
item = None
dct[key] = item
if item:
dct[key] = datetime.datetime.strptime(item,'%Y-%m-%d %H:%M:%S')
return dct
def fix_sudo_path(path):
"""Correct permissions on path if using sudo from another user and keeping old users home directory"""
if os.name != 'posix':
return
sudo_uid = os.environ.get('SUDO_UID')
sudo_user = os.environ.get('SUDO_USER')
if sudo_uid != None and sudo_user:
sudo_uid = int(sudo_uid)
home = os.environ.get('HOME')
sudo_user_home = os.path.expanduser('~' + sudo_user)
# important: Only make modifications if user's home was not changed with sudo (e.g. sudo -H)
if home == sudo_user_home:
sudo_gid = os.environ.get('SUDO_GID')
sudo_gid = int(sudo_gid) if sudo_gid else -1
try:
os.chown(path, sudo_uid, sudo_gid)
except Exception, e:
warn('PiCloud cannot fix SUDO Paths. Error is %s:%s' % (type(e), str(e)))
"""Ordered Dictionary"""
import UserDict
class OrderedDict(UserDict.DictMixin):
def __init__(self, it = None):
self._keys = []
self._data = {}
if it:
for k,v in it:
self.__setitem__(k,v)
def __setitem__(self, key, value):
if key not in self._data:
self._keys.append(key)
self._data[key] = value
def insertAt(self, loc, key, value):
if key in self._data:
self._keys.remove(key) # drop existing position before re-inserting
self._keys.insert(loc, key)
self._data[key] = value
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
self._keys.remove(key)
def keys(self):
return list(self._keys)
def copy(self):
copyDict = OrderedDict()
copyDict._data = self._data.copy()
copyDict._keys = self._keys[:]
return copyDict
"""Python 2.5 support"""
from itertools import izip, chain, repeat
if sys.version_info[:2] < (2,6):
def izip_longest(*args):
def sentinel(counter = ([None]*(len(args)-1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = repeat(None)
iters = [chain(it, sentinel(), fillers) for it in args]
try:
for tup in izip(*iters):
yield tup
except IndexError:
pass
if __name__ == '__main__':
"""Validate the validate_func_arguments function"""
def foo0():
pass
def foo1(a):
pass
def foo2(a, b=2):
pass
def foo21(a, b):
pass
def foo3(a, (x,y), b):
"""lovely anonymous function"""
pass
def consist(func, *args, **kwargs):
typerror = None
try:
func(*args, **kwargs)
except TypeError, e:
typerror = e
print '%s %s %s' % (func, args, kwargs)
try:
validate_func_arguments(func, args, kwargs)
except TypeError, e:
if not typerror:
print 'unexpected typerror! %s' % str(e)
raise
else:
print '%s == %s' % (typerror, str(e))
else:
if typerror:
print 'missed error! %s' % typerror
raise
else:
print 'no error!'
consist(foo0)
consist(foo0, 2)
consist(foo0, k=2)
consist(foo0, 3, k=4)
consist(foo1)
consist(foo1, b=2)
consist(foo1, a=2)
consist(foo1, 2)
consist(foo1, 3)
consist(foo1, 3, a=2)
consist(foo1, 3, b=2)
consist(foo2)
consist(foo2, b=2)
consist(foo2, b=2, c=3)
consist(foo2, a=2)
consist(foo2, a=2, b=2)
consist(foo2, a=2, b=2, c=3)
consist(foo2, 2, a=10)
consist(foo2, 3)
consist(foo2, 3, 4)
consist(foo2, 3, 4, 7)
consist(foo2, 3, b=2)
consist(foo2, 3, a=10, b=2)
consist(foo2, 3, b=2, c=2)
consist(foo2, 3, a=10, b=2, c=4)
consist(foo21, 3, 4)
consist(foo21, 3, b=4)
consist(foo21, a=3, b=4)
consist(foo21, b=4)
consist(foo21, a=4)
consist(foo21)
consist(foo21, 4, 3, 5)
consist(foo3, 2, (4,3), 9)
consist(foo3, 2, (4,3), b=9)
consist(foo3, 2, (4,3), a=9)
consist(foo3, 2, (4,3), a=9, b=9)
consist(foo3, 2, a=9, b=9)
consist(foo3, 2, (4,3))
#we can't catch below..
#consist(foo3, 2, 10, 12)
|
scicloud
|
/scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/util/__init__.py
|
__init__.py
|
| 0.306527 | 0.096621 |
# _SciCM_: Scientific Colour Maps
[](https://github.com/MBravoS/scicm/releases) [](https://pypi.python.org/pypi/scicm)
<p align="center">
<img src="https://raw.githubusercontent.com/MBravoS/scicm/master/images/logo.png" width="300">
</p>
**_SciCM_** is a Python package aimed at providing a large set of colour maps designed for scientific data visualisation.
The colour maps in _SciCM_ have been designed to be as interchangeable as possible within the same category, e.g., all diverging colour maps included in _SciCM_ do an (almost) equal job of displaying the data.
All colour maps included in _SciCM_ remain readable for people with red-green colour blindness (the most common type).
This design frees the user in their choice of colour map to use for their data visualisation.
_SciCM_ also includes some simple colour map manipulation tools, for users that want to further customise their colour maps.
## Quick start
Upon importing _SciCM_, the colour maps are registered with matplotlib, so they can be accessed by passing `cmap='scicm.cmapname'` to any plotting function that accepts a colour map (e.g. the `cmap` keyword in matplotlib).
The colour map objects themselves can also be explicitly accessed using `scicm.cm.cmapname`.
All colour maps have a reversed version, accessible through the same naming convention used by matplotlib (i.e. `cmapname_r`).
A simple example of _SciCM_ in use:
```python
import numpy as np, matplotlib.pyplot as plt, scicm
x = np.random.default_rng().normal(size=(200, 200))
plt.imshow(x, cmap='scicm.Stone')
plt.show()
```
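The colour map objects can also be used directly, together with the reversed variants mentioned above. A minimal sketch, assuming only the `Stone` map from the example above:
```python
import numpy as np, matplotlib.pyplot as plt, scicm

x = np.random.default_rng().normal(size=(200, 200))
fig, axes = plt.subplots(1, 2)
axes[0].imshow(x, cmap=scicm.cm.Stone)    # explicit colour map object
axes[1].imshow(x, cmap='scicm.Stone_r')   # registered reversed variant
plt.show()
```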
### Included Colour Maps
<p align="center">
<img src="https://raw.githubusercontent.com/MBravoS/scicm/master/images/scicm_all.png" width="800">
</p>
## Documentation and use guides
_SciCM_'s GitHub Wiki contains an [extended quick start guide](https://github.com/MBravoS/scicm/wiki/Quick-Start), the [full documentation](https://github.com/MBravoS/scicm/wiki/Code-Documentation) of the package, and a [guide on how to choose the best colour map for your data](https://github.com/MBravoS/scicm/wiki/How-to-choose-which-colour-map-to-use).
## _SciCM_ in the broader colour map Python package ecosystem
_SciCM_ is not the first package to include "good" (perceptually-uniform) colour maps, but it meaningfully expands the current availability of such maps.
Compared to other similar packages:
- [_matplotlib_](https://matplotlib.org/stable/tutorials/colors/colormaps.html): Includes only 5 perceptually-uniform maps, which is less than 10% of all the available colour maps. The main aim of _SciCM_ is to provide perceptually-uniform alternatives to the sequential, diverging, and cyclic colour map types in _matplotlib_.
- [_ColorCET_](https://github.com/holoviz/colorcet): Perhaps the closest colour map package to _SciCM_ in both scope and size. The main difference being that _ColorCET_ features a large set of variations for a small number of individual colour maps, whereas _SciCM_ provides a large set of variations for a small number of colour map "types".
- [_cmocean_](https://github.com/matplotlib/cmocean): A relatively small set of perceptually uniform colour maps, with a design clearly catered for geographic and oceanographic use. Of note is the `oxy` colour map included in _cmocean_, which was the main source of inspiration for _SciCM_'s segmented category of colour maps.
- [_CMasher_](https://github.com/1313e/CMasher): While there is some overlap between both packages, _CMasher_ and _SciCM_ are natural companions, as the two focus on offering alternatives to different sets of _matplotlib_'s colour map categories.
## Installation guide
The package is available for installation using pip:
>pip install scicm
Although you may wish to install it directly from GitHub, the following example being for the _master_ branch:
>pip install git+https://github.com/MBravoS/scicm.git@master
## How to cite the use of _SciCM_
If you are submitting work that uses _SciCM_ for publication in a scientific journal, please include a mention of your use.
Some journals include a dedicated section for this purpose (e.g., the [_Software_ section in the Astrophysical Journal](https://journals.aas.org/aastexguide/#software)), which would be the natural place to mention SciCM (please include a link to this repository).
If such a section is not included in your journal of choice, please consider adding the following to your acknowledgements:
> The analysis in this work has been performed using the Python programming language, with the open-source package _SciCM_ (https://github.com/MBravoS/scicm).
Feel free to expand the previous statement to include the rest of the software used in your work!
Note that we aim to submit _SciCM_ for publication sometime in 2023, so how to acknowledge your use of _SciCM_ will (hopefully) soon change.
|
scicm
|
/scicm-1.0.4.tar.gz/scicm-1.0.4/README.md
|
README.md
|
import numpy as np, matplotlib.pyplot as plt, scicm
x = np.random.default_rng().normal(size=(200, 200))
plt.imshow(x, cmap='scicm.Stone')
plt.show()
| 0.375248 | 0.921711 |
===================
SCICO Release Notes
===================
Version 0.0.4 (2023-08-03)
----------------------------
• Add new `Function` class for representing array-to-array mappings with more
than one input.
• Add new methods and a function for computing Jacobian-vector products for
`Operator` objects.
• Add new proximal ADMM solvers.
• Add new ADMM subproblem solvers for problems involving a sum-of-convolutions
operator.
• Extend support for other ML models including UNet, ODP and MoDL.
• Add functionality for training Flax-based ML models and for data generation.
• Enable diagnostics for ML training loops.
• Support ``jaxlib`` and ``jax`` versions 0.4.3 to 0.4.14.
• Change required packages and version numbers, including more recent version
for `flax`.
• Drop support for Python 3.7.
• Add support for 3D tomographic projection with the ASTRA Toolbox.
Version 0.0.3 (2022-09-21)
----------------------------
• Change required packages and version numbers, including more recent version
requirements for `numpy`, `scipy`, `svmbir`, and `ray`.
• Package `bm4d` removed from main requirements list due to issue #342.
• Support ``jaxlib`` versions 0.3.0 to 0.3.15 and ``jax`` versions
0.3.0 to 0.3.17.
• Rename linear operators in ``radon_astra`` and ``radon_svmbir`` modules
to ``TomographicProjector``.
• Add support for fan beam CT in ``radon_svmbir`` module.
• Add function ``linop.linop_from_function`` for constructing linear
operators from functions.
• Enable addition operator for functionals.
• Completely new implementation of ``BlockArray`` class.
• Additional solvers in ``scico.solver``.
• New Huber norm (``HuberNorm``) and set distance functionals (``SetDistance``
and ``SquaredSetDistance``).
• New loss functions ``loss.SquaredL2AbsLoss`` and
``loss.SquaredL2SquaredAbsLoss`` for phase retrieval problems.
• Add interface to BM4D denoiser.
• Change interfaces of ``linop.FiniteDifference`` and ``linop.DFT``.
• Change filenames of some example scripts (and corresponding notebooks).
• Add support for Python 3.7.
• New ``DiagonalStack`` linear operator.
• Add support for non-linear operators to ``optimize.PDHG`` optimizer class.
• Various bug fixes.
Version 0.0.2 (2022-02-14)
----------------------------
• Additional optimization algorithms: Linearized ADMM and PDHG.
• Additional Abel transform and array slicing linear operators.
• Additional nuclear norm functional.
• New module ``scico.ray.tune`` providing a simplified interface to Ray Tune.
• Move optimization algorithms into ``optimize`` subpackage.
• Additional iteration stats columns for iterative ADMM subproblem solvers.
• Renamed "Primal Rsdl" to "Prml Rsdl" in displayed iteration stats.
• Move some functions from ``util`` and ``math`` modules to new ``array``
module.
• Bump pinned ``jaxlib`` and ``jax`` versions to 0.3.0.
Version 0.0.1 (2021-11-24)
----------------------------
• Initial release.
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/CHANGES.rst
|
CHANGES.rst
|
| 0.919787 | 0.73137 |
.. image:: https://img.shields.io/badge/python-3.8+-green.svg
:target: https://www.python.org/
:alt: Python >= 3.8
.. image:: https://img.shields.io/github/license/lanl/scico.svg
:target: https://github.com/lanl/scico/blob/main/LICENSE
:alt: Package License
.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/psf/black
:alt: Code style
.. image:: https://readthedocs.org/projects/scico/badge/?version=latest
:target: http://scico.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://github.com/lanl/scico/actions/workflows/lint.yml/badge.svg
:target: https://github.com/lanl/scico/actions/workflows/lint.yml
:alt: Lint status
.. image:: https://github.com/lanl/scico/actions/workflows/pytest_ubuntu.yml/badge.svg
:target: https://github.com/lanl/scico/actions/workflows/pytest_ubuntu.yml
:alt: Test status
.. image:: https://codecov.io/gh/lanl/scico/branch/main/graph/badge.svg?token=wQimmjnzFf
:target: https://codecov.io/gh/lanl/scico
:alt: Test coverage
.. image:: https://www.codefactor.io/repository/github/lanl/scico/badge/main
:target: https://www.codefactor.io/repository/github/lanl/scico/overview/main
:alt: CodeFactor
.. image:: https://badge.fury.io/py/scico.svg
:target: https://badge.fury.io/py/scico
:alt: PyPI package version
.. image:: https://static.pepy.tech/personalized-badge/scico?period=month&left_color=grey&right_color=brightgreen
:target: https://pepy.tech/project/scico
:alt: PyPI download statistics
.. image:: https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg
:target: https://nbviewer.jupyter.org/github/lanl/scico-data/tree/main/notebooks/index.ipynb
:alt: View notebooks at nbviewer
.. image:: https://mybinder.org/badge_logo.svg
:target: https://mybinder.org/v2/gh/lanl/scico-data/binder?labpath=notebooks%2Findex.ipynb
:alt: Run notebooks on binder
.. image:: https://colab.research.google.com/assets/colab-badge.svg
:target: https://colab.research.google.com/github/lanl/scico-data/blob/colab/notebooks/index.ipynb
:alt: Run notebooks on google colab
.. image:: https://joss.theoj.org/papers/10.21105/joss.04722/status.svg
:target: https://doi.org/10.21105/joss.04722
:alt: JOSS paper
Scientific Computational Imaging Code (SCICO)
=============================================
SCICO is a Python package for solving the inverse problems that arise in scientific imaging applications. Its primary focus is providing methods for solving ill-posed inverse problems by using an appropriate prior model of the reconstruction space. SCICO includes a growing suite of operators, cost functionals, regularizers, and optimization routines that may be combined to solve a wide range of problems, and is designed so that it is easy to add new building blocks. SCICO is built on top of `JAX <https://github.com/google/jax>`_, which provides features such as automatic gradient calculation and GPU acceleration.
`Documentation <https://scico.rtfd.io/>`_ is available online. If you use this software for published work, please cite the corresponding `JOSS Paper <https://doi.org/10.21105/joss.04722>`_ (see bibtex entry ``balke-2022-scico`` in ``docs/source/references.bib``).
Installation
============
See the `online documentation <https://scico.rtfd.io/en/latest/install.html>`_ for installation instructions.
Usage Examples
==============
Usage examples are available as Python scripts and Jupyter Notebooks. Example scripts are located in ``examples/scripts``. The corresponding Jupyter Notebooks are provided in the `scico-data <https://github.com/lanl/scico-data>`_ submodule and symlinked to ``examples/notebooks``. They are also viewable on `GitHub <https://github.com/lanl/scico-data/tree/main/notebooks>`_ or `nbviewer <https://nbviewer.jupyter.org/github/lanl/scico-data/tree/main/notebooks/index.ipynb>`_, or can be run online by `binder <https://mybinder.org/v2/gh/lanl/scico-data/binder?labpath=notebooks%2Findex.ipynb>`_.
License
=======
SCICO is distributed as open-source software under a BSD 3-Clause License (see the ``LICENSE`` file for details).
LANL open source approval reference C20091.
(c) 2020-2023. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S. Department of Energy/National Nuclear Security Administration. All rights in the program are reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear Security Administration. The Government has granted for itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare derivative works, distribute copies to the public, perform publicly and display publicly, and to permit others to do so.
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/README.rst
|
README.rst
|
| 0.893643 | 0.579311 |
.. _installing:
Installing SCICO
================
SCICO requires Python version 3.8 or later. (Version 3.9 is
recommended as it is the version under which SCICO has been most
thoroughly tested.) It is supported on both Linux and macOS, but is
not currently supported on Windows due to the limited support for
``jaxlib`` on Windows. However, Windows users can use SCICO via the
`Windows Subsystem for Linux
<https://docs.microsoft.com/en-us/windows/wsl/about>`_ (WSL). Guides
exist for using WSL with `CPU only
<https://docs.microsoft.com/en-us/windows/wsl/install-win10>`_ and
with `GPU support
<https://docs.microsoft.com/en-us/windows/win32/direct3d12/gpu-cuda-in-wsl>`_.
From PyPI
---------
The simplest way to install the most recent release of SCICO from
`PyPI <https://pypi.python.org/pypi/scico/>`_ is
::
pip install scico
From GitHub
-----------
SCICO can be downloaded from the `GitHub repo
<https://github.com/lanl/scico>`_. Note that, since the SCICO repo has
a submodule, it should be cloned via the command
::
git clone --recurse-submodules [email protected]:lanl/scico.git
Install using the commands
::
cd scico
pip install -r requirements.txt
pip install -e .
GPU Support
-----------
The instructions above install a CPU-only version of SCICO. To install
a version with GPU support:
1. Follow the CPU-only instructions above.
2. Install the version of jaxlib with GPU support, as described in the `JAX installation
instructions <https://github.com/google/jax#installation>`_.
In the simplest case, the appropriate command is
::
pip install --upgrade "jax[cuda]" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
but it may be necessary to explicitly specify the ``jaxlib``
version if the most recent release is not yet supported by SCICO
(as specified in the ``requirements.txt`` file), or if using a
version of CUDA older than 11.4, or CuDNN older than 8.2, in which
case the command would be of the form ::
pip install --upgrade "jaxlib==0.4.2+cuda11.cudnn82" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
with appropriate substitution of ``jaxlib``, CUDA, and CuDNN version numbers.
Additional Dependencies
-----------------------
See :ref:`example_depend` for instructions on installing dependencies
related to the examples.
For Developers
--------------
See :ref:`scico_dev_contributing` for instructions on installing a
version of SCICO suitable for development.
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/install.rst
|
install.rst
|
| 0.790085 | 0.321527 |
Why SCICO?
==========
Advantages of JAX-based Design
------------------------------
The vast majority of scientific computing packages in Python are based
on `NumPy <https://numpy.org/>`__ and `SciPy <https://scipy.org/>`__.
SCICO, in contrast, is based on `JAX
<https://jax.readthedocs.io/en/latest/>`__, which provides most of the
same features, but with the addition of automatic differentiation, GPU
support, and just-in-time (JIT) compilation. (The availability of
these features in SCICO is subject to some :ref:`caveats
<non_jax_dep>`.) SCICO users and developers are advised to become
familiar with the `differences between JAX and
NumPy <https://jax.readthedocs.io/en/latest/notebooks/thinking_in_jax.html>`_.
While recent advances in automatic differentiation have primarily been
driven by its important role in deep learning, it is also invaluable in
a functional minimization framework such as SCICO. The most obvious
advantage is allowing the use of gradient-based minimization methods
without the need for tedious mathematical derivation of an expression
for the gradient. Equally valuable, though, is the ability to
automatically compute the adjoint operator of a linear operator, the
manual derivation of which is often time-consuming.
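The following minimal sketch (plain JAX rather than SCICO itself, which
builds on the same machinery) illustrates both points: ``jax.grad`` provides
the gradient of a scalar loss, and the vector-Jacobian product from
``jax.vjp`` applies the adjoint of a linear operator, with no manual
derivation in either case ::

    import jax
    import jax.numpy as jnp

    def D(x):  # forward finite-difference operator (linear)
        return x[1:] - x[:-1]

    x = jnp.zeros(8)
    y = jnp.arange(7.0)

    # Gradient of a scalar-valued loss, without a hand-derived expression
    loss = lambda u: 0.5 * jnp.sum((D(u) - y) ** 2)
    g = jax.grad(loss)(x)

    # Adjoint (transpose) of D applied to y, via the vector-Jacobian product
    _, D_vjp = jax.vjp(D, x)
    DTy = D_vjp(y)[0]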
GPU support and JIT compilation both offer the potential for significant
code acceleration, with the achievable speed gains depending
on the algorithm/function to be executed. In many cases, a speed
improvement by an order of magnitude or more can be obtained by running
the same code on a GPU rather than a CPU, and similar speed gains can
sometimes also be obtained via JIT compilation.
The figure below shows timing results obtained on a compute server
with an Intel Xeon Gold 6230 CPU and NVIDIA GeForce RTX 2080 Ti
GPU. It is interesting to note that for :class:`.FiniteDifference` the
GPU provides no acceleration, while JIT provides more than an order of
magnitude of speed improvement on both CPU and GPU. For :class:`.DFT`
and :class:`.Convolve`, significant JIT acceleration is limited to the
GPU, which also provides significant acceleration over the CPU.
.. image:: /figures/jax-timing.png
:align: center
:width: 95%
:alt: Timing results for SCICO operators on CPU and GPU with and without JIT
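The usage pattern behind such measurements is straightforward; a rough
sketch (plain JAX, with an arbitrary array size, run on whatever default
device is available) is ::

    import timeit

    import jax
    import jax.numpy as jnp

    def fdiff(x):
        return x[1:] - x[:-1]

    x = jnp.ones(2 ** 20)
    fdiff_jit = jax.jit(fdiff)
    fdiff_jit(x).block_until_ready()  # trigger compilation before timing

    t_plain = timeit.timeit(lambda: fdiff(x).block_until_ready(), number=100)
    t_jit = timeit.timeit(lambda: fdiff_jit(x).block_until_ready(), number=100)
    print(f"un-jitted: {t_plain:.3f} s   jitted: {t_jit:.3f} s")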
Related Packages
----------------
Many elements of SCICO are partially available in other packages. We
briefly review them here, highlighting some of the main differences with
SCICO.
`GlobalBioIm <https://biomedical-imaging-group.github.io/GlobalBioIm/>`__
is similar in structure to SCICO (and a major inspiration for SCICO),
providing linear operators and solvers for inverse problems in imaging.
However, it is written in MATLAB and is thus not usable in a completely
free environment. It also lacks the automatic adjoint calculation and
simple GPU support offered by SCICO.
`PyLops <https://pylops.readthedocs.io>`__ provides a linear operator
class and many built-in linear operators. These operators are compatible
with many `SciPy <https://scipy.org/>`__ solvers. GPU support is
provided via `CuPy <https://cupy.dev>`__, which has the disadvantage
that switching from CPU to GPU requires code changes, unlike SCICO and
`JAX <https://jax.readthedocs.io/en/latest/>`__. SCICO is more focused
on computational imaging than PyLops and has several specialized
operators that PyLops does not.
`Pycsou <https://matthieumeo.github.io/pycsou/html/index>`__, like
SCICO, is a Python project inspired by GlobalBioIm. Since it is based on
PyLops, it shares that project's disadvantages with respect to SCICO.
`ODL <https://odlgroup.github.io/odl/>`__ provides a variety of
operators and related infrastructure for prototyping of inverse
problems. It is built on top of
`NumPy <https://numpy.org/>`__/`SciPy <https://scipy.org/>`__, and does
not support any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__.
`ProxImaL <http://www.proximal-lang.org/en/latest/>`__ is a Python
package for image optimization problems. Like SCICO and many of the
other projects listed here, problems are specified by combining objects
representing operators, functionals, and solvers. It does not support
any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__.
`ProxMin <https://github.com/pmelchior/proxmin>`__ provides a set of
proximal optimization algorithms for minimizing non-smooth functionals.
It is built on top of
`NumPy <https://numpy.org/>`__/`SciPy <https://scipy.org/>`__, and does
not support any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__ (however, an open issue
suggests that `JAX <https://jax.readthedocs.io/en/latest/>`__
compatibility is planned).
`CVXPY <https://www.cvxpy.org>`__ provides a flexible language for
defining optimization problems and a wide selection of solvers, but has
limited support for matrix-free methods.
Other related projects that may be of interest include:
- `ToMoBAR <https://github.com/dkazanc/ToMoBAR>`__
- `CCPi-Regularisation Toolkit <https://github.com/vais-ral/CCPi-Regularisation-Toolkit>`__
- `SPORCO <https://github.com/lanl/sporco>`__
- `SigPy <https://github.com/mikgroup/sigpy>`__
- `MIRT <https://github.com/JeffFessler/MIRT.jl>`__
- `BART <http://mrirecon.github.io/bart/>`__
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/advantages.rst | advantages.rst
.. _example_notebooks:
Usage Examples
==============
.. toctree::
:maxdepth: 1
.. include:: include/examplenotes.rst
Organized by Application
------------------------
.. toctree::
:maxdepth: 1
Computed Tomography
^^^^^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/ct_abel_tv_admm
examples/ct_abel_tv_admm_tune
examples/ct_astra_noreg_pcg
examples/ct_astra_3d_tv_admm
examples/ct_astra_tv_admm
examples/ct_astra_weighted_tv_admm
examples/ct_svmbir_tv_multi
examples/ct_svmbir_ppp_bm3d_admm_cg
examples/ct_svmbir_ppp_bm3d_admm_prox
examples/ct_fan_svmbir_ppp_bm3d_admm_prox
examples/ct_astra_modl_train_foam2
examples/ct_astra_odp_train_foam2
examples/ct_astra_unet_train_foam2
Deconvolution
^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/deconv_circ_tv_admm
examples/deconv_tv_admm
examples/deconv_tv_padmm
examples/deconv_tv_admm_tune
examples/deconv_microscopy_tv_admm
examples/deconv_microscopy_allchn_tv_admm
examples/deconv_ppp_bm3d_admm
examples/deconv_ppp_bm3d_pgm
examples/deconv_ppp_dncnn_admm
examples/deconv_ppp_dncnn_padmm
examples/deconv_ppp_bm4d_admm
examples/deconv_modl_train_foam1
examples/deconv_odp_train_foam1
Sparse Coding
^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/sparsecode_admm
examples/sparsecode_conv_admm
examples/sparsecode_conv_md_admm
examples/sparsecode_pgm
examples/sparsecode_poisson_pgm
Miscellaneous
^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/demosaic_ppp_bm3d_admm
examples/superres_ppp_dncnn_admm
examples/denoise_l1tv_admm
examples/denoise_tv_admm
examples/denoise_tv_pgm
examples/denoise_tv_multi
examples/denoise_cplx_tv_nlpadmm
examples/denoise_cplx_tv_pdhg
examples/denoise_dncnn_universal
examples/diffusercam_tv_admm
examples/video_rpca_admm
examples/ct_astra_datagen_foam2
examples/deconv_datagen_bsds
examples/deconv_datagen_foam1
examples/denoise_datagen_bsds
Organized by Regularization
---------------------------
.. toctree::
:maxdepth: 1
Plug and Play Priors
^^^^^^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/ct_svmbir_ppp_bm3d_admm_cg
examples/ct_svmbir_ppp_bm3d_admm_prox
examples/ct_fan_svmbir_ppp_bm3d_admm_prox
examples/deconv_ppp_bm3d_admm
examples/deconv_ppp_bm3d_pgm
examples/deconv_ppp_dncnn_admm
examples/deconv_ppp_dncnn_padmm
examples/deconv_ppp_bm4d_admm
examples/demosaic_ppp_bm3d_admm
examples/superres_ppp_dncnn_admm
Total Variation
^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/ct_abel_tv_admm
examples/ct_abel_tv_admm_tune
examples/ct_astra_tv_admm
examples/ct_astra_3d_tv_admm
examples/ct_astra_weighted_tv_admm
examples/ct_svmbir_tv_multi
examples/deconv_circ_tv_admm
examples/deconv_tv_admm
examples/deconv_tv_admm_tune
examples/deconv_tv_padmm
examples/deconv_microscopy_tv_admm
examples/deconv_microscopy_allchn_tv_admm
examples/denoise_l1tv_admm
examples/denoise_tv_admm
examples/denoise_tv_pgm
examples/denoise_tv_multi
examples/denoise_cplx_tv_nlpadmm
examples/denoise_cplx_tv_pdhg
examples/diffusercam_tv_admm
Sparsity
^^^^^^^^
.. toctree::
:maxdepth: 1
examples/diffusercam_tv_admm
examples/sparsecode_admm
examples/sparsecode_conv_admm
examples/sparsecode_conv_md_admm
examples/sparsecode_pgm
examples/sparsecode_poisson_pgm
examples/video_rpca_admm
Machine Learning
^^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/ct_astra_datagen_foam2
examples/ct_astra_modl_train_foam2
examples/ct_astra_odp_train_foam2
examples/ct_astra_unet_train_foam2
examples/deconv_datagen_bsds
examples/deconv_datagen_foam1
examples/deconv_modl_train_foam1
examples/deconv_odp_train_foam1
examples/denoise_datagen_bsds
examples/denoise_dncnn_train_bsds
examples/denoise_dncnn_universal
Organized by Optimization Algorithm
-----------------------------------
.. toctree::
:maxdepth: 1
ADMM
^^^^
.. toctree::
:maxdepth: 1
examples/ct_abel_tv_admm
examples/ct_abel_tv_admm_tune
examples/ct_astra_tv_admm
examples/ct_astra_3d_tv_admm
examples/ct_astra_weighted_tv_admm
examples/ct_svmbir_tv_multi
examples/ct_svmbir_ppp_bm3d_admm_cg
examples/ct_svmbir_ppp_bm3d_admm_prox
examples/ct_fan_svmbir_ppp_bm3d_admm_prox
examples/deconv_circ_tv_admm
examples/deconv_tv_admm
examples/deconv_tv_admm_tune
examples/deconv_microscopy_tv_admm
examples/deconv_microscopy_allchn_tv_admm
examples/deconv_ppp_bm3d_admm
examples/deconv_ppp_dncnn_admm
examples/deconv_ppp_bm4d_admm
examples/diffusercam_tv_admm
examples/sparsecode_admm
examples/sparsecode_conv_admm
examples/sparsecode_conv_md_admm
examples/demosaic_ppp_bm3d_admm
examples/superres_ppp_dncnn_admm
examples/denoise_l1tv_admm
examples/denoise_tv_admm
examples/denoise_tv_multi
examples/video_rpca_admm
Linearized ADMM
^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/ct_svmbir_tv_multi
examples/denoise_tv_multi
Proximal ADMM
^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/deconv_tv_padmm
examples/denoise_tv_multi
examples/denoise_cplx_tv_nlpadmm
examples/deconv_ppp_dncnn_padmm
Non-linear Proximal ADMM
^^^^^^^^^^^^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
examples/denoise_cplx_tv_nlpadmm
PDHG
^^^^
.. toctree::
:maxdepth: 1
examples/ct_svmbir_tv_multi
examples/denoise_tv_multi
examples/denoise_cplx_tv_pdhg
PGM
^^^
.. toctree::
:maxdepth: 1
examples/deconv_ppp_bm3d_pgm
examples/sparsecode_pgm
examples/sparsecode_poisson_pgm
examples/denoise_tv_pgm
PCG
^^^
.. toctree::
:maxdepth: 1
examples/ct_astra_noreg_pcg
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples.rst | examples.rst
import importlib
import inspect
import os
import pkgutil
import sys
from glob import glob
from runpy import run_path


def run_conf_files(vardict=None, path=None):
    """Execute Python files in conf directory.

    Args:
        vardict: Dictionary into which variable names should be inserted.
            Defaults to empty dict.
        path: Path to conf directory. Defaults to path to this module.

    Returns:
        A dict populated with variables defined during execution of the
        configuration files.
    """
    if vardict is None:
        vardict = {}
    if path is None:
        path = os.path.dirname(__file__)
    files = os.path.join(path, "conf", "*.py")
    for f in sorted(glob(files)):
        conf = run_path(f, init_globals=vardict)
        for k, v in conf.items():
            if len(k) >= 4 and k[0:2] == "__" and k[-2:] == "__":  # ignore __<name>__ variables
                continue
            vardict[k] = v
    return vardict


def package_classes(package):
    """Get a list of classes in a package.

    Return a list of qualified names of classes in the specified
    package. Classes in modules with names beginning with an "_" are
    omitted, as are classes whose internal module name record is not
    the same as the module in which they are found (i.e. indicating
    that they have been imported from elsewhere).

    Args:
        package: Reference to package for which classes are to be listed
            (not package name string).

    Returns:
        A list of qualified names of classes in the specified package.
    """
    classes = []
    # Iterate over modules in package
    for importer, modname, _ in pkgutil.walk_packages(
        path=package.__path__, prefix=(package.__name__ + "."), onerror=lambda x: None
    ):
        # Skip modules whose names begin with a "_"
        if modname.split(".")[-1][0] == "_":
            continue
        importlib.import_module(modname)
        # Iterate over module members
        for name, obj in inspect.getmembers(sys.modules[modname]):
            if inspect.isclass(obj):
                # Get internal module name of class for comparison with working module name
                try:
                    objmodname = getattr(sys.modules[modname], obj.__name__).__module__
                except Exception:
                    objmodname = None
                if objmodname == modname:
                    classes.append(modname + "." + obj.__name__)
    return classes


def get_text_indentation(text, skiplines=0):
    """Compute the leading whitespace indentation in a block of text.

    Args:
        text: A block of text as a string.
        skiplines: Number of initial lines to skip when scanning for the
            indentation.

    Returns:
        Indentation length.
    """
    min_indent = len(text)
    lines = text.splitlines()
    if len(lines) > skiplines:
        lines = lines[skiplines:]
    else:
        return None
    for line in lines:
        if len(line) > 0:
            indent = len(line) - len(line.lstrip())
            if indent < min_indent:
                min_indent = indent
    return min_indent


def add_text_indentation(text, indent):
    """Insert leading whitespace into a block of text.

    Args:
        text: A block of text as a string.
        indent: Number of leading spaces to insert on each line.

    Returns:
        Text with additional indentation.
    """
    lines = text.splitlines()
    for n, line in enumerate(lines):
        if len(line) > 0:
            lines[n] = (" " * indent) + line
    return "\n".join(lines)


def insert_inheritance_diagram(clsqname, parts=None, default_nparts=2):
    """Insert an inheritance diagram into a class docstring.

    No action is taken for classes without a base class, and for classes
    without a docstring.

    Args:
        clsqname: Qualified name (i.e. including module name path) of class.
        parts: A dict mapping qualified class names to custom values for
            the ":parts:" directive.
        default_nparts: Default value for the ":parts:" directive.
    """
    # Extract module name and class name from qualified class name
    clspth = clsqname.split(".")
    modname = ".".join(clspth[0:-1])
    clsname = clspth[-1]
    # Get reference to class
    cls = getattr(sys.modules[modname], clsname)
    # Return immediately if class has no base classes
    if getattr(cls, "__bases__") == (object,):
        return
    # Get current docstring
    docstr = getattr(cls, "__doc__")
    # Return immediately if class has no docstring
    if docstr is None:
        return
    # Use class-specific parts or default parts directive value
    if parts and clsqname in parts:
        nparts = parts[clsqname]
    else:
        nparts = default_nparts
    # Split docstring into individual lines
    lines = docstr.splitlines()
    # Return immediately if there are no lines
    if not lines:
        return
    # Cut leading whitespace lines
    n = 0
    for n, line in enumerate(lines):
        if line != "":
            break
    lines = lines[n:]
    # Define inheritance diagram insertion text
    idstr = f"""
    .. inheritance-diagram:: {clsname}
       :parts: {nparts}

    """
    docstr_indent = get_text_indentation(docstr, skiplines=1)
    if docstr_indent is not None and docstr_indent > 4:
        idstr = add_text_indentation(idstr, docstr_indent - 4)
    # Insert inheritance diagram after summary line and whitespace line following it
    lines.insert(2, idstr)
    # Construct new docstring and attach it to the class
    extdocstr = "\n".join(lines)
    setattr(cls, "__doc__", extdocstr)
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/docsutil.py | docsutil.py
Overview
========
`Scientific Computational Imaging Code (SCICO)
<https://github.com/lanl/scico>`__ is a Python package for solving the
inverse problems that arise in scientific imaging applications. Its
primary focus is providing methods for solving ill-posed inverse
problems by using an appropriate prior model of the reconstruction
space. SCICO includes a growing suite of operators, cost functionals,
regularizers, and optimization algorithms that may be combined to
solve a wide range of problems, and is designed so that it is easy to
add new building blocks. When solving a problem, these components are
combined in a way that makes code for optimization routines look like
the pseudocode in scientific papers. SCICO is built on top of `JAX
<https://jax.readthedocs.io/en/latest/>`__ rather than `NumPy
<https://numpy.org/>`__, enabling GPU/TPU acceleration, just-in-time
compilation, and automatic gradient functionality, which is used to
automatically compute the adjoints of linear operators. An example of
how to solve a multi-channel tomography problem with SCICO is shown in
the figure below.
.. image:: /figures/scico-tomo-overview.png
:align: center
:width: 95%
:alt: Solving a multi-channel tomography problem with SCICO.
|
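As a schematic illustration of this composition style, a TV-regularized
deconvolution problem might be set up roughly as follows. This is a sketch
patterned on the :ref:`usage examples <example_notebooks>`; the measurement
``y`` and blur kernel ``h`` are placeholders here, and argument names should
be checked against the API documentation rather than taken as definitive. ::

    import scico.numpy as snp
    from scico import functional, linop, loss
    from scico.optimize.admm import ADMM, LinearSubproblemSolver

    y = snp.zeros((256, 256))      # placeholder measurement (given in practice)
    h = snp.ones((5, 5)) / 25.0    # placeholder blur kernel

    A = linop.Convolve(h=h, input_shape=y.shape)   # forward model
    f = loss.SquaredL2Loss(y=y, A=A)               # data fidelity term
    g = 1e-2 * functional.L21Norm()                # isotropic TV penalty
    C = linop.FiniteDifference(input_shape=y.shape)

    solver = ADMM(
        f=f, g_list=[g], C_list=[C], rho_list=[1e1],
        x0=snp.zeros(y.shape), maxiter=50,
        subproblem_solver=LinearSubproblemSolver(),
    )
    x_hat = solver.solve()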
The SCICO source code is available from `GitHub
<https://github.com/lanl/scico>`__, and pre-built packages are
available from `PyPI <https://pypi.python.org/pypi/scico/>`__. (Detailed
instructions for installing SCICO are available in :ref:`installing`.)
It has extensive `online documentation <https://scico.rtfd.io/>`__,
including :doc:`API documentation <_autosummary/scico>` and
:ref:`usage examples <example_notebooks>`, which can be run online at
`Google Colab
<https://colab.research.google.com/github/lanl/scico-data/blob/colab/notebooks/index.ipynb>`__
and `binder
<https://mybinder.org/v2/gh/lanl/scico-data/binder?labpath=notebooks%2Findex.ipynb>`__.
If you use this library for published work, please cite
:cite:`balke-2022-scico` (see bibtex entry ``balke-2022-scico`` in
`docs/source/references.bib
<https://github.com/lanl/scico/blob/main/docs/source/references.bib>`_
in the source distribution).
Contributing
------------
Bug reports, feature requests, and general suggestions are welcome,
and should be submitted via the `GitHub issue system
<https://github.com/lanl/scico/issues>`__. More substantial
contributions are also :ref:`welcome <scico_dev_contributing>`.
License
-------
SCICO is distributed as open-source software under a BSD 3-Clause
License (see the `LICENSE
<https://github.com/lanl/scico/blob/master/LICENSE>`__ file for
details). LANL open source approval reference C20091.
© 2020-2023. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract
89233218CNA000001 for Los Alamos National Laboratory (LANL), which is
operated by Triad National Security, LLC for the U.S. Department of
Energy/National Nuclear Security Administration. All rights in the
program are reserved by Triad National Security, LLC, and the
U.S. Department of Energy/National Nuclear Security Administration.
The Government has granted for itself and others acting on its behalf
a nonexclusive, paid-up, irrevocable worldwide license in this
material to reproduce, prepare derivative works, distribute copies to
the public, perform publicly and display publicly, and to permit
others to do so.
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/overview.rst | overview.rst
Developers
==========
Core Developers
---------------
- `Cristina Garcia Cardona <https://github.com/crstngc>`_
- `Michael McCann <https://github.com/Michael-T-McCann>`_
- `Brendt Wohlberg <https://github.com/bwohlberg>`_
Emeritus Developers
-------------------
- `Thilo Balke <https://github.com/tbalke>`_
- `Fernando Davis <https://github.com/FernandoDavis>`_
- `Soumendu Majee <https://github.com/smajee>`_
- `Luke Pfister <https://github.com/lukepfister>`_
Contributors
------------
- `Weijie Gan <https://github.com/wjgancn>`_ (Non-blind variant of DnCNN)
- `Oleg Korobkin <https://github.com/korobkin>`_ (BlockArray improvements)
- `Andrew Leong <https://scholar.google.com/citations?user=-2wRWbcAAAAJ&hl=en>`_ (Improvements to optics module documentation)
- `Saurav Maheshkar <https://github.com/SauravMaheshkar>`_ (Improvements to pre-commit configuration)
- `Yanpeng Yuan <https://github.com/yanpeng7>`_ (ASTRA interface improvements)
- `Li-Ta (Ollie) Lo <https://github.com/ollielo>`_ (ASTRA interface improvements)
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/team.rst | team.rst
# Usage Examples
## Organized by Application
### Computed Tomography
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
### Deconvolution
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
### Sparse Coding
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
### Miscellaneous
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
## Organized by Regularization
### Plug and Play Priors
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
### Total Variation
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
### Sparsity
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Machine Learning
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
> - [Training of DnCNN for Denoising](denoise_dncnn_train_bsds.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
## Organized by Optimization Algorithm
### ADMM
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Linearized ADMM
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
### Proximal ADMM
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
### Non-linear Proximal ADMM
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
### PDHG
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
### PGM
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
### PCG
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
scico | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/index.ipynb | index.ipynb
# Usage Examples
## Organized by Application
### Computed Tomography
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
### Deconvolution
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
### Sparse Coding
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
### Miscellaneous
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
## Organized by Regularization
### Plug and Play Priors
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
### Total Variation
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
### Sparsity
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Machine Learning
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
> - [Training of DnCNN for Denoising](denoise_dncnn_train_bsds.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
## Organized by Optimization Algorithm
### ADMM
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Linearized ADMM
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
### Proximal ADMM
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
### Non-linear Proximal ADMM
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
### PDHG
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
### PGM
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
### PCG
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
Noisy Data Generation for NN Training
=====================================
This example demonstrates how to generate noisy image data for
training neural network models for denoising. The original images are
part of the
[BSDS500 dataset](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/)
provided by the Berkeley Segmentation Dataset and Benchmark project.
```
import numpy as np
from scico import plot
from scico.flax.examples import load_image_data
plot.config_notebook_plotting()
```
Read data from cache, or generate it if it is not already available.
```
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 64 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
```
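As a quick sanity check, the returned dictionaries can be inspected directly. This is a minimal sketch; the exact number of patches depends on the stride and image sizes, so the printed shapes are illustrative rather than guaranteed values.
```
# Inspect the generated datasets (assumes "image" and "label" arrays of shape
# (num_patches, size, size, channels), as used by the plotting code below).
print("training patches:", train_ds["image"].shape)
print("testing patches: ", test_ds["image"].shape)
# The empirical noise standard deviation should be close to noise_level.
print("empirical noise std: %.3f" % float(np.std(train_ds["image"] - train_ds["label"])))
```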
Plot a randomly selected sample. Note that the patches are small, so the
plots may show unidentifiable image fragments.
```
indx_tr = np.random.randint(0, train_nimg)
indx_te = np.random.randint(0, test_nimg)
fig, axes = plot.subplots(nrows=2, ncols=2, figsize=(7, 7))
plot.imview(
train_ds["label"][indx_tr, ..., 0],
title="Ground truth - Training Sample",
fig=fig,
ax=axes[0, 0],
)
plot.imview(
train_ds["image"][indx_tr, ..., 0],
title="Noisy Image - Training Sample",
fig=fig,
ax=axes[0, 1],
)
plot.imview(
test_ds["label"][indx_te, ..., 0],
title="Ground truth - Testing Sample",
fig=fig,
ax=axes[1, 0],
)
plot.imview(
test_ds["image"][indx_te, ..., 0], title="Noisy Image - Testing Sample", fig=fig, ax=axes[1, 1]
)
fig.suptitle(r"Training and Testing samples")
fig.tight_layout()
fig.colorbar(
axes[0, 1].get_images()[0],
ax=axes,
shrink=0.5,
pad=0.05,
)
fig.show()
```
Non-Negative Basis Pursuit DeNoising (ADMM)
===========================================
This example demonstrates the solution of a non-negative sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x} \|_2^2
+ \lambda \| \mathbf{x} \|_1 + I(\mathbf{x} \geq 0) \;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
$\mathbf{x}$ is the sparse representation, and $I(\mathbf{x} \geq 0)$
is the indicator function of the non-negativity constraint.
```
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, MatrixSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create a random dictionary, a reference random sparse representation, and
a test signal formed by synthesis from the reference sparse
representation.
```
m = 32 # signal size
n = 128 # dictionary size
s = 10 # sparsity level
np.random.seed(1)
D = np.random.randn(m, n)
D = D / np.linalg.norm(D, axis=0, keepdims=True) # normalize dictionary
xt = np.zeros(n) # true signal
idx = np.random.randint(low=0, high=n, size=s) # support of xt
xt[idx] = np.random.rand(s)
y = D @ xt + 5e-2 * np.random.randn(m) # synthetic signal
xt = jax.device_put(xt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
```
Set up the forward operator and ADMM solver object.
```
lmbda = 1e-1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g_list = [lmbda * functional.L1Norm(), functional.NonNegativeIndicator()]
C_list = [linop.Identity((n)), linop.Identity((n))]
rho_list = [1.0, 1.0]
maxiter = 100 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=g_list,
C_list=C_list,
rho_list=rho_list,
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=MatrixSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
```
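The solution can be checked against the structure imposed by the regularization terms. This is a minimal sketch, using only quantities already defined above; the exact counts vary with the random problem instance.
```
# The recovered coefficients should be non-negative (up to solver tolerance)
# and sparse.
xr = np.array(x)  # copy to a numpy array for convenience
print("min(x): %.3e" % xr.min())
print("nonzeros (|x_i| > 1e-3): %d of %d" % (np.sum(np.abs(xr) > 1e-3), n))
```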
Plot the recovered coefficients and signal.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((xt, solver.x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((D @ xt, y, D @ solver.x)).T,
title="Signal",
lgnd=("Ground Truth", "Noisy", "Recovered"),
fig=fig,
ax=ax[1],
)
fig.show()
```
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
=============================================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune parameters
for the companion [example script](deconv_tv_admm.rst). The `ray.tune`
function API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME`, but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
```
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import report, tune
plot.config_notebook_plotting()
```
Create a ground truth image.
```
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
```
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
```
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
```
Define performance evaluation function.
```
def eval_params(config, x_gt, psf, y):
"""Parameter evaluation function. The `config` parameter is a
dict of specific parameters for evaluation of a single parameter
set (a pair of parameters in this case). The remaining parameters
are objects that are passed to the evaluation function via the
ray object store.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Put main arrays on jax device.
x_gt, psf, y = jax.device_put([x_gt, psf, y])
# Set up problem to be solved.
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L21Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
# Define solver.
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=10,
subproblem_solver=LinearSubproblemSolver(),
)
# Perform 50 iterations, reporting performance to ray.tune every 10 iterations.
for step in range(5):
x_admm = solver.solve()
report({"psnr": float(metric.psnr(x_gt, x_admm))})
```
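For reference, the function above receives one sampled parameter configuration per trial. The following is a sketch with hypothetical parameter values (not drawn from the actual search space defined below), showing how a single evaluation would be invoked; it is left commented out because `report` is intended to be called from within a `ray.tune` session.
```
# Hypothetical single-trial invocation (for illustration only; ray.tune
# normally samples the config and calls eval_params itself).
example_config = {"lambda": 1e-2, "rho": 5e-1}  # assumed values, not tuned
# eval_params(example_config, x_gt, psf, y)  # would run 50 ADMM iterations,
#                                            # reporting PSNR every 10 iterations
```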
Define parameter search space and resources per trial.
```
config = {"lambda": tune.loguniform(1e-3, 1e-1), "rho": tune.loguniform(1e-2, 1e0)}
resources = {"cpu": 4, "gpu": 0} # cpus per trial, gpus per trial
```
Run parameter search.
```
tuner = tune.Tuner(
tune.with_parameters(eval_params, x_gt=x_gt, psf=psf, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
)
results = tuner.fit()
```
Display best parameters and corresponding performance.
```
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
```
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
```
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
```
Plot parameter values visited during the parameter search and the
corresponding reconstruction PSNRs. The best point in the parameter space
is indicated in red.
```
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 18.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 18 dB omitted)")
fig.show()
```
PPP (with BM4D) Volume Deconvolution
====================================
This example demonstrates the solution of a 3D image deconvolution problem
(involving recovering a 3D volume that has been convolved with a 3D kernel
and corrupted by noise) using the ADMM Plug-and-Play Priors (PPP)
algorithm <cite data-cite="venkatakrishnan-2013-plugandplay2"/>, with the BM4D
<cite data-cite="maggioni-2012-nonlocal"/> denoiser.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.examples import create_3d_foam_phantom, downsample_volume, tile_volume_slices
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create a ground truth image.
```
np.random.seed(1234)
N = 128 # phantom size
Nx, Ny, Nz = N, N, N // 4
upsamp = 2
x_gt_hires = create_3d_foam_phantom((upsamp * Nz, upsamp * Ny, upsamp * Nx), N_sphere=100)
x_gt = downsample_volume(x_gt_hires, upsamp)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
```
Set up the forward operator and a test signal consisting of a blurred
signal with additive Gaussian noise.
```
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n, n)) / (n**3)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
```
Set up ADMM solver.
```
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 40.0 / 255 # BM4D regularization strength
g = λ * functional.BM4D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
```
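The recorded iteration history can be used for a quick convergence check before plotting. This is a minimal sketch; the exact residual values depend on the run.
```
# Final primal and dual residuals from the recorded iteration statistics.
print("final residuals: primal %.3e, dual %.3e" % (hist.Prml_Rsdl[-1], hist.Dual_Rsdl[-1]))
```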
Show slices of the recovered 3D volume.
```
show_id = Nz // 2
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(tile_volume_slices(x_gt), title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc, nc:-nc]
yc = snp.clip(yc, 0, 1)
plot.imview(
tile_volume_slices(yc),
title="Slices of blurred, noisy volume: %.2f (dB)" % metric.psnr(x_gt, yc),
fig=fig,
ax=ax[1],
)
plot.imview(
tile_volume_slices(x),
title="Slices of deconvolved volume: %.2f (dB)" % metric.psnr(x_gt, x),
fig=fig,
ax=ax[2],
)
fig.show()
```
Plot convergence statistics.
```
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
```
Parameter Tuning for TV-Regularized Abel Inversion
==================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune
parameters for the companion [example script](ct_abel_tv_admm.rst). The
`ray.tune` class API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME`, but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
```
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import tune
plot.config_notebook_plotting()
```
Create a ground truth image.
```
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
```
Set up the forward operator and create a test measurement.
```
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
```
Compute the inverse Abel transform solution for use as an initial solution.
```
x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0.0, 1.0)
```
Define performance evaluation class.
```
class Trainable(tune.Trainable):
"""Parameter evaluation class."""
def setup(self, config, x_gt, x0, y):
"""This method initializes a new parameter evaluation object. It
is called once when a new parameter evaluation object is created.
The `config` parameter is a dict of specific parameters for
evaluation of a single parameter set (a pair of parameters in
this case). The remaining parameters are objects that are passed
to the evaluation function via the ray object store.
"""
# Put main arrays on jax device.
self.x_gt, self.x0, self.y = jax.device_put([x_gt, x0, y])
# Set up problem to be solved.
self.A = AbelProjector(self.x_gt.shape)
self.f = loss.SquaredL2Loss(y=self.y, A=self.A)
self.C = linop.FiniteDifference(input_shape=self.x_gt.shape)
self.reset_config(config)
def reset_config(self, config):
"""This method is only required when `scico.ray.tune.Tuner` is
initialized with `reuse_actors` set to ``True`` (the default). In
this case, a set of parameter evaluation processes and
corresponding objects are created once (including initialization
via a call to the `setup` method), and this method is called when
switching to evaluation of a different parameter configuration.
If `reuse_actors` is set to ``False``, then a new process and
object are created for each parameter configuration, and this
method is not used.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Set up parameter-dependent functional.
g = λ * functional.L1Norm()
# Define solver.
cg_tol = 1e-4
cg_maxiter = 25
self.solver = ADMM(
f=self.f,
g_list=[g],
C_list=[self.C],
rho_list=[ρ],
x0=self.x0,
maxiter=10,
subproblem_solver=LinearSubproblemSolver(
cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}
),
)
return True
def step(self):
"""This method is called for each step in the evaluation of a
single parameter configuration. The maximum number of times it
can be called is controlled by the `num_iterations` parameter
in the initialization of a `scico.ray.tune.Tuner` object.
"""
# Perform 10 solver steps for every ray.tune step
x_tv = snp.clip(self.solver.solve(), 0.0, 1.0)
return {"psnr": float(metric.psnr(self.x_gt, x_tv))}
```
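The life cycle of the class above, as exercised by `ray.tune`, can be summarized as follows. This is a schematic sketch only, included to clarify how `setup`, `reset_config`, and `step` interact; the actual object construction and scheduling are managed internally by `ray.tune`.
```
# Schematic of one parameter evaluation (not executable as written; ray.tune
# constructs the Trainable and passes the extra arrays via its object store):
#   trainable.setup(config, x_gt, x0, y)   # called once per actor
#   trainable.reset_config(config)         # called when the actor is reused
#   for _ in range(10):                    # at most num_iterations steps
#       metrics = trainable.step()         # 10 ADMM iterations, returns {"psnr": ...}
```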
Define parameter search space and resources per trial.
```
config = {"lambda": tune.loguniform(1e0, 1e2), "rho": tune.loguniform(1e1, 1e3)}
resources = {"gpu": 0, "cpu": 1} # gpus per trial, cpus per trial
```
Run parameter search.
```
tuner = tune.Tuner(
tune.with_parameters(Trainable, x_gt=x_gt, x0=x0, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
num_iterations=10, # perform at most 10 steps for each parameter evaluation
)
results = tuner.fit()
```
Display best parameters and corresponding performance.
```
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
```
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
```
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
```
Plot parameter values visited during the parameter search and the
corresponding reconstruction PSNRs. The best point in the parameter space
is indicated in red.
```
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 20.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 20 dB omitted)")
fig.show()
```
ℓ1 Total Variation Denoising
============================
This example demonstrates impulse noise removal via ℓ1 total variation
<cite data-cite="alliney-1992-digital"/> <cite data-cite="esser-2010-primal"/> (Sec. 2.4.4)
(i.e. total variation regularization with an ℓ1 data fidelity term),
minimizing the functional
$$\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{y} - \mathbf{x}
\|_1 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $\mathbf{y}$ is the noisy image, $C$ is a 2D finite difference
operator, and $\mathbf{x}$ is the denoised image.
```
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import spnoise
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
from scipy.ndimage import median_filter
plot.config_notebook_plotting()
```
Create a ground truth image and add salt & pepper noise to create a
noisy test image.
```
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = 0.5 * x_gt / x_gt.max()
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
y = spnoise(x_gt, 0.5)
```
Denoise with median filtering.
```
x_med = median_filter(y, size=(5, 5))
```
Denoise with ℓ1 total variation.
```
λ = 1.5e0
g_loss = loss.Loss(y=y, f=functional.L1Norm())
g_tv = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=None,
g_list=[g_loss, g_tv],
C_list=[linop.Identity(input_shape=y.shape), C],
rho_list=[5e0, 5e0],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
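The two terms of the objective can be evaluated at the TV solution to connect the numerical result back to the functional being minimized. This is a minimal sketch; it assumes that `g_loss` and `g_tv` are callable on arrays (the usual behavior of SCICO losses and functionals), and the data fidelity value is reported up to the loss scaling convention.
```
# Evaluate the data fidelity and the weighted TV term at the solution
# (values are illustrative and depend on the noise realization).
print("data fidelity term:      %.3e" % float(g_loss(x_tv)))
print("regularization λ||Cx||_{2,1}: %.3e" % float(g_tv(C(x_tv))))
```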
Plot results.
```
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.0))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(13, 12))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy image", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(
x_med,
title=f"Median filtering: {metric.psnr(x_gt, x_med):.2f} (dB)",
fig=fig,
ax=ax[1, 0],
**plt_args,
)
plot.imview(
x_tv,
title=f"ℓ1-TV denoising: {metric.psnr(x_gt, x_tv):.2f} (dB)",
fig=fig,
ax=ax[1, 1],
**plt_args,
)
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
```
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create a ground truth image and projector.
```
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
```
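Since only 10 projection angles are used, the problem is strongly underdetermined, which is what motivates the TV regularization below. A rough sketch of the measurement-to-voxel ratio:
```
# Compare the number of measurements with the number of unknowns (the ratio
# is illustrative; both sizes are exact for the arrays constructed above).
print("measurements: %d  voxels: %d  ratio: %.3f" % (y.size, tangle.size, y.size / tangle.size))
```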
Set up ADMM solver object.
```
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
```
Show the recovered image.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
```
CT Reconstruction with CG and PCG
=================================
This example demonstrates a simple iterative CT reconstruction using
conjugate gradient (CG) and preconditioned conjugate gradient (PCG)
algorithms to solve the problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, and
$\mathbf{x}$ is the reconstructed image.
```
from time import time
import numpy as np
import jax
import jax.numpy as jnp
from xdesign import Foam, discrete_phantom
from scico import loss, plot
from scico.linop import CircularConvolve
from scico.linop.radon_astra import TomographicProjector
from scico.solver import cg
plot.config_notebook_plotting()
```
Create a ground truth image.
```
N = 256 # phantom size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
```
Configure a CT projection operator and generate synthetic measurements.
```
n_projection = N # matches the phantom size so this is not few-view CT
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = 1 / N * TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
```
Forward and back project a single pixel (Kronecker delta) to compute
an approximate impulse response for $\mathbf{A}^T \mathbf{A}$.
```
H = CircularConvolve.from_operator(A.T @ A)
```
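Conceptually, `from_operator` probes the operator with a Kronecker delta and uses the response as the kernel of a circular convolution. The commented sketch below illustrates that idea under this assumption; it is not the actual implementation, and the centering convention may differ.
```
# Illustrative only (kept commented out): probe A^T A with a centered delta
# and interpret the response as a convolution kernel.
# delta = jnp.zeros(x_gt.shape).at[N // 2, N // 2].set(1.0)
# kernel = (A.T @ A)(delta)
```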
Invert in the Fourier domain to form a preconditioner $\mathbf{M}
\approx (\mathbf{A}^T \mathbf{A})^{-1}$. See
<cite data-cite="clinthorne-1993-preconditioning"/> Section V.A. for more details.
```
# γ limits the gain of the preconditioner; higher gives a weaker filter.
γ = 1e-2
# The imaginary part comes from numerical errors in A.T and needs to be
# removed to ensure H is symmetric, positive definite.
frequency_response = np.real(H.h_dft)
inv_frequency_response = 1 / (frequency_response + γ)
# Using circular convolution without padding is sufficient here because
# M is approximate anyway.
M = CircularConvolve(inv_frequency_response, x_gt.shape, h_is_dft=True)
```
Check that $\mathbf{M}$ does approximately invert $\mathbf{A}^T \mathbf{A}$.
```
plot_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, axes = plot.subplots(nrows=1, ncols=3, figsize=(12, 4.5))
plot.imview(x_gt, title="Ground truth, $x_{gt}$", fig=fig, ax=axes[0], **plot_args)
plot.imview(
A.T @ A @ x_gt, title=r"$\mathbf{A}^T \mathbf{A} x_{gt}$", fig=fig, ax=axes[1], **plot_args
)
plot.imview(
M @ A.T @ A @ x_gt,
title=r"$\mathbf{M} \mathbf{A}^T \mathbf{A} x_{gt}$",
fig=fig,
ax=axes[2],
**plot_args,
)
fig.suptitle(r"$\mathbf{M}$ approximately inverts $\mathbf{A}^T \mathbf{A}$")
fig.tight_layout()
fig.colorbar(
axes[2].get_images()[0],
ax=axes,
location="right",
shrink=1.0,
pad=0.05,
label="Arbitrary Units",
)
fig.show()
```
Reconstruct with both standard and preconditioned conjugate gradient.
```
start_time = time()
x_cg, info_cg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=1e-5,
info=True,
)
time_cg = time() - start_time
start_time = time()
x_pcg, info_pcg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=2e-5, # preconditioning affects the problem scaling so tol differs between CG and PCG
info=True,
M=M,
)
time_pcg = time() - start_time
```
Compare CG and PCG in terms of reconstruction time and data fidelity.
```
f_cg = loss.SquaredL2Loss(y=A.T @ y, A=A.T @ A)
f_data = loss.SquaredL2Loss(y=y, A=A)
print(
f"{'Method':10s}{'Iterations':>15s}{'Time (s)':>15s}{'||ATAx - ATy||':>15s}{'||Ax - y||':>15s}"
)
print(
f"{'CG':10s}{info_cg['num_iter']:>15d}{time_cg:>15.2f}{f_cg(x_cg):>15.2e}{f_data(x_cg):>15.2e}"
)
print(
f"{'PCG':10s}{info_pcg['num_iter']:>15d}{time_pcg:>15.2f}{f_cg(x_pcg):>15.2e}"
f"{f_data(x_pcg):>15.2e}"
)
```
Convolutional Sparse Coding with Mask Decoupling (ADMM)
=======================================================
This example demonstrates the solution of a convolutional sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
B \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda \sum_k ( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are a set of filters comprising the dictionary,
the $\mathbf{x}_k$ are a corresponding set of coefficient maps,
$\mathbf{y}$ is the signal to be represented, and $B$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
<cite data-cite="almeida-2013-deconvolving"/>, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1} \; (1/2) \|
\mathbf{y} - B \mathbf{z}_0 \|_2^2 + \lambda \sum_k ( \| \mathbf{z}_{1,k}
\|_1 - \| \mathbf{z}_{1,k} \|_2 ) \\ \;\; \text{s.t.} \;\;
\mathbf{z}_0 = \sum_k \mathbf{h}_k \ast \mathbf{x}_k \;\; \text{and} \;\;
\mathbf{z}_{1,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
<cite data-cite="wohlberg-2014-efficient"/>.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
```
N = 121 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
```
Normalize dictionary filters and scale coefficient maps accordingly.
```
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
```
Convert numpy arrays to jax arrays.
```
h = jax.device_put(h)
x0 = jax.device_put(x0)
```
Set up required padding and corresponding crop operator.
```
h_center = (h.shape[1] // 2, h.shape[2] // 2)
pad_width = ((0, 0), (h_center[0], h_center[0]), (h_center[1], h_center[1]))
x0p = snp.pad(x0, pad_width=pad_width)
B = Crop(pad_width[1:], input_shape=x0p.shape[1:])
```
Set up sum-of-convolutions forward operator.
```
C = CircularConvolve(h, input_shape=x0p.shape, ndims=2, h_center=h_center)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
```
Construct test image from dictionary $\mathbf{h}$ and padded version of
coefficient maps $\mathbf{x}_0$.
```
y = B(A(x0p))
```
Set functional and solver parameters.
```
λ = 1e0 # l1-l2 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameters
ρ1 = 3e0
maxiter = 200 # number of ADMM iterations
```
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
<cite data-cite="wohlberg-2021-psf"/>.
```
f = ZeroFunctional()
g0 = SquaredL2Loss(y=y, A=B)
g1 = λ * L1MinusL2Norm()
C0 = A
C1 = Identity(input_shape=x0p.shape)
```
Initialize ADMM solver.
```
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=G0BlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Show the recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
```
Show test image and reconstruction from recovered coefficient maps. Note
the absence of the wrap-around effects at the boundary that can be seen
in the corresponding images in the [related example](sparsecode_conv_admm.rst).
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(B(A(x1)), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_conv_md_admm.ipynb
|
sparsecode_conv_md_admm.ipynb
|
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
N = 121 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
h = jax.device_put(h)
x0 = jax.device_put(x0)
h_center = (h.shape[1] // 2, h.shape[2] // 2)
pad_width = ((0, 0), (h_center[0], h_center[0]), (h_center[1], h_center[1]))
x0p = snp.pad(x0, pad_width=pad_width)
B = Crop(pad_width[1:], input_shape=x0p.shape[1:])
C = CircularConvolve(h, input_shape=x0p.shape, ndims=2, h_center=h_center)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
y = B(A(x0p))
λ = 1e0 # l1-l2 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameters
ρ1 = 3e0
maxiter = 200 # number of ADMM iterations
f = ZeroFunctional()
g0 = SquaredL2Loss(y=y, A=B)
g1 = λ * L1MinusL2Norm()
C0 = A
C1 = Identity(input_shape=x0p.shape)
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=G0BlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(B(A(x1)), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
| 0.689096 | 0.975785 |
Convolutional Sparse Coding (ADMM)
==================================
This example demonstrates the solution of a simple convolutional sparse
coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
\sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big\|_2^2 + \lambda \sum_k
( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are a set of filters comprising the dictionary,
the $\mathbf{x}_k$ are a corresponding set of coefficient maps, and
$\mathbf{y}$ is the signal to be represented. The problem is solved via
an ADMM algorithm using the frequency-domain approach proposed in
<cite data-cite="wohlberg-2014-efficient"/>.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm
from scico.linop import CircularConvolve, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, FBlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
```
N = 128 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
```
Normalize dictionary filters and scale coefficient maps accordingly.
```
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
```
Convert numpy arrays to jax arrays.
```
h = jax.device_put(h)
x0 = jax.device_put(x0)
```
Set up sum-of-convolutions forward operator.
```
C = CircularConvolve(h, input_shape=x0.shape, ndims=2)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
```
Construct test image from dictionary $\mathbf{h}$ and coefficient maps
$\mathbf{x}_0$.
```
y = A(x0)
```
Set functional and solver parameters.
```
λ = 1e0 # l1-l2 norm regularization parameter
ρ = 2e0 # ADMM penalty parameter
maxiter = 200 # number of ADMM iterations
```
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
<cite data-cite="wohlberg-2021-psf"/>.
```
f = SquaredL2Loss(y=y, A=A)
g0 = λ * L1MinusL2Norm()
C0 = Identity(input_shape=x0.shape)
```
Initialize ADMM solver.
```
solver = ADMM(
f=f,
g_list=[g0],
C_list=[C0],
rho_list=[ρ],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=FBlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Show the recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
```
Show test image and reconstruction from recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(A(x1), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_conv_admm.ipynb
|
sparsecode_conv_admm.ipynb
|
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm
from scico.linop import CircularConvolve, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, FBlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
N = 128 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
h = jax.device_put(h)
x0 = jax.device_put(x0)
C = CircularConvolve(h, input_shape=x0.shape, ndims=2)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
y = A(x0)
λ = 1e0 # l1-l2 norm regularization parameter
ρ = 2e0 # ADMM penalty parameter
maxiter = 200 # number of ADMM iterations
f = SquaredL2Loss(y=y, A=A)
g0 = λ * L1MinusL2Norm()
C0 = Identity(input_shape=x0.shape)
solver = ADMM(
f=f,
g_list=[g0],
C_list=[C0],
rho_list=[ρ],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=FBlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(A(x1), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
| 0.719581 | 0.970882 |
Basis Pursuit DeNoising (APGM)
==============================
This example demonstrates the solution of the sparse coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x}
\|_2^2 + \lambda \| \mathbf{x} \|_1\;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
and $\mathbf{x}$ is the sparse representation.
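The problem is solved with an accelerated proximal gradient method. As a
rough sketch (the implementation may differ in detail, e.g. in its step size
handling), the iterations take the standard FISTA form
$$\begin{aligned}
\mathbf{x}^{(j+1)} &= \mathrm{prox}_{(\lambda / L) \| \cdot \|_1} \Big(
\mathbf{v}^{(j)} - \frac{1}{L} D^T ( D \mathbf{v}^{(j)} - \mathbf{y} ) \Big) \\
t_{j+1} &= \frac{1 + \sqrt{1 + 4 t_j^2}}{2} \\
\mathbf{v}^{(j+1)} &= \mathbf{x}^{(j+1)} + \frac{t_j - 1}{t_{j+1}}
\big( \mathbf{x}^{(j+1)} - \mathbf{x}^{(j)} \big) \;,
\end{aligned}$$
where $L \geq \| D \|_2^2$ is a Lipschitz constant of the gradient of the
data fidelity term, and the proximal operator of the scaled $\ell_1$ norm is
elementwise soft thresholding.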
```
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
plot.config_notebook_plotting()
```
Construct a random dictionary, a reference random sparse
representation, and a test signal consisting of the synthesis of the
reference sparse representation.
```
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
```
Set up the forward operator and AcceleratedPGM solver object.
```
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Plot the recovered coefficients and convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_pgm.ipynb
|
sparsecode_pgm.ipynb
|
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
plot.config_notebook_plotting()
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
| 0.659734 | 0.971402 |
Training of DnCNN for Denoising
===============================
This example demonstrates the training and application of the DnCNN model
from <cite data-cite="zhang-2017-dncnn"/> to denoise images that have been corrupted
with additive Gaussian noise.
```
import os
from time import time
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_image_data
plot.config_notebook_plotting()
```
Prepare parallel processing. Set an arbitrary processor count (only
applies if GPU is not available).
```
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
```
Read data from cache or generate if not available.
```
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 16 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
```
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The depth of the model has been reduced to 6, instead of
the 17 of the original model. The suggested settings can be found in the
original paper.
```
# model configuration
model_conf = {
"depth": 6,
"num_filters": 64,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "ADAM",
"batch_size": 128,
"num_epochs": 50,
"base_learning_rate": 1e-3,
"warmup_epochs": 0,
"log_every_steps": 5000,
"log": True,
}
```
Construct DnCNN model.
```
channels = train_ds["image"].shape[-1]
model = sflax.DnCNNNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
)
```
Run training loop.
```
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "dncnn_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
```
Evaluate on testing data.
```
test_patches = 720
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"][:test_patches])
time_eval = time() - start_time
output = np.clip(output, a_min=0, a_max=1.0)
```
Compare trained model in terms of reconstruction time and data fidelity.
```
snr_eval = metric.snr(test_ds["label"][:test_patches], output)
psnr_eval = metric.psnr(test_ds["label"][:test_patches], output)
print(
f"{'DnCNNNet training':18s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'DnCNNNet testing':18s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
```
Plot comparison. Note that the patches are small, so the plots may
correspond to unidentifiable image fragments.
```
np.random.seed(123)
indx = np.random.randint(0, high=test_patches)
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="Noisy: \nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="DnCNNNet Reconstruction\nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
```
Plot convergence statistics. Statistics are only generated if a training
cycle was run (i.e., the final epoch results were not simply read from a checkpoint).
```
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
```
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/denoise_dncnn_train_bsds.ipynb
|
denoise_dncnn_train_bsds.ipynb
|
import os
from time import time
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_image_data
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 16 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
# model configuration
model_conf = {
"depth": 6,
"num_filters": 64,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "ADAM",
"batch_size": 128,
"num_epochs": 50,
"base_learning_rate": 1e-3,
"warmup_epochs": 0,
"log_every_steps": 5000,
"log": True,
}
channels = train_ds["image"].shape[-1]
model = sflax.DnCNNNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
)
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "dncnn_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
test_patches = 720
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"][:test_patches])
time_eval = time() - start_time
output = np.clip(output, a_min=0, a_max=1.0)
snr_eval = metric.snr(test_ds["label"][:test_patches], output)
psnr_eval = metric.psnr(test_ds["label"][:test_patches], output)
print(
f"{'DnCNNNet training':18s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'DnCNNNet testing':18s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
np.random.seed(123)
indx = np.random.randint(0, high=test_patches)
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="Noisy: \nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="DnCNNNet Reconstruction\nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
| 0.657428 | 0.894329 |
from typing import Optional, Sequence, Union # needed for typehints_formatter hack
from scico.typing import ( # needed for typehints_formatter hack
ArrayIndex,
AxisIndex,
DType,
)
# An explanation for this nasty hack, the primary purpose of which is to avoid
# the very long definition of the scico.typing.DType appearing explicitly in the
# docs. This is handled correctly by sphinx.ext.autodoc in some circumstances,
# but only when sphinx_autodoc_typehints is not included in the extension list,
# and the appearance of the type hints (e.g. whether links to definitions are
# included) seems to depend on whether "from __future__ import annotations" was
# used in the module being documented, which is not ideal from a consistency
# perspective. (It's also worth noting that sphinx.ext.autodoc provides some
# configurability for type aliases via the autodoc_type_aliases sphinx
# configuration option.) The alternative is to include sphinx_autodoc_typehints,
# which gives a consistent appearance to the type hints, but the
# autodoc_type_aliases configuration option is ignored, and type aliases are
# always expanded. This hack avoids expansion for the type aliases with the
# longest definitions by defining a custom function for formatting the
# type hints, using an option provided by sphinx_autodoc_typehints. For
# more information, see
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases
# https://github.com/tox-dev/sphinx-autodoc-typehints/issues/284
# https://github.com/tox-dev/sphinx-autodoc-typehints/blob/main/README.md
def typehints_formatter_function(annotation, config):
markup = {
DType: ":obj:`~scico.typing.DType`",
# Compound types involving DType must be added here to avoid their DType
# component being expanded in the docs.
Optional[DType]: ":obj:`~typing.Optional`\ [\ :obj:`~scico.typing.DType`\ ]",
Union[DType, Sequence[DType]]: (
":obj:`~typing.Union`\ [\ :obj:`~scico.typing.DType`\ , "
":obj:`~typing.Sequence`\ [\ :obj:`~scico.typing.DType`\ ]]"
),
AxisIndex: ":obj:`~scico.typing.AxisIndex`",
ArrayIndex: ":obj:`~scico.typing.ArrayIndex`",
}
if annotation in markup:
return markup[annotation]
else:
return None
typehints_formatter = typehints_formatter_function
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/conf/85-dtype_typehints.py
|
85-dtype_typehints.py
|
from typing import Optional, Sequence, Union # needed for typehints_formatter hack
from scico.typing import ( # needed for typehints_formatter hack
ArrayIndex,
AxisIndex,
DType,
)
# An explanation for this nasty hack, the primary purpose of which is to avoid
# the very long definition of the scico.typing.DType appearing explicitly in the
# docs. This is handled correctly by sphinx.ext.autodoc in some circumstances,
# but only when sphinx_autodoc_typehints is not included in the extension list,
# and the appearance of the type hints (e.g. whether links to definitions are
# included) seems to depend on whether "from __future__ import annotations" was
# used in the module being documented, which is not ideal from a consistency
# perspective. (It's also worth noting that sphinx.ext.autodoc provides some
# configurability for type aliases via the autodoc_type_aliases sphinx
# configuration option.) The alternative is to include sphinx_autodoc_typehints,
# which gives a consistent appearance to the type hints, but the
# autodoc_type_aliases configuration option is ignored, and type aliases are
# always expanded. This hack avoids expansion for the type aliases with the
# longest definitions by defining a custom function for formatting the
# type hints, using an option provided by sphinx_autodoc_typehints. For
# more information, see
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases
# https://github.com/tox-dev/sphinx-autodoc-typehints/issues/284
# https://github.com/tox-dev/sphinx-autodoc-typehints/blob/main/README.md
def typehints_formatter_function(annotation, config):
markup = {
DType: ":obj:`~scico.typing.DType`",
# Compound types involving DType must be added here to avoid their DType
# component being expanded in the docs.
Optional[DType]: ":obj:`~typing.Optional`\ [\ :obj:`~scico.typing.DType`\ ]",
Union[DType, Sequence[DType]]: (
":obj:`~typing.Union`\ [\ :obj:`~scico.typing.DType`\ , "
":obj:`~typing.Sequence`\ [\ :obj:`~scico.typing.DType`\ ]]"
),
AxisIndex: ":obj:`~scico.typing.AxisIndex`",
ArrayIndex: ":obj:`~scico.typing.ArrayIndex`",
}
if annotation in markup:
return markup[annotation]
else:
return None
typehints_formatter = typehints_formatter_function
| 0.893527 | 0.225961 |
import re
from inspect import getmembers, isfunction
# Rewrite module names for certain functions imported into scico.numpy so that they are
# included in the docs for that module. While a bit messy to do so here rather than in a
# function run via app.connect, it is necessary (for some yet to be identified reason)
# to do it here to ensure that the relevant API docs include a table of functions.
import scico.numpy
for module in (scico.numpy, scico.numpy.fft, scico.numpy.linalg, scico.numpy.testing):
for _, f in getmembers(module, isfunction):
# Rewrite module name so that function is included in docs
f.__module__ = module.__name__
f.__doc__ = re.sub(
r"^:func:`([\w_]+)` wrapped to operate",
r":obj:`jax.numpy.\1` wrapped to operate",
str(f.__doc__),
flags=re.M,
)
modname = ".".join(module.__name__.split(".")[1:])
f.__doc__ = re.sub(
r"^LAX-backend implementation of :func:`([\w_]+)`.",
r"LAX-backend implementation of :obj:`%s.\1`." % modname,
str(f.__doc__),
flags=re.M,
)
# Improve formatting of jax.numpy warning
f.__doc__ = re.sub(
r"^\*\*\* This function is not yet implemented by jax.numpy, and will "
"raise NotImplementedError \*\*\*",
"**WARNING**: This function is not yet implemented by jax.numpy, "
" and will raise :exc:`NotImplementedError`.",
f.__doc__,
flags=re.M,
)
# Remove cross-references to section NEP35
f.__doc__ = re.sub(":ref:`NEP 35 <NEP35>`", "NEP 35", f.__doc__, re.M)
# Remove cross-reference to numpydoc style references section
f.__doc__ = re.sub(r" \[(\d+)\]_", "", f.__doc__, flags=re.M)
# Remove entire numpydoc references section
f.__doc__ = re.sub(r"References\n----------\n.*\n", "", f.__doc__, flags=re.DOTALL)
# Remove spurious two-space indentation of entire docstring
scico.numpy.vectorize.__doc__ = re.sub("^ ", "", scico.numpy.vectorize.__doc__, flags=re.M)
# Fix various docstring formatting errors
scico.numpy.testing.break_cycles.__doc__ = re.sub(
"calling gc.collect$",
"calling gc.collect.\n\n",
scico.numpy.testing.break_cycles.__doc__,
flags=re.M,
)
scico.numpy.testing.break_cycles.__doc__ = re.sub(
" __del__\) inside", "__del__\) inside", scico.numpy.testing.break_cycles.__doc__, flags=re.M
)
scico.numpy.testing.assert_raises_regex.__doc__ = re.sub(
"\*args,\n.*\*\*kwargs",
"*args, **kwargs",
scico.numpy.testing.assert_raises_regex.__doc__,
flags=re.M,
)
scico.numpy.BlockArray.global_shards.__doc__ = re.sub(
"`Shard`s", "`Shard`\ s", scico.numpy.BlockArray.global_shards.__doc__, flags=re.M
)
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/conf/80-scico_numpy.py
|
80-scico_numpy.py
|
import re
from inspect import getmembers, isfunction
# Rewrite module names for certain functions imported into scico.numpy so that they are
# included in the docs for that module. While a bit messy to do so here rather than in a
# function run via app.connect, it is necessary (for some yet to be identified reason)
# to do it here to ensure that the relevant API docs include a table of functions.
import scico.numpy
for module in (scico.numpy, scico.numpy.fft, scico.numpy.linalg, scico.numpy.testing):
for _, f in getmembers(module, isfunction):
# Rewrite module name so that function is included in docs
f.__module__ = module.__name__
f.__doc__ = re.sub(
r"^:func:`([\w_]+)` wrapped to operate",
r":obj:`jax.numpy.\1` wrapped to operate",
str(f.__doc__),
flags=re.M,
)
modname = ".".join(module.__name__.split(".")[1:])
f.__doc__ = re.sub(
r"^LAX-backend implementation of :func:`([\w_]+)`.",
r"LAX-backend implementation of :obj:`%s.\1`." % modname,
str(f.__doc__),
flags=re.M,
)
# Improve formatting of jax.numpy warning
f.__doc__ = re.sub(
r"^\*\*\* This function is not yet implemented by jax.numpy, and will "
"raise NotImplementedError \*\*\*",
"**WARNING**: This function is not yet implemented by jax.numpy, "
" and will raise :exc:`NotImplementedError`.",
f.__doc__,
flags=re.M,
)
# Remove cross-references to section NEP35
f.__doc__ = re.sub(":ref:`NEP 35 <NEP35>`", "NEP 35", f.__doc__, re.M)
# Remove cross-reference to numpydoc style references section
f.__doc__ = re.sub(r" \[(\d+)\]_", "", f.__doc__, flags=re.M)
# Remove entire numpydoc references section
f.__doc__ = re.sub(r"References\n----------\n.*\n", "", f.__doc__, flags=re.DOTALL)
# Remove spurious two-space indentation of entire docstring
scico.numpy.vectorize.__doc__ = re.sub("^ ", "", scico.numpy.vectorize.__doc__, flags=re.M)
# Fix various docstring formatting errors
scico.numpy.testing.break_cycles.__doc__ = re.sub(
"calling gc.collect$",
"calling gc.collect.\n\n",
scico.numpy.testing.break_cycles.__doc__,
flags=re.M,
)
scico.numpy.testing.break_cycles.__doc__ = re.sub(
" __del__\) inside", "__del__\) inside", scico.numpy.testing.break_cycles.__doc__, flags=re.M
)
scico.numpy.testing.assert_raises_regex.__doc__ = re.sub(
"\*args,\n.*\*\*kwargs",
"*args, **kwargs",
scico.numpy.testing.assert_raises_regex.__doc__,
flags=re.M,
)
scico.numpy.BlockArray.global_shards.__doc__ = re.sub(
"`Shard`s", "`Shard`\ s", scico.numpy.BlockArray.global_shards.__doc__, flags=re.M
)
| 0.749179 | 0.255187 |
Functionals
===========
A functional is
a mapping from :math:`\mathbb{R}^n` or :math:`\mathbb{C}^n` to :math:`\mathbb{R}`.
In SCICO, functionals are
primarily used to represent a cost to be minimized
and are represented by instances of the :class:`.Functional` class.
An instance of :class:`.Functional`, ``f``, may provide three core operations.
* Evaluation
- ``f(x)`` returns the value of the functional
evaluated at the point ``x``.
- A functional that can be evaluated
has the attribute ``f.has_eval == True``.
- Not all functionals can be evaluated: see `Plug-and-Play`_.
* Gradient
- ``f.grad(x)`` returns the gradient of the functional evaluated at ``x``.
- Gradients are calculated using JAX reverse-mode automatic differentiation,
exposed through :func:`scico.grad`.
- *Note:* The gradient of a functional ``f`` can be evaluated even if that functional is not smooth.
All that is required is that the functional can be evaluated, ``f.has_eval == True``.
However, the result may not be a valid gradient (or subgradient) for all inputs.
* Proximal operator
- ``f.prox(v, lam)`` returns the result of the scaled proximal
operator of ``f``, i.e., the proximal operator of ``lambda x:
lam * f(x)``, evaluated at the point ``v``.
- The proximal operator of a functional :math:`f : \mathbb{R}^n \to
\mathbb{R}` is the mapping :math:`\mathrm{prox}_f : \mathbb{R}^n
\to \mathbb{R}^n` defined as
.. math::
\mathrm{prox}_f (\mb{v}) = \argmin_{\mb{x}} f(\mb{x}) +
\frac{1}{2} \norm{\mb{v} - \mb{x}}_2^2\;.
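For example, the proximal operator of the :math:`\ell_1` norm is elementwise
soft thresholding. A minimal usage sketch (assuming :class:`.L1Norm`; output
formatting may differ):
::
    import scico.numpy as snp
    from scico.functional import L1Norm
    f = L1Norm()
    v = snp.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    # prox of lam * ||x||_1 at v with lam = 1, i.e. soft thresholding
    # with threshold 1
    f.prox(v, 1.0)   # expected: [-1., 0., 0., 0., 1.]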
Plug-and-Play
-------------
For the plug-and-play framework :cite:`sreehari-2016-plug`,
we encapsulate generic denoisers including CNNs
in :class:`.Functional` objects that **cannot be evaluated**.
The denoiser is applied via the proximal operator.
For examples, see :ref:`example_notebooks`.
Proximal Calculus
-----------------
We support a limited subset of proximal calculus rules:
Scaled Functionals
^^^^^^^^^^^^^^^^^^
Given a scalar ``c`` and a functional ``f`` with a defined proximal method, we can
determine the proximal method of ``c * f`` as
.. math::
\begin{align}
\mathrm{prox}_{c f} (v, \lambda) &= \argmin_x \lambda (c f)(x) + \frac{1}{2} \norm{v - x}_2^2 \\
&= \argmin_x (\lambda c) f(x) + \frac{1}{2} \norm{v - x}_2^2 \\
&= \mathrm{prox}_{f} (v, c \lambda) \;.
\end{align}
Note that we have made no assumptions regarding homogeneity of ``f``;
rather, only that the proximal method of ``f`` is given
in the parameterized form :math:`\mathrm{prox}_{c f}`.
In SCICO, multiplying a :class:`.Functional` by a scalar
will return a :class:`.ScaledFunctional`.
This :class:`.ScaledFunctional` retains the ``has_eval`` and ``has_prox`` attributes
from the original :class:`.Functional`,
but the proximal method is modified to accommodate the additional scalar.
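A minimal usage sketch of this rule (using :class:`.L1Norm` purely for
illustration; exact numerical output may differ in formatting):
::
    import scico.numpy as snp
    from scico.functional import L1Norm
    f = L1Norm()
    g = 2.0 * f              # ScaledFunctional
    v = snp.array([-2.0, 0.5, 3.0])
    # prox of lam * (2 f) at v equals prox of (2 lam) * f at v
    g.prox(v, 0.5)           # same result as f.prox(v, 1.0)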
Separable Functionals
^^^^^^^^^^^^^^^^^^^^^
A separable functional :math:`f : \mathbb{C}^N \to \mathbb{R}` can be written as the sum
of functionals :math:`f_i : \mathbb{C}^{N_i} \to \mathbb{R}` with :math:`\sum_i N_i = N`. In particular,
.. math::
f(\mb{x}) = f(\mb{x}_1, \dots, \mb{x}_N) = f_1(\mb{x}_1) + \dots + f_N(\mb{x}_N) \;.
The proximal operator of a separable :math:`f` can be written
in terms of the proximal operators of the :math:`f_i`
(see Theorem 6.6 of :cite:`beck-2017-first`):
.. math::
\mathrm{prox}_f(\mb{x}, \lambda)
=
\begin{bmatrix}
\mathrm{prox}_{f_1}(\mb{x}_1, \lambda) \\
\vdots \\
\mathrm{prox}_{f_N}(\mb{x}_N, \lambda) \\
\end{bmatrix} \;.
Separable Functionals are implemented in the :class:`.SeparableFunctional` class. Separable functionals naturally accept :class:`.BlockArray` inputs and return the prox as a :class:`.BlockArray`.
Adding New Functionals
----------------------
To add a new functional,
create a class which
1. inherits from base :class:`.Functional`;
2. has ``has_eval`` and ``has_prox`` flags;
3. has ``_eval`` and ``prox`` methods, as necessary.
For example,
::
class MyFunctional(scico.functional.Functional):
has_eval = True
has_prox = True
def _eval(self, x: JaxArray) -> float:
return snp.sum(x)
def prox(self, x: JaxArray, lam : float) -> JaxArray:
return x - lam
Losses
------
In SCICO, a loss is a special type of functional
.. math::
f(\mb{x}) = \alpha l( \mb{y}, A(\mb{x}) ) \;,
where :math:`\alpha` is a scaling parameter,
:math:`l` is a functional,
:math:`\mb{y}` is a set of measurements,
and :math:`A` is an operator.
SCICO uses the class :class:`.Loss` to represent losses.
Loss functionals commonly arise in the context of solving
inverse problems in scientific imaging,
where they are used to represent the mismatch
between predicted measurements :math:`A(\mb{x})`
and actual ones :math:`\mb{y}`.
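A minimal sketch of constructing and evaluating a loss (assuming the default
scaling :math:`\alpha = 1/2` of :class:`.SquaredL2Loss`):
::
    import scico.numpy as snp
    from scico import linop, loss
    y = snp.array([1.0, 2.0, 3.0, 4.0])
    A = linop.Identity(y.shape)
    f = loss.SquaredL2Loss(y=y, A=A)
    f(snp.zeros(y.shape))   # (1/2) ||y - A(0)||_2^2 = 15.0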
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/functional.rst
|
functional.rst
|
Functionals
===========
A functional is
a mapping from :math:`\mathbb{R}^n` or :math:`\mathbb{C}^n` to :math:`\mathbb{R}`.
In SCICO, functionals are
primarily used to represent a cost to be minimized
and are represented by instances of the :class:`.Functional` class.
An instance of :class:`.Functional`, ``f``, may provide three core operations.
* Evaluation
- ``f(x)`` returns the value of the functional
evaluated at the point ``x``.
- A functional that can be evaluated
has the attribute ``f.has_eval == True``.
- Not all functionals can be evaluated: see `Plug-and-Play`_.
* Gradient
- ``f.grad(x)`` returns the gradient of the functional evaluated at ``x``.
- Gradients are calculated using JAX reverse-mode automatic differentiation,
exposed through :func:`scico.grad`.
- *Note:* The gradient of a functional ``f`` can be evaluated even if that functional is not smooth.
All that is required is that the functional can be evaluated, ``f.has_eval == True``.
However, the result may not be a valid gradient (or subgradient) for all inputs.
* Proximal operator
- ``f.prox(v, lam)`` returns the result of the scaled proximal
operator of ``f``, i.e., the proximal operator of ``lambda x:
lam * f(x)``, evaluated at the point ``v``.
- The proximal operator of a functional :math:`f : \mathbb{R}^n \to
\mathbb{R}` is the mapping :math:`\mathrm{prox}_f : \mathbb{R}^n
\to \mathbb{R}^n` defined as
.. math::
\mathrm{prox}_f (\mb{v}) = \argmin_{\mb{x}} f(\mb{x}) +
\frac{1}{2} \norm{\mb{v} - \mb{x}}_2^2\;.
Plug-and-Play
-------------
For the plug-and-play framework :cite:`sreehari-2016-plug`,
we encapsulate generic denoisers including CNNs
in :class:`.Functional` objects that **cannot be evaluated**.
The denoiser is applied via the proximal operator.
For examples, see :ref:`example_notebooks`.
Proximal Calculus
-----------------
We support a limited subset of proximal calculus rules:
Scaled Functionals
^^^^^^^^^^^^^^^^^^
Given a scalar ``c`` and a functional ``f`` with a defined proximal method, we can
determine the proximal method of ``c * f`` as
.. math::
\begin{align}
\mathrm{prox}_{c f} (v, \lambda) &= \argmin_x \lambda (c f)(x) + \frac{1}{2} \norm{v - x}_2^2 \\
&= \argmin_x (\lambda c) f(x) + \frac{1}{2} \norm{v - x}_2^2 \\
&= \mathrm{prox}_{f} (v, c \lambda) \;.
\end{align}
Note that we have made no assumptions regarding homogeneity of ``f``;
rather, only that the proximal method of ``f`` is given
in the parameterized form :math:`\mathrm{prox}_{c f}`.
In SCICO, multiplying a :class:`.Functional` by a scalar
will return a :class:`.ScaledFunctional`.
This :class:`.ScaledFunctional` retains the ``has_eval`` and ``has_prox`` attributes
from the original :class:`.Functional`,
but the proximal method is modified to accommodate the additional scalar.
Separable Functionals
^^^^^^^^^^^^^^^^^^^^^
A separable functional :math:`f : \mathbb{C}^N \to \mathbb{R}` can be written as the sum
of functionals :math:`f_i : \mathbb{C}^{N_i} \to \mathbb{R}` with :math:`\sum_i N_i = N`. In particular,
.. math::
f(\mb{x}) = f(\mb{x}_1, \dots, \mb{x}_N) = f_1(\mb{x}_1) + \dots + f_N(\mb{x}_N) \;.
The proximal operator of a separable :math:`f` can be written
in terms of the proximal operators of the :math:`f_i`
(see Theorem 6.6 of :cite:`beck-2017-first`):
.. math::
\mathrm{prox}_f(\mb{x}, \lambda)
=
\begin{bmatrix}
\mathrm{prox}_{f_1}(\mb{x}_1, \lambda) \\
\vdots \\
\mathrm{prox}_{f_N}(\mb{x}_N, \lambda) \\
\end{bmatrix} \;.
Separable Functionals are implemented in the :class:`.SeparableFunctional` class. Separable functionals naturally accept :class:`.BlockArray` inputs and return the prox as a :class:`.BlockArray`.
Adding New Functionals
----------------------
To add a new functional,
create a class which
1. inherits from base :class:`.Functional`;
2. has ``has_eval`` and ``has_prox`` flags;
3. has ``_eval`` and ``prox`` methods, as necessary.
For example,
::
class MyFunctional(scico.functional.Functional):
has_eval = True
has_prox = True
def _eval(self, x: JaxArray) -> float:
return snp.sum(x)
def prox(self, x: JaxArray, lam : float) -> JaxArray:
return x - lam
Losses
------
In SCICO, a loss is a special type of functional
.. math::
f(\mb{x}) = \alpha l( \mb{y}, A(\mb{x}) ) \;,
where :math:`\alpha` is a scaling parameter,
:math:`l` is a functional,
:math:`\mb{y}` is a set of measurements,
and :math:`A` is an operator.
SCICO uses the class :class:`.Loss` to represent losses.
Loss functionals commonly arise in the context of solving
inverse problems in scientific imaging,
where they are used to represent the mismatch
between predicted measurements :math:`A(\mb{x})`
and actual ones :math:`\mb{y}`.
| 0.967426 | 0.909586 |
Operators
=========
An operator is a map from :math:`\mathbb{R}^n` or :math:`\mathbb{C}^n`
to :math:`\mathbb{R}^m` or :math:`\mathbb{C}^m`. In SCICO, operators
are primarily used to represent imaging systems and provide
regularization. SCICO operators are represented by instances of the
:class:`.Operator` class.
SCICO :class:`.Operator` objects extend the notion of "shape" and
"size" from the usual NumPy ``ndarray`` class. Each
:class:`.Operator` object has an ``input_shape`` and ``output_shape``;
these shapes can be either tuples or a tuple of tuples (in the case of
a :class:`.BlockArray`). The ``matrix_shape`` attribute describes the
shape of the :class:`.LinearOperator` if it were to act on vectorized,
or flattened, inputs.
For example, consider a two-dimensional array :math:`\mb{x} \in
\mathbb{R}^{n \times m}`. We compute the discrete differences of
:math:`\mb{x}` in the horizontal and vertical directions, generating
two new arrays: :math:`\mb{x}_h \in \mathbb{R}^{n \times (m-1)}` and
:math:`\mb{x}_v \in \mathbb{R}^{(n-1) \times m}`. We represent this
linear operator by :math:`\mb{A} : \mathbb{R}^{n \times m} \to
\mathbb{R}^{n \times (m-1)} \otimes \mathbb{R}^{(n-1) \times m}`. In
SCICO, this linear operator will return a :class:`.BlockArray` with
the horizontal and vertical differences stored as blocks. Letting
:math:`y = \mb{A} x`, we have ``y.shape = ((n, m-1), (n-1, m))`` and
::
A.input_shape = (n, m)
A.output_shape = ((n, m-1), (n-1, m))
A.shape = ( ((n, m-1), (n-1, m)), (n, m))   # (output_shape, input_shape)
A.input_size = n*m
A.output_size = n*(m-1) + (n-1)*m
A.matrix_shape = (n*(m-1) + (n-1)*m, n*m)   # (output_size, input_size)
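A concrete instance of this pattern is the finite difference operator; a
minimal sketch (assuming :class:`.FiniteDifference` with its default
behavior; the ordering of the difference blocks may differ from the
illustration above):
::
    import scico.numpy as snp
    from scico.linop import FiniteDifference
    n, m = 4, 5
    A = FiniteDifference(input_shape=(n, m))
    y = A @ snp.ones((n, m))
    A.input_shape    # (4, 5)
    A.output_shape   # one block of differences per axis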
Operator Calculus
-----------------
SCICO supports a variety of operator calculus rules, allowing new
operators to be defined in terms of old ones. The following table
summarizes the available operations.
+----------------+-----------------+
| Operation | Result |
+----------------+-----------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+-----------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+-----------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+-----------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+-----------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+-----------------+
| ``A(B)(x)`` | ``A(B(x))`` |
+----------------+-----------------+
| ``A(B)`` | ``Operator`` |
+----------------+-----------------+
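A brief sketch illustrating a few of these rules (the operators here are
hypothetical and chosen purely for illustration):
::
    import scico.numpy as snp
    from scico.operator import Operator
    A = Operator(input_shape=(8,), eval_fn=lambda x: x**2)
    B = Operator(input_shape=(8,), eval_fn=lambda x: 2 * x)
    x = snp.ones((8,))
    (A + B)(x)     # A(x) + B(x)
    (2.0 * A)(x)   # 2.0 * A(x)
    A(B)(x)        # A(B(x)), i.e. (2 * x)**2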
Defining A New Operator
-----------------------
To define a new operator, pass a callable to the :class:`.Operator`
constructor:
::
A = Operator(input_shape=(32,), eval_fn = lambda x: 2 * x)
Or use subclassing:
::
>>> from scico.operator import Operator
>>> class MyOp(Operator):
...
... def _eval(self, x):
... return 2 * x
>>> A = MyOp(input_shape=(32,))
At a minimum, the ``_eval`` function must be overridden. If either
``output_shape`` or ``output_dtype`` are unspecified, they are
determined by evaluating the operator on an input of appropriate shape
and dtype.
Linear Operators
================
Linear operators are those for which
.. math::
H(a \mb{x} + b \mb{y}) = a H(\mb{x}) + b H(\mb{y}) \;.
SCICO represents linear operators as instances of the class
:class:`.LinearOperator`. While finite-dimensional linear operators
can always be associated with a matrix, it is often useful to
represent them in a matrix-free manner. Most of SCICO's linear
operators are implemented matrix-free.
Using A LinearOperator
----------------------
We implement two ways to evaluate a :class:`.LinearOperator`. The
first is using standard callable syntax: ``A(x)``. The second mimics
the NumPy matrix multiplication syntax: ``A @ x``. Both methods
perform shape and type checks to validate the input before ultimately
either calling `A._eval` or generating a new :class:`.LinearOperator`.
For linear operators that map real-valued inputs to real-valued
outputs, there are two ways to apply the adjoint: ``A.adj(y)`` and
``A.T @ y``.
For complex-valued linear operators, there are three ways to apply the
adjoint ``A.adj(y)``, ``A.H @ y``, and ``A.conj().T @ y``. Note that
in this case, ``A.T`` returns the non-conjugated transpose of the
:class:`.LinearOperator`.
While the cost of evaluating the linear operator is virtually
identical for ``A(x)`` and ``A @ x``, the ``A.H`` and ``A.conj().T``
methods are somewhat slower; especially the latter. This is because
two intermediate linear operators must be created before the function
is evaluated. Evaluating ``A.conj().T @ y`` is equivalent to:
::
def f(y):
B = A.conj() # New LinearOperator #1
C = B.T # New LinearOperator #2
return C @ y
**Note**: the speed differences between these methods vanish if
applied inside of a jit-ed function. For instance:
::
f = jax.jit(lambda x: A.conj().T @ x)
+------------------+-----------------+
| Public Method | Private Method |
+------------------+-----------------+
| ``__call__`` | ``._eval`` |
+------------------+-----------------+
| ``adj`` | ``._adj`` |
+------------------+-----------------+
| ``gram`` | ``._gram`` |
+------------------+-----------------+
The public methods perform shape and type checking to validate the
input before either calling the corresponding private method or
returning a composite LinearOperator.
Linear Operator Calculus
------------------------
SCICO supports several linear operator calculus rules.
Given
``A`` and ``B`` of class :class:`.LinearOperator` and of appropriate shape,
``x`` an array of appropriate shape,
``c`` a scalar, and
``O`` an :class:`.Operator`,
we have
+----------------+----------------------------+
| Operation | Result |
+----------------+----------------------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+----------------------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+----------------------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+----------------------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+----------------------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+----------------------------+
| ``(A@B)(x)`` | ``A@B@x`` |
+----------------+----------------------------+
| ``A @ B`` | ``ComposedLinearOperator`` |
+----------------+----------------------------+
| ``A @ O`` | ``Operator`` |
+----------------+----------------------------+
| ``O(A)`` | ``Operator`` |
+----------------+----------------------------+
Defining A New Linear Operator
------------------------------
To define a new linear operator, pass a callable to the
:class:`.LinearOperator` constructor
::
>>> from scico.linop import LinearOperator
>>> A = LinearOperator(input_shape=(32,),
... eval_fn = lambda x: 2 * x)
Or, use subclassing:
::
>>> class MyLinearOperator(LinearOperator):
... def _eval(self, x):
... return 2 * x
>>> A = MyLinearOperator(input_shape=(32,))
At a minimum, the ``_eval`` method must be overridden. If the
``_adj`` method is not overriden, the adjoint is determined using
:func:`scico.linear_adjoint`. If either ``output_shape`` or
``output_dtype`` are unspecified, they are determined by evaluating
the Operator on an input of appropriate shape and dtype.
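A short usage sketch for the linear operator defined above, relying on the
automatically derived adjoint:
::
    >>> import scico.numpy as snp
    >>> x = snp.ones((32,))
    >>> y = A(x)       # forward evaluation
    >>> z = A.T @ y    # adjoint, derived via scico.linear_adjoint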
🔪 Sharp Edges 🔪
------------------
Strict Types in Adjoint
^^^^^^^^^^^^^^^^^^^^^^^
SCICO silently promotes real types to complex types in forward
application, but enforces strict type checking in the adjoint. This
is due to the strict type-safe nature of jax adjoints.
LinearOperators From External Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
External code may be wrapped as a subclass of :class:`.Operator` or
:class:`.LinearOperator` and used in SCICO optimization routines;
however this process can be complicated and error-prone. As a
starting point, look at the source for
:class:`.radon_svmbir.TomographicProjector` or
:class:`.radon_astra.TomographicProjector` and the JAX documentation
for the `vector-jacobian product
<https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html#vector-jacobian-products-vjps-aka-reverse-mode-autodiff>`_
and `custom VJP rules
<https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html>`_.
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/operator.rst
|
operator.rst
|
Operators
=========
An operator is a map from :math:`\mathbb{R}^n` or :math:`\mathbb{C}^n`
to :math:`\mathbb{R}^m` or :math:`\mathbb{C}^m`. In SCICO, operators
are primarily used to represent imaging systems and provide
regularization. SCICO operators are represented by instances of the
:class:`.Operator` class.
SCICO :class:`.Operator` objects extend the notion of "shape" and
"size" from the usual NumPy ``ndarray`` class. Each
:class:`.Operator` object has an ``input_shape`` and ``output_shape``;
these shapes can be either tuples or a tuple of tuples (in the case of
a :class:`.BlockArray`). The ``matrix_shape`` attribute describes the
shape of the :class:`.LinearOperator` if it were to act on vectorized,
or flattened, inputs.
For example, consider a two-dimensional array :math:`\mb{x} \in
\mathbb{R}^{n \times m}`. We compute the discrete differences of
:math:`\mb{x}` in the horizontal and vertical directions, generating
two new arrays: :math:`\mb{x}_h \in \mathbb{R}^{n \times (m-1)}` and
:math:`\mb{x}_v \in \mathbb{R}^{(n-1) \times m}`. We represent this
linear operator by :math:`\mb{A} : \mathbb{R}^{n \times m} \to
\mathbb{R}^{n \times (m-1)} \otimes \mathbb{R}^{(n-1) \times m}`. In
SCICO, this linear operator will return a :class:`.BlockArray` with
the horizontal and vertical differences stored as blocks. Letting
:math:`y = \mb{A} x`, we have ``y.shape = ((n, m-1), (n-1, m))`` and
::
A.input_shape = (n, m)
A.output_shape = ((n, m-1), (n-1, m))
A.shape = ( ((n, m-1), (n-1, m)), (n, m)) # (output_shape, input_shape)
A.input_size = n*m
A.output_size = n*(m-1) + (n-1)*m
A.matrix_shape = (n*(m-1) + (n-1)*m, n*m) # (output_size, input_size)
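A minimal sketch of these attributes, using the
:class:`.FiniteDifference` operator on a small :math:`3 \times 4`
input (the shapes shown in the comments follow from the definitions
above):
::
import scico.linop

A = scico.linop.FiniteDifference((3, 4))
A.input_shape    # (3, 4)
A.output_shape   # ((2, 4), (3, 3))
A.shape          # (((2, 4), (3, 3)), (3, 4))
A.matrix_shape   # (17, 12), i.e. (output_size, input_size)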
Operator Calculus
-----------------
SCICO supports a variety of operator calculus rules, allowing new
operators to be defined in terms of old ones. The following table
summarizes the available operations.
+----------------+-----------------+
| Operation | Result |
+----------------+-----------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+-----------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+-----------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+-----------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+-----------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+-----------------+
| ``A(B)(x)`` | ``A(B(x))`` |
+----------------+-----------------+
| ``A(B)`` | ``Operator`` |
+----------------+-----------------+
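A minimal sketch exercising two of the rules in the table above (the
operators themselves are chosen purely for illustration):
::
import scico.numpy as snp
from scico.operator import Operator

A = Operator(input_shape=(8,), eval_fn=lambda x: x**2)
B = Operator(input_shape=(8,), eval_fn=lambda x: x + 1)
x = snp.ones((8,))
assert snp.allclose((A + B)(x), A(x) + B(x))   # sum rule
assert snp.allclose(A(B)(x), A(B(x)))          # composition rule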
Defining A New Operator
-----------------------
To define a new operator, pass a callable to the :class:`.Operator`
constructor:
::
A = Operator(input_shape=(32,), eval_fn = lambda x: 2 * x)
Or use subclassing:
::
>>> from scico.operator import Operator
>>> class MyOp(Operator):
...
... def _eval(self, x):
... return 2 * x
>>> A = MyOp(input_shape=(32,))
At a minimum, the ``_eval`` function must be overridden. If either
``output_shape`` or ``output_dtype`` is unspecified, it is
determined by evaluating the operator on an input of appropriate shape
and dtype.
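Since neither ``output_shape`` nor ``output_dtype`` is given in the
sketch below, both are inferred by evaluating the callable once (the
length-doubling map is chosen arbitrarily for illustration):
::
import scico.numpy as snp
from scico.operator import Operator

A = Operator(input_shape=(32,), eval_fn=lambda x: snp.concatenate((x, x)))
A.output_shape   # (64,), inferred by evaluating the operator
A.output_dtype   # inferred from the dtype of the evaluation result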
Linear Operators
================
Linear operators are those for which
.. math::
H(a \mb{x} + b \mb{y}) = a H(\mb{x}) + b H(\mb{y}) \;.
SCICO represents linear operators as instances of the class
:class:`.LinearOperator`. While finite-dimensional linear operators
can always be associated with a matrix, it is often useful to
represent them in a matrix-free manner. Most of SCICO's linear
operators are implemented matrix-free.
Using A LinearOperator
----------------------
We implement two ways to evaluate a :class:`.LinearOperator`. The
first is using standard callable syntax: ``A(x)``. The second mimics
the NumPy matrix multiplication syntax: ``A @ x``. Both methods
perform shape and type checks to validate the input before ultimately
either calling ``A._eval`` or generating a new :class:`.LinearOperator`.
For linear operators that map real-valued inputs to real-valued
outputs, there are two ways to apply the adjoint: ``A.adj(y)`` and
``A.T @ y``.
For complex-valued linear operators, there are three ways to apply the
adjoint: ``A.adj(y)``, ``A.H @ y``, and ``A.conj().T @ y``. Note that
in this case, ``A.T`` returns the non-conjugated transpose of the
:class:`.LinearOperator`.
While the cost of evaluating the linear operator is virtually
identical for ``A(x)`` and ``A @ x``, the ``A.H`` and ``A.conj().T``
methods are somewhat slower; especially the latter. This is because
two intermediate linear operators must be created before the function
is evaluated. Evaluating ``A.conj().T @ y`` is equivalent to:
::
def f(y):
    B = A.conj()  # New LinearOperator #1
    C = B.T       # New LinearOperator #2
    return C @ y
**Note**: the speed differences between these methods vanish when they
are applied inside a jit-ed function. For instance:
::
f = jax.jit(lambda x: A.conj().T @ x)
Each public :class:`.LinearOperator` method is backed by a
corresponding private method, as summarized in the following table.
+------------------+-----------------+
| Public Method | Private Method |
+------------------+-----------------+
| ``__call__`` | ``._eval`` |
+------------------+-----------------+
| ``adj`` | ``._adj`` |
+------------------+-----------------+
| ``gram`` | ``._gram`` |
+------------------+-----------------+
The public methods perform shape and type checking to validate the
input before either calling the corresponding private method or
returning a composite LinearOperator.
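A minimal sketch contrasting the public and private methods for a
small dense operator (this assumes that :class:`.MatrixOperator` wraps
a matrix passed as its first argument):
::
import scico.numpy as snp
import scico.random
from scico.linop import MatrixOperator

M, key = scico.random.randn((5, 3))
A = MatrixOperator(M)
y, _ = scico.random.randn((5,), key=key)
# A.adj performs shape/dtype checks before dispatching to A._adj;
# for a real operator it agrees with A.T @ y
assert snp.allclose(A.adj(y), A.T @ y)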
Linear Operator Calculus
------------------------
SCICO supports several linear operator calculus rules.
Given
``A`` and ``B`` of class :class:`.LinearOperator` and of appropriate shape,
``x`` an array of appropriate shape,
``c`` a scalar, and
``O`` an :class:`.Operator`,
we have
+----------------+----------------------------+
| Operation | Result |
+----------------+----------------------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+----------------------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+----------------------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+----------------------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+----------------------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+----------------------------+
| ``(A@B)(x)`` | ``A@B@x`` |
+----------------+----------------------------+
| ``A @ B`` | ``ComposedLinearOperator`` |
+----------------+----------------------------+
| ``A @ O`` | ``Operator`` |
+----------------+----------------------------+
| ``O(A)`` | ``Operator`` |
+----------------+----------------------------+
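For example, the following sketch (with arbitrarily chosen operators)
builds and applies a composition, per the ``A @ B`` and ``(A@B)(x)``
rows of the table:
::
import scico.numpy as snp
from scico.linop import LinearOperator

A = LinearOperator(input_shape=(8,), eval_fn=lambda x: 2 * x)
B = LinearOperator(input_shape=(8,), eval_fn=lambda x: x - snp.roll(x, 1))
C = A @ B                        # ComposedLinearOperator
x = snp.arange(8.0)
assert snp.allclose(C @ x, A(B(x)))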
Defining A New Linear Operator
------------------------------
To define a new linear operator, pass a callable to the
:class:`.LinearOperator` constructor
::
>>> from scico.linop import LinearOperator
>>> A = LinearOperator(input_shape=(32,),
... eval_fn = lambda x: 2 * x)
Or, use subclassing:
::
>>> class MyLinearOperator(LinearOperator):
... def _eval(self, x):
... return 2 * x
>>> A = MyLinearOperator(input_shape=(32,))
At a minimum, the ``_eval`` method must be overridden. If the
``_adj`` method is not overridden, the adjoint is determined using
:func:`scico.linear_adjoint`. If either ``output_shape`` or
``output_dtype`` is unspecified, it is determined by evaluating
the operator on an input of appropriate shape and dtype.
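If a hand-written adjoint is preferred over the automatically derived
one, ``_adj`` can be overridden as well; a minimal sketch, using a
trivially self-adjoint scaling operator:
::
from scico.linop import LinearOperator

class MyScaling(LinearOperator):
    def _eval(self, x):
        return 2 * x

    def _adj(self, y):
        # explicit adjoint; otherwise scico.linear_adjoint would be used
        return 2 * y

A = MyScaling(input_shape=(32,))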
🔪 Sharp Edges 🔪
------------------
Strict Types in Adjoint
^^^^^^^^^^^^^^^^^^^^^^^
SCICO silently promotes real types to complex types in the forward
application, but enforces strict type checking in the adjoint. This
is due to the strictly type-safe nature of JAX adjoints.
LinearOperators From External Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
External code may be wrapped as a subclass of :class:`.Operator` or
:class:`.LinearOperator` and used in SCICO optimization routines;
however, this process can be complicated and error-prone. As a
starting point, look at the source for
:class:`.radon_svmbir.TomographicProjector` or
:class:`.radon_astra.TomographicProjector` and the JAX documentation
for the `vector-jacobian product
<https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html#vector-jacobian-products-vjps-aka-reverse-mode-autodiff>`_
and `custom VJP rules
<https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html>`_.
| 0.955981 | 0.969785 |
.. _blockarray_class:
BlockArray
==========
.. testsetup::
>>> import scico
>>> import scico.numpy as snp
>>> from scico.numpy import BlockArray
>>> import numpy as np
>>> import jax.numpy
The class :class:`.BlockArray` provides a way to combine arrays of
different shapes into a single object for use with other SCICO classes.
A :class:`.BlockArray` consists of a list of :class:`jax.Array` objects,
which we refer to as blocks. A :class:`.BlockArray` differs from a list in
that, whenever possible, :class:`.BlockArray` properties and methods
(including unary and binary operators like +, -, \*, ...) automatically
map along the blocks, returning another :class:`.BlockArray` or tuple as
appropriate. For example,
::
>>> x = snp.blockarray((
... [[1, 3, 7],
... [2, 2, 1]],
... [2, 4, 8]
... ))
>>> x.shape # returns tuple
((2, 3), (3,))
>>> x * 2 # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[ 2, 6, 14],
[ 4, 4, 2]], dtype=...), ...Array([ 4, 8, 16], dtype=...)])
>>> y = snp.blockarray((
... [[.2],
... [.3]],
... [.4]
... ))
>>> x + y # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[1.2, 3.2, 7.2],
[2.3, 2.3, 1.3]], dtype=...), ...Array([2.4, 4.4, 8.4], dtype=...)])
.. _numpy_functions_blockarray:
NumPy and SciPy Functions
-------------------------
:mod:`scico.numpy`, :mod:`scico.numpy.testing`, and
:mod:`scico.scipy.special` provide wrappers around :mod:`jax.numpy`,
:mod:`numpy.testing` and :mod:`jax.scipy.special` where many of the
functions have been extended to work with instances of :class:`.BlockArray`.
In particular:
* When a tuple of tuples is passed as the `shape`
argument to an array creation routine, a :class:`.BlockArray` is created.
* When a :class:`.BlockArray` is passed to a reduction function, the blocks are
ravelled (i.e., reshaped to be 1D) and concatenated before the reduction
is applied. This behavior may be prevented by passing the `axis`
argument, in which case the function is mapped over the blocks.
* When one or more :class:`.BlockArray` instances are passed to a mathematical
function that is not a reduction, the function is mapped over
(corresponding) blocks.
For a list of array creation routines, see
::
>>> scico.numpy.creation_routines # doctest: +ELLIPSIS
('empty', ...)
For a list of reduction functions, see
::
>>> scico.numpy.reduction_functions # doctest: +ELLIPSIS
('sum', ...)
For lists of the remaining wrapped functions, see
::
>>> scico.numpy.mathematical_functions # doctest: +ELLIPSIS
('sin', ...)
>>> scico.numpy.testing_functions # doctest: +ELLIPSIS
('testing.assert_allclose', ...)
>>> import scico.scipy
>>> scico.scipy.special.functions # doctest: +ELLIPSIS
('betainc', ...)
Note that:
* Both :func:`scico.numpy.ravel` and :meth:`.BlockArray.ravel` return a
:class:`.BlockArray` with ravelled blocks rather than the concatenation
of these blocks as a single array.
* The functional and method versions of the "same" function differ in their
behavior, with the method version only applying the reduction within each
block, and the function version applying the reduction across all blocks.
For example, :func:`scico.numpy.sum` applied to a :class:`.BlockArray` with
two blocks returns a scalar value, while :meth:`.BlockArray.sum` returns a
:class:`.BlockArray` with two scalar blocks (see the sketch below).
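A minimal sketch of this distinction:
::
import scico.numpy as snp

x = snp.blockarray(([1.0, 2.0], [3.0, 4.0, 5.0]))
snp.sum(x)   # reduces across all blocks, giving a scalar (15.0)
x.sum()      # reduces within each block, giving a BlockArray (3.0 and 12.0)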
Motivating Example
------------------
The discrete differences of a two-dimensional array, :math:`\mb{x} \in
\mbb{R}^{n \times m}`, in the horizontal and vertical directions can
be represented by the arrays :math:`\mb{x}_h \in \mbb{R}^{n \times
(m-1)}` and :math:`\mb{x}_v \in \mbb{R}^{(n-1) \times m}`
respectively. While it is usually useful to consider the output of a
difference operator as a single entity, we cannot combine these two
arrays into a single array since they have different shapes. We could
vectorize each array and concatenate the resulting vectors, leading to
:math:`\mb{\bar{x}} \in \mbb{R}^{n(m-1) + m(n-1)}`, which can be
stored as a one-dimensional array, but this makes it hard to access
the individual components :math:`\mb{x}_h` and :math:`\mb{x}_v`.
Instead, we can construct a :class:`.BlockArray`, :math:`\mb{x}_B =
[\mb{x}_h, \mb{x}_v]`:
::
>>> n = 32
>>> m = 16
>>> x_h, key = scico.random.randn((n, m-1))
>>> x_v, _ = scico.random.randn((n-1, m), key=key)
# Form the blockarray
>>> x_B = snp.blockarray([x_h, x_v])
# The blockarray shape is a tuple of tuples
>>> x_B.shape
((32, 15), (31, 16))
# Each block component can be easily accessed
>>> x_B[0].shape
(32, 15)
>>> x_B[1].shape
(31, 16)
Constructing a BlockArray
-------------------------
The recommended way to construct a :class:`.BlockArray` is by using the
:func:`~scico.numpy.blockarray` function.
::
>>> import scico.numpy as snp
>>> x0, key = scico.random.randn((32, 32))
>>> x1, _ = scico.random.randn((16,), key=key)
>>> X = snp.blockarray((x0, x1))
>>> X.shape
((32, 32), (16,))
>>> X.size
(1024, 16)
>>> len(X)
2
While :func:`~scico.numpy.blockarray` will accept arguments of type
:class:`~numpy.ndarray` or :class:`~jax.Array`, arguments of type :class:`~numpy.ndarray` will be converted to :class:`~jax.Array` type.
Operating on a BlockArray
-------------------------
.. _blockarray_indexing:
Indexing
^^^^^^^^
:class:`.BlockArray` indexing works just like indexing a list.
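For example:
::
import scico.numpy as snp

x = snp.blockarray(([[1.0, 2.0]], [3.0, 4.0, 5.0]))
x[0]       # first block, an array of shape (1, 2)
x[-1]      # last block, an array of shape (3,)
x[1][0]    # elements within a block are indexed as usual (3.0)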
Multiplication Between BlockArray and LinearOperator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :class:`.Operator` and :class:`.LinearOperator` classes are designed
to work on instances of :class:`.BlockArray` in addition to instances of
:obj:`~jax.Array`. For example
::
>>> x, key = scico.random.randn((3, 4))
>>> A_1 = scico.linop.Identity(x.shape)
>>> A_1.shape # array -> array
((3, 4), (3, 4))
>>> A_2 = scico.linop.FiniteDifference(x.shape)
>>> A_2.shape # array -> BlockArray
(((2, 4), (3, 3)), (3, 4))
>>> diag = snp.blockarray([np.array(1.0), np.array(2.0)])
>>> A_3 = scico.linop.Diagonal(diag, input_shape=(A_2.output_shape))
>>> A_3.shape # BlockArray -> BlockArray
(((2, 4), (3, 3)), ((2, 4), (3, 3)))
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/blockarray.rst
|
blockarray.rst
|
.. _blockarray_class:
BlockArray
==========
.. testsetup::
>>> import scico
>>> import scico.numpy as snp
>>> from scico.numpy import BlockArray
>>> import numpy as np
>>> import jax.numpy
The class :class:`.BlockArray` provides a way to combine arrays of
different shapes into a single object for use with other SCICO classes.
A :class:`.BlockArray` consists of a list of :class:`jax.Array` objects,
which we refer to as blocks. A :class:`.BlockArray` differs from a list in
that, whenever possible, :class:`.BlockArray` properties and methods
(including unary and binary operators like +, -, \*, ...) automatically
map along the blocks, returning another :class:`.BlockArray` or tuple as
appropriate. For example,
::
>>> x = snp.blockarray((
... [[1, 3, 7],
... [2, 2, 1]],
... [2, 4, 8]
... ))
>>> x.shape # returns tuple
((2, 3), (3,))
>>> x * 2 # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[ 2, 6, 14],
[ 4, 4, 2]], dtype=...), ...Array([ 4, 8, 16], dtype=...)])
>>> y = snp.blockarray((
... [[.2],
... [.3]],
... [.4]
... ))
>>> x + y # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[1.2, 3.2, 7.2],
[2.3, 2.3, 1.3]], dtype=...), ...Array([2.4, 4.4, 8.4], dtype=...)])
.. _numpy_functions_blockarray:
NumPy and SciPy Functions
-------------------------
:mod:`scico.numpy`, :mod:`scico.numpy.testing`, and
:mod:`scico.scipy.special` provide wrappers around :mod:`jax.numpy`,
:mod:`numpy.testing` and :mod:`jax.scipy.special` where many of the
functions have been extended to work with instances of :class:`.BlockArray`.
In particular:
* When a tuple of tuples is passed as the `shape`
argument to an array creation routine, a :class:`.BlockArray` is created.
* When a :class:`.BlockArray` is passed to a reduction function, the blocks are
ravelled (i.e., reshaped to be 1D) and concatenated before the reduction
is applied. This behavior may be prevented by passing the `axis`
argument, in which case the function is mapped over the blocks.
* When one or more :class:`.BlockArray` instances are passed to a mathematical
function that is not a reduction, the function is mapped over
(corresponding) blocks.
For a list of array creation routines, see
::
>>> scico.numpy.creation_routines # doctest: +ELLIPSIS
('empty', ...)
For a list of reduction functions, see
::
>>> scico.numpy.reduction_functions # doctest: +ELLIPSIS
('sum', ...)
For lists of the remaining wrapped functions, see
::
>>> scico.numpy.mathematical_functions # doctest: +ELLIPSIS
('sin', ...)
>>> scico.numpy.testing_functions # doctest: +ELLIPSIS
('testing.assert_allclose', ...)
>>> import scico.scipy
>>> scico.scipy.special.functions # doctest: +ELLIPSIS
('betainc', ...)
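As a minimal illustration of the creation and reduction rules
described above:
::
import scico.numpy as snp

z = snp.zeros(((2, 3), (4,)))   # a tuple of tuples as shape creates a BlockArray
z.shape                          # ((2, 3), (4,))
snp.sum(z)                       # blocks are ravelled and concatenated: 0.0
snp.sum(z, axis=0)               # with an axis, the reduction is mapped over blocks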
Note that:
* Both :func:`scico.numpy.ravel` and :meth:`.BlockArray.ravel` return a
:class:`.BlockArray` with ravelled blocks rather than the concatenation
of these blocks as a single array.
* The functional and method versions of the "same" function differ in their
behavior, with the method version only applying the reduction within each
block, and the function version applying the reduction across all blocks.
For example, :func:`scico.numpy.sum` applied to a :class:`.BlockArray` with
two blocks returns a scalar value, while :meth:`.BlockArray.sum` returns a
:class:`.BlockArray` with two scalar blocks.
Motivating Example
------------------
The discrete differences of a two-dimensional array, :math:`\mb{x} \in
\mbb{R}^{n \times m}`, in the horizontal and vertical directions can
be represented by the arrays :math:`\mb{x}_h \in \mbb{R}^{n \times
(m-1)}` and :math:`\mb{x}_v \in \mbb{R}^{(n-1) \times m}`
respectively. While it is usually useful to consider the output of a
difference operator as a single entity, we cannot combine these two
arrays into a single array since they have different shapes. We could
vectorize each array and concatenate the resulting vectors, leading to
:math:`\mb{\bar{x}} \in \mbb{R}^{n(m-1) + m(n-1)}`, which can be
stored as a one-dimensional array, but this makes it hard to access
the individual components :math:`\mb{x}_h` and :math:`\mb{x}_v`.
Instead, we can construct a :class:`.BlockArray`, :math:`\mb{x}_B =
[\mb{x}_h, \mb{x}_v]`:
::
>>> n = 32
>>> m = 16
>>> x_h, key = scico.random.randn((n, m-1))
>>> x_v, _ = scico.random.randn((n-1, m), key=key)
# Form the blockarray
>>> x_B = snp.blockarray([x_h, x_v])
# The blockarray shape is a tuple of tuples
>>> x_B.shape
((32, 15), (31, 16))
# Each block component can be easily accessed
>>> x_B[0].shape
(32, 15)
>>> x_B[1].shape
(31, 16)
Constructing a BlockArray
-------------------------
The recommended way to construct a :class:`.BlockArray` is by using the
:func:`~scico.numpy.blockarray` function.
::
>>> import scico.numpy as snp
>>> x0, key = scico.random.randn((32, 32))
>>> x1, _ = scico.random.randn((16,), key=key)
>>> X = snp.blockarray((x0, x1))
>>> X.shape
((32, 32), (16,))
>>> X.size
(1024, 16)
>>> len(X)
2
While :func:`~scico.numpy.blockarray` will accept arguments of type
:class:`~numpy.ndarray` or :class:`~jax.Array`, arguments of type :class:`~numpy.ndarray` will be converted to :class:`~jax.Array` type.
Operating on a BlockArray
-------------------------
.. _blockarray_indexing:
Indexing
^^^^^^^^
:class:`.BlockArray` indexing works just like indexing a list.
Multiplication Between BlockArray and LinearOperator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :class:`.Operator` and :class:`.LinearOperator` classes are designed
to work on instances of :class:`.BlockArray` in addition to instances of
:obj:`~jax.Array`. For example
::
>>> x, key = scico.random.randn((3, 4))
>>> A_1 = scico.linop.Identity(x.shape)
>>> A_1.shape # array -> array
((3, 4), (3, 4))
>>> A_2 = scico.linop.FiniteDifference(x.shape)
>>> A_2.shape # array -> BlockArray
(((2, 4), (3, 3)), (3, 4))
>>> diag = snp.blockarray([np.array(1.0), np.array(2.0)])
>>> A_3 = scico.linop.Diagonal(diag, input_shape=(A_2.output_shape))
>>> A_3.shape # BlockArray -> BlockArray
(((2, 4), (3, 3)), ((2, 4), (3, 3)))
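Applying these operators follows the same calling conventions as for
ordinary arrays; continuing the example above:
::
y = A_2 @ x     # BlockArray of horizontal and vertical differences; shape ((2, 4), (3, 3))
z = A_3 @ y     # the block-wise Diagonal operator maps BlockArray -> BlockArray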
| 0.963213 | 0.764232 |
Learned Models
==============
In SCICO, neural network models are used to represent imaging problems and provide different modes of data-driven regularization.
The models are implemented in `Flax <https://flax.readthedocs.io/>`_, and constitute a representative sample of frequently used networks.
FlaxMap
-------
SCICO interfaces with the implemented models via :class:`.FlaxMap`. This provides standardized access to all trained models via the model definition and the learned parameters. Further specialized functionality, such as learned denoisers, is built on top of :class:`.FlaxMap`. The specific models that have been implemented are described below.
DnCNN
-----
The denoiser convolutional neural network model (DnCNN) :cite:`zhang-2017-dncnn`, implemented as :class:`.DnCNNNet`, is used to denoise images that have been corrupted with additive Gaussian noise.
ODP
---
The unrolled optimization with deep priors (ODP) :cite:`diamond-2018-odp`, implemented as :class:`.ODPNet`, is used to solve inverse problems in imaging by adapting classical iterative methods into an end-to-end framework that incorporates deep networks as well as knowledge of the image formation model.
The framework aims to solve the optimization problem
.. math::
\argmin_{\mb{x}} \; f(A \mb{x}, \mb{y}) + r(\mb{x}) \;,
where :math:`A` represents a linear forward model and :math:`r` a regularization function encoding prior information, by unrolling the iterative solution method into a network where each iteration corresponds to a different stage in the ODP network. Different iterative solutions produce different unrolled optimization algorithms which, in turn, produce different ODP networks. The ones implemented in SCICO are described below.
Proximal Map
^^^^^^^^^^^^
This algorithm corresponds to solving
.. math::
:label: eq:odp_prox
\argmin_{\mb{x}} \; \alpha_k \, f(A \mb{x}, \mb{y}) + \frac{1}{2} \| \mb{x} - \mb{x}^k - \mb{x}^{k+1/2} \|_2^2 \;,
with :math:`k` corresponding to the index of the iteration, which translates to the index of the corresponding stage of the network, :math:`f(A \mb{x}, \mb{y})` a fidelity term, usually an :math:`\ell_2` norm, and :math:`\mb{x}^{k+1/2}` a regularization representing :math:`\mathrm{prox}_r (\mb{x}^k)` and usually implemented as a convolutional neural network (CNN). This proximal map representation is used when the minimization problem :eq:`eq:odp_prox` can be solved in a computationally efficient manner.
:class:`.ODPProxDnBlock` uses this formulation to solve a denoising problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = (\alpha_k \, \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \, / \, (\alpha_k + 1) \;,
where :math:`A` corresponds to the identity operator and is therefore omitted, :math:`\mb{y}` is the noisy signal, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization, usually represented by a residual CNN.
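A minimal sketch of this update, written directly in terms of array
arithmetic (the argument names are placeholders; in :class:`.ODPNet`
the :math:`\alpha_k` are learned parameters):
::
def odp_denoise_update(y, x_k, x_half, alpha_k):
    # x^{k+1} = (alpha_k * y + x^k + x^{k+1/2}) / (alpha_k + 1)
    # y, x_k, x_half are arrays (e.g. jax arrays) of the same shape
    return (alpha_k * y + x_k + x_half) / (alpha_k + 1.0)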
:class:`.ODPProxDblrBlock` uses this formulation to solve a deblurring problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = \mathcal{F}^{-1} \mathrm{diag} (\alpha_k | \mathcal{F}(K)|^2 + 1 )^{-1} \mathcal{F} \, (\alpha_k K^T * \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \;,
where :math:`A` is the blurring operator, :math:`K` is the blurring kernel, :math:`\mb{y}` is the blurred signal, :math:`\mathcal{F}` is the DFT, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization represented by a residual CNN.
Gradient Descent
^^^^^^^^^^^^^^^^
When the solution of the optimization problem in :eq:`eq:odp_prox` cannot be expressed as a simple analytical step, a formulation based on a gradient descent iteration is preferred. This yields
.. math::
\mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T \nabla_x \, f(A \mb{x}^k, \mb{y}) \;,
where :math:`\mb{x}^{k+1/2}` represents :math:`\nabla r(\mb{x}^k)`.
:class:`.ODPGrDescBlock` uses this formulation to solve a generic problem with :math:`\ell_2` fidelity as
.. math::
\mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T (A \mb{x}^k - \mb{y}) \;,
with :math:`\mb{y}` the measured signal and :math:`\mb{x}^k + \mb{x}^{k+1/2}` a residual CNN.
MoDL
----
The model-based deep learning (MoDL) :cite:`aggarwal-2019-modl`, implemented as :class:`.MoDLNet`, is used to solve inverse problems in imaging also by adapting classical iterative methods into an end-to-end deep learning framework, but, in contrast to ODP, it solves the optimization problem
.. math::
\argmin_{\mb{x}} \; \| A \mb{x} - \mb{y}\|_2^2 + \lambda \, \| \mb{x} - \mathrm{D}_w(\mb{x})\|_2^2 \;,
by directly computing the update
.. math::
\mb{x}^{k+1} = (A^T A + \lambda \, I)^{-1} (A^T \mb{y} + \lambda \, \mb{z}^k) \;,
via conjugate gradient. The regularization :math:`\mb{z}^k = \mathrm{D}_w(\mb{x}^{k})` incorporates prior information, usually in the form of a denoiser model. In this case, the denoiser :math:`\mathrm{D}_w` is shared between all the stages of the network, requiring less memory than other unrolling methods. This also allows a different number of iterations to be used at testing time than were used in training.
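A minimal sketch of this data-consistency update for a matrix forward
operator, using the conjugate gradient solver from
:mod:`jax.scipy.sparse.linalg` (the operator, data, and :math:`\lambda`
are placeholders chosen purely for illustration):
::
from jax.scipy.sparse.linalg import cg

def modl_data_update(A, y, z_k, lam):
    # solve (A^T A + lam I) x = A^T y + lam z^k by conjugate gradient;
    # A is a 2D array, y and z_k are arrays, lam is a positive scalar
    normal_op = lambda v: A.T @ (A @ v) + lam * v
    b = A.T @ y + lam * z_k
    x, _ = cg(normal_op, b)
    return x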
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/learning.rst
|
learning.rst
|
Learned Models
==============
In SCICO, neural network models are used to represent imaging problems and provide different modes of data-driven regularization.
The models are implemented in `Flax <https://flax.readthedocs.io/>`_, and constitute a representative sample of frequently used networks.
FlaxMap
-------
SCICO interfaces with the implemented models via :class:`.FlaxMap`. This provides standardized access to all trained models via the model definition and the learned parameters. Further specialized functionality, such as learned denoisers, is built on top of :class:`.FlaxMap`. The specific models that have been implemented are described below.
DnCNN
-----
The denoiser convolutional neural network model (DnCNN) :cite:`zhang-2017-dncnn`, implemented as :class:`.DnCNNNet`, is used to denoise images that have been corrupted with additive Gaussian noise.
ODP
---
The unrolled optimization with deep priors (ODP) :cite:`diamond-2018-odp`, implemented as :class:`.ODPNet`, is used to solve inverse problems in imaging by adapting classical iterative methods into an end-to-end framework that incorporates deep networks as well as knowledge of the image formation model.
The framework aims to solve the optimization problem
.. math::
\argmin_{\mb{x}} \; f(A \mb{x}, \mb{y}) + r(\mb{x}) \;,
where :math:`A` represents a linear forward model and :math:`r` a regularization function encoding prior information, by unrolling the iterative solution method into a network where each iteration corresponds to a different stage in the ODP network. Different iterative solutions produce different unrolled optimization algorithms which, in turn, produce different ODP networks. The ones implemented in SCICO are described below.
Proximal Map
^^^^^^^^^^^^
This algorithm corresponds to solving
.. math::
:label: eq:odp_prox
\argmin_{\mb{x}} \; \alpha_k \, f(A \mb{x}, \mb{y}) + \frac{1}{2} \| \mb{x} - \mb{x}^k - \mb{x}^{k+1/2} \|_2^2 \;,
with :math:`k` corresponding to the index of the iteration, which translates to the index of the corresponding stage of the network, :math:`f(A \mb{x}, \mb{y})` a fidelity term, usually an :math:`\ell_2` norm, and :math:`\mb{x}^{k+1/2}` a regularization representing :math:`\mathrm{prox}_r (\mb{x}^k)` and usually implemented as a convolutional neural network (CNN). This proximal map representation is used when the minimization problem :eq:`eq:odp_prox` can be solved in a computationally efficient manner.
:class:`.ODPProxDnBlock` uses this formulation to solve a denoising problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = (\alpha_k \, \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \, / \, (\alpha_k + 1) \;,
where :math:`A` corresponds to the identity operator and is therefore omitted, :math:`\mb{y}` is the noisy signal, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization, usually represented by a residual CNN.
:class:`.ODPProxDblrBlock` uses this formulation to solve a deblurring problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = \mathcal{F}^{-1} \mathrm{diag} (\alpha_k | \mathcal{F}(K)|^2 + 1 )^{-1} \mathcal{F} \, (\alpha_k K^T * \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \;,
where :math:`A` is the blurring operator, :math:`K` is the blurring kernel, :math:`\mb{y}` is the blurred signal, :math:`\mathcal{F}` is the DFT, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization represented by a residual CNN.
Gradient Descent
^^^^^^^^^^^^^^^^
When the solution of the optimization problem in :eq:`eq:odp_prox` cannot be expressed as a simple analytical step, a formulation based on a gradient descent iteration is preferred. This yields
.. math::
\mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T \nabla_x \, f(A \mb{x}^k, \mb{y}) \;,
where :math:`\mb{x}^{k+1/2}` represents :math:`\nabla r(\mb{x}^k)`.
:class:`.ODPGrDescBlock` uses this formulation to solve a generic problem with :math:`\ell_2` fidelity as
.. math::
\mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T (A \mb{x}^k - \mb{y}) \;,
with :math:`\mb{y}` the measured signal and :math:`\mb{x}^k + \mb{x}^{k+1/2}` a residual CNN.
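A minimal sketch of this gradient descent update for a matrix forward
operator (the argument names are placeholders chosen purely for
illustration):
::
def odp_grad_desc_update(A, y, x_k, x_half, alpha_k):
    # x^{k+1} = x^k + x^{k+1/2} - alpha_k * A^T (A x^k - y);
    # A is a 2D array, the remaining inputs are arrays of matching shapes
    return x_k + x_half - alpha_k * (A.T @ (A @ x_k - y))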
MoDL
----
The model-based deep learning (MoDL) :cite:`aggarwal-2019-modl`, implemented as :class:`.MoDLNet`, is used to solve inverse problems in imaging also by adapting classical iterative methods into an end-to-end deep learning framework, but, in contrast to ODP, it solves the optimization problem
.. math::
\argmin_{\mb{x}} \; \| A \mb{x} - \mb{y}\|_2^2 + \lambda \, \| \mb{x} - \mathrm{D}_w(\mb{x})\|_2^2 \;,
by directly computing the update
.. math::
\mb{x}^{k+1} = (A^T A + \lambda \, I)^{-1} (A^T \mb{y} + \lambda \, \mb{z}^k) \;,
via conjugate gradient. The regularization :math:`\mb{z}^k = \mathrm{D}_w(\mb{x}^{k})` incorporates prior information, usually in the form of a denoiser model. In this case, the denoiser :math:`\mathrm{D}_w` is shared between all the stages of the network, requiring less memory than other unrolling methods. This also allows a different number of iterations to be used at testing time than were used in training.
| 0.967163 | 0.983769 |
.. _example_depend:
Example Dependencies
--------------------
Some examples use additional dependencies, which are listed in `examples_requirements.txt <https://github.com/lanl/scico/blob/main/examples/examples_requirements.txt>`_.
The additional requirements should be installed via pip, with the exception of ``astra-toolbox``,
which should be installed via conda:
::
conda install -c astra-toolbox astra-toolbox
pip install -r examples/examples_requirements.txt # Installs other example requirements
The dependencies can also be installed individually as required.
Note that ``astra-toolbox`` should be installed on a host with one or more CUDA GPUs to ensure
that the version with GPU support is installed.
Run Time
--------
Most of these examples have been constructed with sufficiently small test problems to
allow them to run to completion within 5 minutes or less on a reasonable workstation.
Note, however, that it was not feasible to construct meaningful examples of the training
of some of the deep learning algorithms that complete within a relatively short time;
the examples "CT Training and Reconstructions with MoDL" and "CT Training and
Reconstructions with ODP" in particular are much slower, and can require multiple hours
to run on a workstation with multiple GPUs.
|
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/examplenotes.rst
|
examplenotes.rst
|
.. _example_depend:
Example Dependencies
--------------------
Some examples use additional dependencies, which are listed in `examples_requirements.txt <https://github.com/lanl/scico/blob/main/examples/examples_requirements.txt>`_.
The additional requirements should be installed via pip, with the exception of ``astra-toolbox``,
which should be installed via conda:
::
conda install -c astra-toolbox astra-toolbox
pip install -r examples/examples_requirements.txt # Installs other example requirements
The dependencies can also be installed individually as required.
Note that ``astra-toolbox`` should be installed on a host with one or more CUDA GPUs to ensure
that the version with GPU support is installed.
Run Time
--------
Most of these examples have been constructed with sufficiently small test problems to
allow them to run to completion within 5 minutes or less on a reasonable workstation.
Note, however, that it was not feasible to construct meaningful examples of the training
of some of the deep learning algorithms that complete within a relatively short time;
the examples "CT Training and Reconstructions with MoDL" and "CT Training and
Reconstructions with ODP" in particular are much slower, and can require multiple hours
to run on a workstation with multiple GPUs.
|
| 0.908194 | 0.367497 |
.. _optimizer:
Optimization Algorithms
=======================
ADMM
----
The Alternating Direction Method of Multipliers (ADMM)
:cite:`glowinski-1975-approximation` :cite:`gabay-1976-dual` is an
algorithm for minimizing problems of the form
.. math::
:label: eq:admm_prob
\argmin_{\mb{x}, \mb{z}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}
\; \acute{A} \mb{x} + \acute{B} \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals, :math:`\acute{A}` and :math:`\acute{B}` are linear operators,
and :math:`\mb{c}` is a constant vector. (For a thorough introduction and
overview, see :cite:`boyd-2010-distributed`.)
The SCICO ADMM solver, :class:`.ADMM`, solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_{i=1}^N g_i(C_i \mb{x}) \;,
where :math:`f` and the :math:`g_i` are instances of :class:`.Functional`,
and the :math:`C_i` are :class:`.LinearOperator`, by defining
.. math::
g(\mb{z}) = \sum_{i=1}^N g_i(\mb{z}_i) \qquad \mb{z}_i = C_i \mb{x}
in :eq:`eq:admm_prob`, corresponding to defining
.. math::
\acute{A} = \left( \begin{array}{c} C_0 \\ C_1 \\ C_2 \\
\vdots \end{array} \right) \quad
\acute{B} = \left( \begin{array}{cccc}
-I & 0 & 0 & \ldots \\
0 & -I & 0 & \ldots \\
0 & 0 & -I & \ldots \\
\vdots & \vdots & \vdots & \ddots
\end{array} \right) \quad
\mb{z} = \left( \begin{array}{c} \mb{z}_0 \\ \mb{z}_1 \\ \mb{z}_2 \\
\vdots \end{array} \right) \quad
\mb{c} = \left( \begin{array}{c} 0 \\ 0 \\ 0 \\
\vdots \end{array} \right) \;.
In :class:`.ADMM`, :math:`f` is a :class:`.Functional`, typically a
:class:`.Loss`, corresponding to the forward model of an imaging
problem, and the :math:`g_i` are :class:`.Functional`, typically
corresponding to a regularization term or constraint. Each of the
:math:`g_i` must have a proximal operator defined. It is also possible
to set ``f = None``, which corresponds to defining :math:`f = 0`,
i.e. the zero function.
Subproblem Solvers
^^^^^^^^^^^^^^^^^^
The most computationally expensive component of the ADMM iterations is typically
the :math:`\mb{x}`-update,
.. math::
:label: eq:admm_x_step
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_i \frac{\rho_i}{2}
\norm{\mb{z}^{(k)}_i - \mb{u}^{(k)}_i - C_i \mb{x}}_2^2 \;.
The available solvers for this problem are:
* :class:`.admm.GenericSubproblemSolver`
This is the default subproblem solver as it is applicable in all cases. It
is, however, only suitable for relatively small-scale problems as it makes use of
:func:`.solver.minimize`, which wraps :func:`scipy.optimize.minimize`.
* :class:`.admm.LinearSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`. It makes use of the conjugate
gradient method, and is significantly more efficient than
:class:`.admm.GenericSubproblemSolver` when it can be used.
* :class:`.admm.MatrixSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`, and :math:`A` and all of the
:math:`C_i` are diagonal (:class:`.Diagonal`) or matrix operators
(:class:`MatrixOperator`). It exploits a pre-computed matrix factorization
for a significantly more efficient solution than conjugate gradient.
* :class:`.admm.CircularConvolveSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W` and :math:`\mb{A}` and all
the :math:`C_i` s are circulant (i.e., diagonalized by the DFT).
* :class:`.admm.FBlockCircularConvolveSolver` and :class:`.admm.G0BlockCircularConvolveSolver`
These subproblem solvers can be used when the primary linear operator
is block-circulant (i.e., an operator with blocks that are diagonalized
by the DFT).
For more details of these solvers and how to specify them, see the API
reference page for :mod:`scico.optimize.admm`.
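As a concrete illustration, a minimal sketch of setting up an
:class:`.ADMM` solver for an :math:`\ell_2` data fidelity term with a
single regularization term (the operators, functionals, and parameter
values are placeholders chosen purely for illustration):
::
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss
from scico.optimize.admm import ADMM, LinearSubproblemSolver

A = linop.Identity((16, 16))              # placeholder forward operator
y, _ = scico.random.randn((16, 16))       # placeholder measured data
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.L1Norm()
C = linop.FiniteDifference(A.input_shape)
solver = ADMM(
    f=f,
    g_list=[g],
    C_list=[C],
    rho_list=[1.0],
    x0=snp.zeros(A.input_shape),
    maxiter=10,
    subproblem_solver=LinearSubproblemSolver(),
)
x_hat = solver.solve()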
Proximal ADMM
-------------
Proximal ADMM :cite:`deng-2015-global` is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; A \mb{x} + B \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`A` and :math:`B` are linear
operators. Although convergence per iteration is typically somewhat
worse than that of ADMM, the iterations can be much cheaper than that
of ADMM, giving Proximal ADMM competitive time convergence
performance.
The SCICO Proximal ADMM solver, :class:`.ProximalADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`A` and :math:`B` are required to be an instance of
:class:`.LinearOperator`.
Non-Linear Proximal ADMM
------------------------
Non-Linear Proximal ADMM :cite:`benning-2016-preconditioned` is an
algorithm for solving problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; H(\mb{x}, \mb{z}) = 0 \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`H` is a function of two vector variables.
The SCICO Non-Linear Proximal ADMM solver, :class:`.NonLinearPADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`H` is required to be an instance of :class:`.Function`.
Linearized ADMM
---------------
Linearized ADMM :cite:`yang-2012-linearized`
:cite:`parikh-2014-proximal` (Sec. 4.4.2) is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals. Although convergence per iteration is typically
significantly worse than that of ADMM, the :math:`\mb{x}`-update can
be much cheaper than that of ADMM, giving Linearized ADMM competitive
time convergence performance.
The SCICO Linearized ADMM solver, :class:`.LinearizedADMM`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.LinearOperator`.
PDHG
----
The Primal–Dual Hybrid Gradient (PDHG) algorithm
:cite:`esser-2010-general` :cite:`chambolle-2010-firstorder`
:cite:`pock-2011-diagonal` solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals. The algorithm has advantages over ADMM similar to those of Linearized ADMM, but typically exhibits better convergence properties.
The SCICO PDHG solver, :class:`.PDHG`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.Operator` or :class:`.LinearOperator`.
PGM
---
The Proximal Gradient Method (PGM) :cite:`daubechies-2004-iterative`
:cite:`beck-2010-gradient` and Accelerated Proximal Gradient Method
(AcceleratedPGM) :cite:`beck-2009-fast` are algorithms for minimizing
problems of the form
.. math::
\argmin_{\mb{x}} f(\mb{x}) + g(\mb{x}) \;,
where :math:`g` is convex and :math:`f` is smooth and convex. The
corresponding SCICO solvers are :class:`.PGM` and :class:`.AcceleratedPGM`
respectively. In most cases :class:`.AcceleratedPGM` is expected to provide
faster convergence. In both of these classes, :math:`f` and :math:`g` are
both of type :class:`.Functional`, where :math:`f` must be differentiable,
and :math:`g` must have a proximal operator defined.
While ADMM provides significantly more flexibility than PGM, and often
converges faster, the latter is preferred when solving the ADMM
:math:`\mb{x}`-step is very computationally expensive, such as in the case of
:math:`f(\mb{x}) = \norm{\mb{A} \mb{x} - \mb{y}}^2_W` where :math:`A` is
large and does not have any special structure that would allow an efficient
solution of :eq:`eq:admm_x_step`.
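A minimal sketch of setting up an accelerated PGM solver (the
operators, functionals, and parameter values are placeholders chosen
purely for illustration):
::
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss
from scico.optimize import AcceleratedPGM

A = linop.Identity((16,))                 # placeholder forward operator
y, _ = scico.random.randn((16,))
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.NonNegativeIndicator()
solver = AcceleratedPGM(f=f, g=g, L0=10.0, x0=snp.zeros(A.input_shape), maxiter=20)
x_hat = solver.solve()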
Step Size Options
^^^^^^^^^^^^^^^^^
The step size (usually referred to in terms of its reciprocal,
:math:`L`) for the gradient descent in :class:`.PGM` can be adapted via
Barzilai-Borwein methods (also called spectral methods) and iterative
line search methods.
The available step size policy classes are:
* :class:`.BBStepSize`
This implements the step size adaptation based on the Barzilai-Borwein
method :cite:`barzilai-1988-stepsize`. The step size :math:`\alpha` is
estimated as
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha = \frac{\mb{\Delta x}^T \mb{\Delta g}}{\mb{\Delta g}^T
\mb{\Delta g}} \;.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.AdaptiveBBStepSize`
This implements the adaptive Barzilai-Borwein method as introduced in
:cite:`zhou-2006-adaptive`. The adaptive step size rule computes
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha^{\mathrm{BB1}} = \frac{\mb{\Delta x}^T \mb{\Delta x}}
{\mb{\Delta x}^T \mb{\Delta g}} \; \\
\alpha^{\mathrm{BB2}} = \frac{\mb{\Delta x}^T \mb{\Delta g}}
{\mb{\Delta g}^T \mb{\Delta g}} \;.
The determination of the new step size is made via the rule
.. math::
\alpha = \left\{ \begin{array}{ll} \alpha^{\mathrm{BB2}} &
\mathrm{~if~} \alpha^{\mathrm{BB2}} / \alpha^{\mathrm{BB1}}
< \kappa \; \\
\alpha^{\mathrm{BB1}} & \mathrm{~otherwise} \end{array}
\right . \;,
with :math:`\kappa \in (0, 1)`.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.LineSearchStepSize`
This implements the line search strategy described in :cite:`beck-2009-fast`.
This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y}
\right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
current solution or current extrapolation (if using :class:`.AcceleratedPGM`).
* :class:`.RobustLineSearchStepSize`
This implements the robust line search strategy described in
:cite:`florea-2017-robust`. This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y} \right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
auxiliary extrapolation state. Note that this should only be used
with :class:`.AcceleratedPGM`.
For more details of these step size managers and how to specify them, see
the API reference page for :mod:`scico.optimize.pgm`.
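As a concrete numerical illustration of the Barzilai-Borwein rule
described above, the reciprocal step size can be computed directly
from two successive iterates and gradients (a standalone sketch, not
the :class:`.BBStepSize` implementation itself):
::
import jax.numpy as jnp

def bb_reciprocal_step(x_k, x_km1, grad_k, grad_km1):
    # alpha = (dx^T dg) / (dg^T dg); the solver uses L = 1 / alpha
    dx = x_k - x_km1
    dg = grad_k - grad_km1
    alpha = jnp.vdot(dx, dg) / jnp.vdot(dg, dg)
    return 1.0 / alpha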
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/optimizer.rst
|
optimizer.rst
|
.. _optimizer:
Optimization Algorithms
=======================
ADMM
----
The Alternating Direction Method of Multipliers (ADMM)
:cite:`glowinski-1975-approximation` :cite:`gabay-1976-dual` is an
algorithm for minimizing problems of the form
.. math::
:label: eq:admm_prob
\argmin_{\mb{x}, \mb{z}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}
\; \acute{A} \mb{x} + \acute{B} \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals, :math:`\acute{A}` and :math:`\acute{B}` are linear operators,
and :math:`\mb{c}` is a constant vector. (For a thorough introduction and
overview, see :cite:`boyd-2010-distributed`.)
The SCICO ADMM solver, :class:`.ADMM`, solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_{i=1}^N g_i(C_i \mb{x}) \;,
where :math:`f` and the :math:`g_i` are instances of :class:`.Functional`,
and the :math:`C_i` are :class:`.LinearOperator`, by defining
.. math::
g(\mb{z}) = \sum_{i=1}^N g_i(\mb{z}_i) \qquad \mb{z}_i = C_i \mb{x}
in :eq:`eq:admm_prob`, corresponding to defining
.. math::
\acute{A} = \left( \begin{array}{c} C_0 \\ C_1 \\ C_2 \\
\vdots \end{array} \right) \quad
\acute{B} = \left( \begin{array}{cccc}
-I & 0 & 0 & \ldots \\
0 & -I & 0 & \ldots \\
0 & 0 & -I & \ldots \\
\vdots & \vdots & \vdots & \ddots
\end{array} \right) \quad
\mb{z} = \left( \begin{array}{c} \mb{z}_0 \\ \mb{z}_1 \\ \mb{z}_2 \\
\vdots \end{array} \right) \quad
\mb{c} = \left( \begin{array}{c} 0 \\ 0 \\ 0 \\
\vdots \end{array} \right) \;.
In :class:`.ADMM`, :math:`f` is a :class:`.Functional`, typically a
:class:`.Loss`, corresponding to the forward model of an imaging
problem, and the :math:`g_i` are :class:`.Functional`, typically
corresponding to a regularization term or constraint. Each of the
:math:`g_i` must have a proximal operator defined. It is also possible
to set ``f = None``, which corresponds to defining :math:`f = 0`,
i.e. the zero function.
Subproblem Solvers
^^^^^^^^^^^^^^^^^^
The most computationally expensive component of the ADMM iterations is typically
the :math:`\mb{x}`-update,
.. math::
:label: eq:admm_x_step
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_i \frac{\rho_i}{2}
\norm{\mb{z}^{(k)}_i - \mb{u}^{(k)}_i - C_i \mb{x}}_2^2 \;.
The available solvers for this problem are:
* :class:`.admm.GenericSubproblemSolver`
This is the default subproblem solver as it is applicable in all cases. It
is, however, only suitable for relatively small-scale problems as it makes use of
:func:`.solver.minimize`, which wraps :func:`scipy.optimize.minimize`.
* :class:`.admm.LinearSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`. It makes use of the conjugate
gradient method, and is significantly more efficient than
:class:`.admm.GenericSubproblemSolver` when it can be used.
* :class:`.admm.MatrixSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`, and :math:`A` and all of the
:math:`C_i` are diagonal (:class:`.Diagonal`) or matrix operators
(:class:`MatrixOperator`). It exploits a pre-computed matrix factorization
for a significantly more efficient solution than conjugate gradient.
* :class:`.admm.CircularConvolveSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W` and :math:`\mb{A}` and all
the :math:`C_i` s are circulant (i.e., diagonalized by the DFT).
* :class:`.admm.FBlockCircularConvolveSolver` and :class:`.admm.G0BlockCircularConvolveSolver`
These subproblem solvers can be used when the primary linear operator
is block-circulant (i.e., an operator with blocks that are diagonalized
by the DFT).
For more details of these solvers and how to specify them, see the API
reference page for :mod:`scico.optimize.admm`.
Proximal ADMM
-------------
Proximal ADMM :cite:`deng-2015-global` is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; A \mb{x} + B \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`A` and :math:`B` are linear
operators. Although convergence per iteration is typically somewhat
worse than that of ADMM, the iterations can be much cheaper than that
of ADMM, giving Proximal ADMM competitive time convergence
performance.
The SCICO Proximal ADMM solver, :class:`.ProximalADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`A` and :math:`B` are required to be an instance of
:class:`.LinearOperator`.
Non-Linear Proximal ADMM
------------------------
Non-Linear Proximal ADMM :cite:`benning-2016-preconditioned` is an
algorithm for solving problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; H(\mb{x}, \mb{z}) = 0 \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`H` is a function of two vector variables.
The SCICO Non-Linear Proximal ADMM solver, :class:`.NonLinearPADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`H` is required to be an instance of :class:`.Function`.
Linearized ADMM
---------------
Linearized ADMM :cite:`yang-2012-linearized`
:cite:`parikh-2014-proximal` (Sec. 4.4.2) is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals. Although convergence per iteration is typically
significantly worse than that of ADMM, the :math:`\mb{x}`-update can
be much cheaper than that of ADMM, giving Linearized ADMM competitive
time convergence performance.
The SCICO Linearized ADMM solver, :class:`.LinearizedADMM`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.LinearOperator`.
PDHG
----
The Primal–Dual Hybrid Gradient (PDHG) algorithm
:cite:`esser-2010-general` :cite:`chambolle-2010-firstorder`
:cite:`pock-2011-diagonal` solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals. The algorithm has advantages over ADMM similar to those of Linearized ADMM, but typically exhibits better convergence properties.
The SCICO PDHG solver, :class:`.PDHG`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.Operator` or :class:`.LinearOperator`.
PGM
---
The Proximal Gradient Method (PGM) :cite:`daubechies-2004-iterative`
:cite:`beck-2010-gradient` and Accelerated Proximal Gradient Method
(AcceleratedPGM) :cite:`beck-2009-fast` are algorithms for minimizing
problems of the form
.. math::
\argmin_{\mb{x}} f(\mb{x}) + g(\mb{x}) \;,
where :math:`g` is convex and :math:`f` is smooth and convex. The
corresponding SCICO solvers are :class:`.PGM` and :class:`.AcceleratedPGM`
respectively. In most cases :class:`.AcceleratedPGM` is expected to provide
faster convergence. In both of these classes, :math:`f` and :math:`g` are
both of type :class:`.Functional`, where :math:`f` must be differentiable,
and :math:`g` must have a proximal operator defined.
While ADMM provides significantly more flexibility than PGM, and often
converges faster, the latter is preferred when solving the ADMM
:math:`\mb{x}`-step is very computationally expensive, such as in the case of
:math:`f(\mb{x}) = \norm{\mb{A} \mb{x} - \mb{y}}^2_W` where :math:`A` is
large and does not have any special structure that would allow an efficient
solution of :eq:`eq:admm_x_step`.
Step Size Options
^^^^^^^^^^^^^^^^^
The step size (usually referred to in terms of its reciprocal,
:math:`L`) for the gradient descent in :class:`.PGM` can be adapted via
Barzilai-Borwein methods (also called spectral methods) and iterative
line search methods.
The available step size policy classes are:
* :class:`.BBStepSize`
This implements the step size adaptation based on the Barzilai-Borwein
method :cite:`barzilai-1988-stepsize`. The step size :math:`\alpha` is
estimated as
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha = \frac{\mb{\Delta x}^T \mb{\Delta g}}{\mb{\Delta g}^T
\mb{\Delta g}} \;.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.AdaptiveBBStepSize`
This implements the adaptive Barzilai-Borwein method as introduced in
:cite:`zhou-2006-adaptive`. The adaptive step size rule computes
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha^{\mathrm{BB1}} = \frac{\mb{\Delta x}^T \mb{\Delta x}}
{\mb{\Delta x}^T \mb{\Delta g}} \; \\
\alpha^{\mathrm{BB2}} = \frac{\mb{\Delta x}^T \mb{\Delta g}}
{\mb{\Delta g}^T \mb{\Delta g}} \;.
The determination of the new step size is made via the rule
.. math::
\alpha = \left\{ \begin{array}{ll} \alpha^{\mathrm{BB2}} &
\mathrm{~if~} \alpha^{\mathrm{BB2}} / \alpha^{\mathrm{BB1}}
< \kappa \; \\
\alpha^{\mathrm{BB1}} & \mathrm{~otherwise} \end{array}
\right . \;,
with :math:`\kappa \in (0, 1)`.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.LineSearchStepSize`
This implements the line search strategy described in :cite:`beck-2009-fast`.
This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y}
\right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
current solution or current extrapolation (if using :class:`.AcceleratedPGM`).
* :class:`.RobustLineSearchStepSize`
This implements the robust line search strategy described in
:cite:`florea-2017-robust`. This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y} \right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
auxiliary extrapolation state. Note that this should only be used
with :class:`.AcceleratedPGM`.
For more details of these step size managers and how to specify them, see
the API reference page for :mod:`scico.optimize.pgm`.
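As an illustration of selecting one of these policies, a minimal
sketch of constructing an accelerated PGM solver with the adaptive
Barzilai-Borwein rule (the problem setup is a placeholder, and the
``step_size`` keyword is an assumption of this sketch; see the
:mod:`scico.optimize.pgm` API reference for the definitive interface):
::
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss
from scico.optimize import AcceleratedPGM
from scico.optimize.pgm import AdaptiveBBStepSize

A = linop.Identity((16,))                 # placeholder forward operator
y, _ = scico.random.randn((16,))
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.L1Norm()
solver = AcceleratedPGM(
    f=f, g=g, L0=10.0, x0=snp.zeros(A.input_shape), maxiter=20,
    step_size=AdaptiveBBStepSize(),       # assumed keyword for the step size policy
)
x_hat = solver.solve()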
| 0.929568 | 0.807726 |
# Construct an index README file and a docs example index file from
# source index file "scripts/index.rst".
# Run as
# python makeindex.py
import re
from pathlib import Path
import nbformat as nbf
import py2jn
import pypandoc
src = "scripts/index.rst"
# Make dict mapping script names to docstring header titles
titles = {}
scripts = list(Path("scripts").glob("*py"))
for s in scripts:
prevline = None
with open(s, "r") as sfile:
for line in sfile:
if line[0:3] == "===":
titles[s.name] = prevline.rstrip()
break
else:
prevline = line
# Build README in scripts directory
dst = "scripts/README.rst"
with open(dst, "w") as dstfile:
with open(src, "r") as srcfile:
for line in srcfile:
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+.py)", line)
if m:
prespace = m.group(1)
name = m.group(2)
title = titles[name]
print(
"%s`%s <%s>`_\n%s %s" % (prespace, name, name, prespace, title), file=dstfile
)
else:
print(line, end="", file=dstfile)
# Build notebooks index file in notebooks directory
dst = "notebooks/index.ipynb"
rst_text = ""
with open(src, "r") as srcfile:
for line in srcfile:
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+).py", line)
if m:
prespace = m.group(1)
name = m.group(2)
title = titles[name + ".py"]
rst_text += "%s- `%s <%s.ipynb>`_\n" % (prespace, title, name)
else:
rst_text += line
# Convert text from rst to markdown
md_format = "markdown_github+tex_math_dollars+fenced_code_attributes"
md_text = pypandoc.convert_text(rst_text, md_format, format="rst", extra_args=["--atx-headers"])
md_text = '"""' + md_text + '"""'
# Convert from python to notebook format and write notebook
nb = py2jn.py_string_to_notebook(md_text)
py2jn.tools.write_notebook(nb, dst, nbver=4)
nb = nbf.read(dst, nbf.NO_CONVERT)
nb.metadata = {"nbsphinx": {"orphan": True}}
nbf.write(nb, dst)
# Build examples index for docs
dst = "../docs/source/examples.rst"
prfx = "examples/"
with open(dst, "w") as dstfile:
print(".. _example_notebooks:\n", file=dstfile)
with open(src, "r") as srcfile:
for line in srcfile:
# Add toctree and include statements after main heading
if line[0:3] == "===":
print(line, end="", file=dstfile)
print("\n.. toctree::\n :maxdepth: 1", file=dstfile)
print("\n.. include:: include/examplenotes.rst", file=dstfile)
continue
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+).py", line)
if m:
print(" " + prfx + m.group(2), file=dstfile)
else:
print(line, end="", file=dstfile)
# Add toctree statement after section headings
if line[0:3] == line[0] * 3 and line[0] in ["=", "-", "^"]:
print("\n.. toctree::\n :maxdepth: 1", file=dstfile)
/scico-0.0.4.tar.gz/scico-0.0.4/examples/makeindex.py
import jax
import scico
import scico.numpy as snp
import scico.random
from scico import denoiser, functional, linop, loss, metric, plot
from scico.data import kodim23
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.solver import cg
from scico.util import device_info
"""
Define downsampling function.
"""
def downsample_image(img, rate):
img = snp.mean(snp.reshape(img, (-1, rate, img.shape[1], img.shape[2])), axis=1)
img = snp.mean(snp.reshape(img, (img.shape[0], -1, rate, img.shape[2])), axis=2)
return img
"""
Read a ground truth image.
"""
img = kodim23(asfloat=True)[160:416, 60:316]
img = jax.device_put(img)
"""
Create a test image by downsampling and adding Gaussian white noise.
"""
rate = 4 # downsampling rate
σ = 2e-2 # noise standard deviation
Afn = lambda x: downsample_image(x, rate=rate)
s = Afn(img)
input_shape = img.shape
output_shape = s.shape
noise, key = scico.random.randn(s.shape, seed=0)
sn = s + σ * noise
"""
Set up the PPP problem pseudo-functional. The DnCNN denoiser
:cite:`zhang-2017-dncnn` is used as a regularizer.
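In terms of an optimization problem, the setup below corresponds to
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;,$$
where $A$ is the downsampling operator and $R(\cdot)$ is a
pseudo-functional having the DnCNN denoiser as its proximal operator.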
"""
A = linop.LinearOperator(input_shape=input_shape, output_shape=output_shape, eval_fn=Afn)
f = loss.SquaredL2Loss(y=sn, A=A)
C = linop.Identity(input_shape=input_shape)
g = functional.DnCNN("17M")
"""
Compute a baseline solution via denoising of the pseudo-inverse of the
forward operator. This baseline solution is also used to initialize the
PPP solver.
"""
xpinv, info = cg(A.T @ A, A.T @ sn, snp.zeros(input_shape))
dncnn = denoiser.DnCNN("17M")
xden = dncnn(xpinv)
"""
Set up an ADMM solver and solve.
"""
ρ = 3.4e-2 # ADMM penalty parameter
maxiter = 12 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=xden,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 10}),
itstat_options={"display": True},
)
print(f"Solving on {device_info()}\n")
xppp = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
"""
Show reference and test images.
"""
fig = plot.figure(figsize=(8, 6))
ax0 = plot.plt.subplot2grid((1, rate + 1), (0, 0), colspan=rate)
plot.imview(img, title="Reference", fig=fig, ax=ax0)
ax1 = plot.plt.subplot2grid((1, rate + 1), (0, rate))
plot.imview(sn, title="Downsampled", fig=fig, ax=ax1)
fig.show()
"""
Show recovered full-resolution images.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=True, figsize=(21, 7))
plot.imview(xpinv, title="Pseudo-inverse: %.2f (dB)" % metric.psnr(img, xpinv), fig=fig, ax=ax[0])
plot.imview(
xden, title="Denoised pseudo-inverse: %.2f (dB)" % metric.psnr(img, xden), fig=fig, ax=ax[1]
)
plot.imview(xppp, title="PPP solution: %.2f (dB)" % metric.psnr(img, xppp), fig=fig, ax=ax[2])
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/superres_ppp_dncnn_admm.py
import os
from time import time
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_ct_data
"""
Prepare parallel processing. Set an arbitrary processor count (only
applies if a GPU is not available).
"""
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
"""
Read data from cache or generate if not available.
"""
N = 256 # phantom size
train_nimg = 536 # number of training images
test_nimg = 64 # number of testing images
nimg = train_nimg + test_nimg
n_projection = 45 # CT views
trdt, ttdt = load_ct_data(train_nimg, test_nimg, N, n_projection, verbose=True)
"""
Build training and testing structures. Inputs are the filtered
back-projected sinograms and outputs are the original generated foams.
Keep training and testing partitions.
"""
train_ds = {"image": trdt["fbp"], "label": trdt["img"]}
test_ds = {"image": ttdt["fbp"], "label": ttdt["img"]}
"""
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The model depth controls the levels of pooling in the
U-Net model. The block depth controls the number of layers at each level
of depth. The number of filters controls the number of filters at the
input and output levels and doubles (halves) at each pooling (unpooling)
operation. Better performance may be obtained by increasing depth, block
depth, number of filters or training epochs, but may require longer
training times.
"""
# model configuration
model_conf = {
"depth": 2,
"num_filters": 64,
"block_depth": 2,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "SGD",
"momentum": 0.9,
"batch_size": 16,
"num_epochs": 200,
"base_learning_rate": 1e-2,
"warmup_epochs": 0,
"log_every_steps": 1000,
"log": True,
}
"""
Construct UNet model.
"""
channels = train_ds["image"].shape[-1]
model = sflax.UNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
block_depth=model_conf["block_depth"],
)
"""
Run training loop.
"""
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "unet_ct_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
# Construct training object
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
"""
Evaluate on testing data.
"""
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"])
time_eval = time() - start_time
output = jax.numpy.clip(output, a_min=0, a_max=1.0)
"""
Compare trained model in terms of reconstruction time and data fidelity.
"""
snr_eval = metric.snr(test_ds["label"], output)
psnr_eval = metric.psnr(test_ds["label"], output)
print(
f"{'UNet training':15s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'UNet testing':15s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
"""
Plot comparison.
"""
key = jax.random.PRNGKey(123)
indx = jax.random.randint(key, shape=(1,), minval=0, maxval=test_nimg)[0]
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="FBP Reconstruction: \nSNR: %.2f (dB), MAE: %.3f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.mae(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="UNet Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.mae(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics. Statistics are only generated if a training
cycle was run (i.e., when not reading final epoch results from a checkpoint).
"""
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
jax.numpy.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
jax.numpy.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_unet_train_foam2.py
r"""
Image Deconvolution with TV Regularization (ADMM Solver)
========================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is a convolution operator, $\mathbf{y}$ is the blurred image,
$D$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
In this example the problem is solved via standard ADMM, while proximal
ADMM is used in a [companion example](deconv_tv_padmm.rst).
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
C = linop.Convolve(h=psf, input_shape=x_gt.shape)
Cx = C(x_gt) # blurred image
noise, key = scico.random.randn(Cx.shape, seed=0)
y = Cx + σ * noise
r"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is the convolution operator and $D$ is a finite difference
operator. This problem can be expressed as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; (1/2) \| \mathbf{y} -
C \mathbf{x} \|_2^2 + \lambda \| \mathbf{z} \|_{2,1} \;\;
\text{such that} \;\; \mathbf{z} = D \mathbf{x} \;,$$
which is easily written in the form of a standard ADMM problem.
This is a simpler splitting than that used in the
[companion example](deconv_tv_padmm.rst), but it requires the use of
conjugate gradient sub-iterations to solve the ADMM step associated with
the data fidelity term.
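Concretely, with scaled dual variable $\mathbf{u}$, the data fidelity step
of ADMM for this splitting amounts to solving the linear system
$$(C^T C + \rho D^T D) \mathbf{x} = C^T \mathbf{y} + \rho D^T (\mathbf{z}
- \mathbf{u}) \;,$$
which has no convenient closed-form solution for a general convolution $C$,
and is therefore solved by CG sub-iterations within the
`LinearSubproblemSolver` used below.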
"""
f = loss.SquaredL2Loss(y=y, A=C)
# Penalty parameters must be accounted for in the gi functions, not as
# additional inputs.
λ = 2.1e-2 # L21 norm regularization parameter
g = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
D = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
"""
Set up an ADMM solver object.
"""
ρ = 1.0e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[D],
rho_list=[ρ],
x0=C.adj(y),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc]
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(
solver.x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, solver.x), fig=fig, ax=ax[2]
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_admm.py
Usage Examples
==============
Organized by Application
------------------------
Computed Tomography
^^^^^^^^^^^^^^^^^^^
`ct_abel_tv_admm.py <ct_abel_tv_admm.py>`_
TV-Regularized Abel Inversion
`ct_abel_tv_admm_tune.py <ct_abel_tv_admm_tune.py>`_
Parameter Tuning for TV-Regularized Abel Inversion
`ct_astra_noreg_pcg.py <ct_astra_noreg_pcg.py>`_
CT Reconstruction with CG and PCG
`ct_astra_3d_tv_admm.py <ct_astra_3d_tv_admm.py>`_
3D TV-Regularized Sparse-View CT Reconstruction
`ct_astra_tv_admm.py <ct_astra_tv_admm.py>`_
TV-Regularized Sparse-View CT Reconstruction
`ct_astra_weighted_tv_admm.py <ct_astra_weighted_tv_admm.py>`_
TV-Regularized Low-Dose CT Reconstruction
`ct_svmbir_tv_multi.py <ct_svmbir_tv_multi.py>`_
TV-Regularized CT Reconstruction (Multiple Algorithms)
`ct_svmbir_ppp_bm3d_admm_cg.py <ct_svmbir_ppp_bm3d_admm_cg.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem Solver)
`ct_svmbir_ppp_bm3d_admm_prox.py <ct_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR Prox)
`ct_fan_svmbir_ppp_bm3d_admm_prox.py <ct_fan_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) Fan-Beam CT Reconstruction
`ct_astra_modl_train_foam2.py <ct_astra_modl_train_foam2.py>`_
CT Training and Reconstructions with MoDL
`ct_astra_odp_train_foam2.py <ct_astra_odp_train_foam2.py>`_
CT Training and Reconstructions with ODP
`ct_astra_unet_train_foam2.py <ct_astra_unet_train_foam2.py>`_
CT Training and Reconstructions with UNet
Deconvolution
^^^^^^^^^^^^^
`deconv_circ_tv_admm.py <deconv_circ_tv_admm.py>`_
Circulant Blur Image Deconvolution with TV Regularization
`deconv_tv_admm.py <deconv_tv_admm.py>`_
Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_tv_padmm.py <deconv_tv_padmm.py>`_
Image Deconvolution with TV Regularization (Proximal ADMM Solver)
`deconv_tv_admm_tune.py <deconv_tv_admm_tune.py>`_
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_microscopy_tv_admm.py <deconv_microscopy_tv_admm.py>`_
Deconvolution Microscopy (Single Channel)
`deconv_microscopy_allchn_tv_admm.py <deconv_microscopy_allchn_tv_admm.py>`_
Deconvolution Microscopy (All Channels)
`deconv_ppp_bm3d_admm.py <deconv_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Deconvolution (ADMM Solver)
`deconv_ppp_bm3d_pgm.py <deconv_ppp_bm3d_pgm.py>`_
PPP (with BM3D) Image Deconvolution (APGM Solver)
`deconv_ppp_dncnn_admm.py <deconv_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Deconvolution (ADMM Solver)
`deconv_ppp_dncnn_padmm.py <deconv_ppp_dncnn_padmm.py>`_
PPP (with DnCNN) Image Deconvolution (Proximal ADMM Solver)
`deconv_ppp_bm4d_admm.py <deconv_ppp_bm4d_admm.py>`_
PPP (with BM4D) Volume Deconvolution
`deconv_modl_train_foam1.py <deconv_modl_train_foam1.py>`_
Deconvolution Training and Reconstructions with MoDL
`deconv_odp_train_foam1.py <deconv_odp_train_foam1.py>`_
Deconvolution Training and Reconstructions with ODP
Sparse Coding
^^^^^^^^^^^^^
`sparsecode_admm.py <sparsecode_admm.py>`_
Non-Negative Basis Pursuit DeNoising (ADMM)
`sparsecode_conv_admm.py <sparsecode_conv_admm.py>`_
Convolutional Sparse Coding (ADMM)
`sparsecode_conv_md_admm.py <sparsecode_conv_md_admm.py>`_
Convolutional Sparse Coding with Mask Decoupling (ADMM)
`sparsecode_pgm.py <sparsecode_pgm.py>`_
Basis Pursuit DeNoising (APGM)
`sparsecode_poisson_pgm.py <sparsecode_poisson_pgm.py>`_
Non-negative Poisson Loss Reconstruction (APGM)
Miscellaneous
^^^^^^^^^^^^^
`demosaic_ppp_bm3d_admm.py <demosaic_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Demosaicing
`superres_ppp_dncnn_admm.py <superres_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Superresolution
`denoise_l1tv_admm.py <denoise_l1tv_admm.py>`_
ℓ1 Total Variation Denoising
`denoise_tv_admm.py <denoise_tv_admm.py>`_
Total Variation Denoising (ADMM)
`denoise_tv_pgm.py <denoise_tv_pgm.py>`_
Total Variation Denoising with Constraint (APGM)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
`denoise_cplx_tv_nlpadmm.py <denoise_cplx_tv_nlpadmm.py>`_
Complex Total Variation Denoising with NLPADMM Solver
`denoise_cplx_tv_pdhg.py <denoise_cplx_tv_pdhg.py>`_
Complex Total Variation Denoising with PDHG Solver
`denoise_dncnn_universal.py <denoise_dncnn_universal.py>`_
Comparison of DnCNN Variants for Image Denoising
`diffusercam_tv_admm.py <diffusercam_tv_admm.py>`_
TV-Regularized 3D DiffuserCam Reconstruction
`video_rpca_admm.py <video_rpca_admm.py>`_
Video Decomposition via Robust PCA
`ct_astra_datagen_foam2.py <ct_astra_datagen_foam2.py>`_
CT Data Generation for NN Training
`deconv_datagen_bsds.py <deconv_datagen_bsds.py>`_
Blurred Data Generation (Natural Images) for NN Training
`deconv_datagen_foam1.py <deconv_datagen_foam1.py>`_
Blurred Data Generation (Foams) for NN Training
`denoise_datagen_bsds.py <denoise_datagen_bsds.py>`_
Noisy Data Generation for NN Training
Organized by Regularization
---------------------------
Plug and Play Priors
^^^^^^^^^^^^^^^^^^^^
`ct_svmbir_ppp_bm3d_admm_cg.py <ct_svmbir_ppp_bm3d_admm_cg.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem Solver)
`ct_svmbir_ppp_bm3d_admm_prox.py <ct_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR Prox)
`ct_fan_svmbir_ppp_bm3d_admm_prox.py <ct_fan_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) Fan-Beam CT Reconstruction
`deconv_ppp_bm3d_admm.py <deconv_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Deconvolution (ADMM Solver)
`deconv_ppp_bm3d_pgm.py <deconv_ppp_bm3d_pgm.py>`_
PPP (with BM3D) Image Deconvolution (APGM Solver)
`deconv_ppp_dncnn_admm.py <deconv_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Deconvolution (ADMM Solver)
`deconv_ppp_dncnn_padmm.py <deconv_ppp_dncnn_padmm.py>`_
PPP (with DnCNN) Image Deconvolution (Proximal ADMM Solver)
`deconv_ppp_bm4d_admm.py <deconv_ppp_bm4d_admm.py>`_
PPP (with BM4D) Volume Deconvolution
`demosaic_ppp_bm3d_admm.py <demosaic_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Demosaicing
`superres_ppp_dncnn_admm.py <superres_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Superresolution
Total Variation
^^^^^^^^^^^^^^^
`ct_abel_tv_admm.py <ct_abel_tv_admm.py>`_
TV-Regularized Abel Inversion
`ct_abel_tv_admm_tune.py <ct_abel_tv_admm_tune.py>`_
Parameter Tuning for TV-Regularized Abel Inversion
`ct_astra_tv_admm.py <ct_astra_tv_admm.py>`_
TV-Regularized Sparse-View CT Reconstruction
`ct_astra_3d_tv_admm.py <ct_astra_3d_tv_admm.py>`_
3D TV-Regularized Sparse-View CT Reconstruction
`ct_astra_weighted_tv_admm.py <ct_astra_weighted_tv_admm.py>`_
TV-Regularized Low-Dose CT Reconstruction
`ct_svmbir_tv_multi.py <ct_svmbir_tv_multi.py>`_
TV-Regularized CT Reconstruction (Multiple Algorithms)
`deconv_circ_tv_admm.py <deconv_circ_tv_admm.py>`_
Circulant Blur Image Deconvolution with TV Regularization
`deconv_tv_admm.py <deconv_tv_admm.py>`_
Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_tv_admm_tune.py <deconv_tv_admm_tune.py>`_
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_tv_padmm.py <deconv_tv_padmm.py>`_
Image Deconvolution with TV Regularization (Proximal ADMM Solver)
`deconv_microscopy_tv_admm.py <deconv_microscopy_tv_admm.py>`_
Deconvolution Microscopy (Single Channel)
`deconv_microscopy_allchn_tv_admm.py <deconv_microscopy_allchn_tv_admm.py>`_
Deconvolution Microscopy (All Channels)
`denoise_l1tv_admm.py <denoise_l1tv_admm.py>`_
ℓ1 Total Variation Denoising
`denoise_tv_admm.py <denoise_tv_admm.py>`_
Total Variation Denoising (ADMM)
`denoise_tv_pgm.py <denoise_tv_pgm.py>`_
Total Variation Denoising with Constraint (APGM)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
`denoise_cplx_tv_nlpadmm.py <denoise_cplx_tv_nlpadmm.py>`_
Complex Total Variation Denoising with NLPADMM Solver
`denoise_cplx_tv_pdhg.py <denoise_cplx_tv_pdhg.py>`_
Complex Total Variation Denoising with PDHG Solver
`diffusercam_tv_admm.py <diffusercam_tv_admm.py>`_
TV-Regularized 3D DiffuserCam Reconstruction
Sparsity
^^^^^^^^
`diffusercam_tv_admm.py <diffusercam_tv_admm.py>`_
TV-Regularized 3D DiffuserCam Reconstruction
`sparsecode_admm.py <sparsecode_admm.py>`_
Non-Negative Basis Pursuit DeNoising (ADMM)
`sparsecode_conv_admm.py <sparsecode_conv_admm.py>`_
Convolutional Sparse Coding (ADMM)
`sparsecode_conv_md_admm.py <sparsecode_conv_md_admm.py>`_
Convolutional Sparse Coding with Mask Decoupling (ADMM)
`sparsecode_pgm.py <sparsecode_pgm.py>`_
Basis Pursuit DeNoising (APGM)
`sparsecode_poisson_pgm.py <sparsecode_poisson_pgm.py>`_
Non-negative Poisson Loss Reconstruction (APGM)
`video_rpca_admm.py <video_rpca_admm.py>`_
Video Decomposition via Robust PCA
Machine Learning
^^^^^^^^^^^^^^^^
`ct_astra_datagen_foam2.py <ct_astra_datagen_foam2.py>`_
CT Data Generation for NN Training
`ct_astra_modl_train_foam2.py <ct_astra_modl_train_foam2.py>`_
CT Training and Reconstructions with MoDL
`ct_astra_odp_train_foam2.py <ct_astra_odp_train_foam2.py>`_
CT Training and Reconstructions with ODP
`ct_astra_unet_train_foam2.py <ct_astra_unet_train_foam2.py>`_
CT Training and Reconstructions with UNet
`deconv_datagen_bsds.py <deconv_datagen_bsds.py>`_
Blurred Data Generation (Natural Images) for NN Training
`deconv_datagen_foam1.py <deconv_datagen_foam1.py>`_
Blurred Data Generation (Foams) for NN Training
`deconv_modl_train_foam1.py <deconv_modl_train_foam1.py>`_
Deconvolution Training and Reconstructions with MoDL
`deconv_odp_train_foam1.py <deconv_odp_train_foam1.py>`_
Deconvolution Training and Reconstructions with ODP
`denoise_datagen_bsds.py <denoise_datagen_bsds.py>`_
Noisy Data Generation for NN Training
`denoise_dncnn_train_bsds.py <denoise_dncnn_train_bsds.py>`_
Training of DnCNN for Denoising
`denoise_dncnn_universal.py <denoise_dncnn_universal.py>`_
Comparison of DnCNN Variants for Image Denoising
Organized by Optimization Algorithm
-----------------------------------
ADMM
^^^^
`ct_abel_tv_admm.py <ct_abel_tv_admm.py>`_
TV-Regularized Abel Inversion
`ct_abel_tv_admm_tune.py <ct_abel_tv_admm_tune.py>`_
Parameter Tuning for TV-Regularized Abel Inversion
`ct_astra_tv_admm.py <ct_astra_tv_admm.py>`_
TV-Regularized Sparse-View CT Reconstruction
`ct_astra_3d_tv_admm.py <ct_astra_3d_tv_admm.py>`_
3D TV-Regularized Sparse-View CT Reconstruction
`ct_astra_weighted_tv_admm.py <ct_astra_weighted_tv_admm.py>`_
TV-Regularized Low-Dose CT Reconstruction
`ct_svmbir_tv_multi.py <ct_svmbir_tv_multi.py>`_
TV-Regularized CT Reconstruction (Multiple Algorithms)
`ct_svmbir_ppp_bm3d_admm_cg.py <ct_svmbir_ppp_bm3d_admm_cg.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem Solver)
`ct_svmbir_ppp_bm3d_admm_prox.py <ct_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR Prox)
`ct_fan_svmbir_ppp_bm3d_admm_prox.py <ct_fan_svmbir_ppp_bm3d_admm_prox.py>`_
PPP (with BM3D) Fan-Beam CT Reconstruction
`deconv_circ_tv_admm.py <deconv_circ_tv_admm.py>`_
Circulant Blur Image Deconvolution with TV Regularization
`deconv_tv_admm.py <deconv_tv_admm.py>`_
Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_tv_admm_tune.py <deconv_tv_admm_tune.py>`_
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
`deconv_microscopy_tv_admm.py <deconv_microscopy_tv_admm.py>`_
Deconvolution Microscopy (Single Channel)
`deconv_microscopy_allchn_tv_admm.py <deconv_microscopy_allchn_tv_admm.py>`_
Deconvolution Microscopy (All Channels)
`deconv_ppp_bm3d_admm.py <deconv_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Deconvolution (ADMM Solver)
`deconv_ppp_dncnn_admm.py <deconv_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Deconvolution (ADMM Solver)
`deconv_ppp_bm4d_admm.py <deconv_ppp_bm4d_admm.py>`_
PPP (with BM4D) Volume Deconvolution
`diffusercam_tv_admm.py <diffusercam_tv_admm.py>`_
TV-Regularized 3D DiffuserCam Reconstruction
`sparsecode_admm.py <sparsecode_admm.py>`_
Non-Negative Basis Pursuit DeNoising (ADMM)
`sparsecode_conv_admm.py <sparsecode_conv_admm.py>`_
Convolutional Sparse Coding (ADMM)
`sparsecode_conv_md_admm.py <sparsecode_conv_md_admm.py>`_
Convolutional Sparse Coding with Mask Decoupling (ADMM)
`demosaic_ppp_bm3d_admm.py <demosaic_ppp_bm3d_admm.py>`_
PPP (with BM3D) Image Demosaicing
`superres_ppp_dncnn_admm.py <superres_ppp_dncnn_admm.py>`_
PPP (with DnCNN) Image Superresolution
`denoise_l1tv_admm.py <denoise_l1tv_admm.py>`_
ℓ1 Total Variation Denoising
`denoise_tv_admm.py <denoise_tv_admm.py>`_
Total Variation Denoising (ADMM)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
`video_rpca_admm.py <video_rpca_admm.py>`_
Video Decomposition via Robust PCA
Linearized ADMM
^^^^^^^^^^^^^^^
`ct_svmbir_tv_multi.py <ct_svmbir_tv_multi.py>`_
TV-Regularized CT Reconstruction (Multiple Algorithms)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
Proximal ADMM
^^^^^^^^^^^^^
`deconv_tv_padmm.py <deconv_tv_padmm.py>`_
Image Deconvolution with TV Regularization (Proximal ADMM Solver)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
`denoise_cplx_tv_nlpadmm.py <denoise_cplx_tv_nlpadmm.py>`_
Complex Total Variation Denoising with NLPADMM Solver
`deconv_ppp_dncnn_padmm.py <deconv_ppp_dncnn_padmm.py>`_
PPP (with DnCNN) Image Deconvolution (Proximal ADMM Solver)
Non-linear Proximal ADMM
^^^^^^^^^^^^^^^^^^^^^^^^
`denoise_cplx_tv_nlpadmm.py <denoise_cplx_tv_nlpadmm.py>`_
Complex Total Variation Denoising with NLPADMM Solver
PDHG
^^^^
`ct_svmbir_tv_multi.py <ct_svmbir_tv_multi.py>`_
TV-Regularized CT Reconstruction (Multiple Algorithms)
`denoise_tv_multi.py <denoise_tv_multi.py>`_
Comparison of Optimization Algorithms for Total Variation Denoising
`denoise_cplx_tv_pdhg.py <denoise_cplx_tv_pdhg.py>`_
Complex Total Variation Denoising with PDHG Solver
PGM
^^^
`deconv_ppp_bm3d_pgm.py <deconv_ppp_bm3d_pgm.py>`_
PPP (with BM3D) Image Deconvolution (APGM Solver)
`sparsecode_pgm.py <sparsecode_pgm.py>`_
Basis Pursuit DeNoising (APGM)
`sparsecode_poisson_pgm.py <sparsecode_poisson_pgm.py>`_
Non-negative Poisson Loss Reconstruction (APGM)
`denoise_tv_pgm.py <denoise_tv_pgm.py>`_
Total Variation Denoising with Constraint (APGM)
PCG
^^^
`ct_astra_noreg_pcg.py <ct_astra_noreg_pcg.py>`_
CT Reconstruction with CG and PCG
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/README.rst
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up the forward operator and a test signal consisting of a blurred
signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;$$
where $R(\cdot)$ is a pseudo-functional having the DnCNN denoiser as its
proximal operator. The problem is solved via ADMM, using the standard
variable splitting for problems of this form, which requires the use of
conjugate gradient sub-iterations in the ADMM step that involves the data
fidelity term.
"""
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.DnCNN("17M")
C = linop.Identity(x_gt.shape)
"""
Set up ADMM solver.
"""
ρ = 0.2 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 30}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_dncnn_admm.py
|
deconv_ppp_dncnn_admm.py
|
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;$$
where $R(\cdot)$ is a pseudo-functional having the DnCNN denoiser as its
proximal operator. The problem is solved via ADMM, using the standard
variable splitting for problems of this form, which requires the use of
conjugate gradient sub-iterations in the ADMM step that involves the data
fidelity term.
"""
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.DnCNN("17M")
C = linop.Identity(x_gt.shape)
"""
Set up ADMM solver.
"""
ρ = 0.2 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 30}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
| 0.834204 | 0.660487 |
r"""
ℓ1 Total Variation Denoising
============================
This example demonstrates impulse noise removal via ℓ1 total variation
:cite:`alliney-1992-digital` :cite:`esser-2010-primal` (Sec. 2.4.4)
(i.e. total variation regularization with an ℓ1 data fidelity term),
minimizing the functional
$$\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{y} - \mathbf{x}
\|_1 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $\mathbf{y}$ is the noisy image, $C$ is a 2D finite difference
operator, and $\mathbf{x}$ is the denoised image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import spnoise
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
from scipy.ndimage import median_filter
"""
Create a ground truth image and impose salt & pepper noise to create a
noisy test image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = 0.5 * x_gt / x_gt.max()
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
y = spnoise(x_gt, 0.5)
"""
Denoise with median filtering.
"""
x_med = median_filter(y, size=(5, 5))
"""
Denoise with ℓ1 total variation.
"""
λ = 1.5e0
g_loss = loss.Loss(y=y, f=functional.L1Norm())
g_tv = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=None,
g_list=[g_loss, g_tv],
C_list=[linop.Identity(input_shape=y.shape), C],
rho_list=[5e0, 5e0],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.0))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(13, 12))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy image", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(
x_med,
title=f"Median filtering: {metric.psnr(x_gt, x_med):.2f} (dB)",
fig=fig,
ax=ax[1, 0],
**plt_args,
)
plot.imview(
x_tv,
title=f"ℓ1-TV denoising: {metric.psnr(x_gt, x_tv):.2f} (dB)",
fig=fig,
ax=ax[1, 1],
**plt_args,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_l1tv_admm.py
|
denoise_l1tv_admm.py
|
r"""
ℓ1 Total Variation Denoising
============================
This example demonstrates impulse noise removal via ℓ1 total variation
:cite:`alliney-1992-digital` :cite:`esser-2010-primal` (Sec. 2.4.4)
(i.e. total variation regularization with an ℓ1 data fidelity term),
minimizing the functional
$$\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{y} - \mathbf{x}
\|_1 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $\mathbf{y}$ is the noisy image, $C$ is a 2D finite difference
operator, and $\mathbf{x}$ is the denoised image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import spnoise
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
from scipy.ndimage import median_filter
"""
Create a ground truth image and impose salt & pepper noise to create a
noisy test image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = 0.5 * x_gt / x_gt.max()
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
y = spnoise(x_gt, 0.5)
"""
Denoise with median filtering.
"""
x_med = median_filter(y, size=(5, 5))
"""
Denoise with ℓ1 total variation.
"""
λ = 1.5e0
g_loss = loss.Loss(y=y, f=functional.L1Norm())
g_tv = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=None,
g_list=[g_loss, g_tv],
C_list=[linop.Identity(input_shape=y.shape), C],
rho_list=[5e0, 5e0],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.0))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(13, 12))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy image", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(
x_med,
title=f"Median filtering: {metric.psnr(x_gt, x_med):.2f} (dB)",
fig=fig,
ax=ax[1, 0],
**plt_args,
)
plot.imview(
x_tv,
title=f"ℓ1-TV denoising: {metric.psnr(x_gt, x_tv):.2f} (dB)",
fig=fig,
ax=ax[1, 1],
**plt_args,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.915067 | 0.931618 |
r"""
Non-Negative Basis Pursuit DeNoising (ADMM)
===========================================
This example demonstrates the solution of a non-negative sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x} \|_2^2
+ \lambda \| \mathbf{x} \|_1 + I(\mathbf{x} \geq 0) \;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
$\mathbf{x}$ is the sparse representation, and $I(\mathbf{x} \geq 0)$
is the non-negative indicator.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, MatrixSubproblemSolver
from scico.util import device_info
"""
Create random dictionary, reference random sparse representation, and
test signal consisting of the synthesis of the reference sparse
representation.
"""
m = 32 # signal size
n = 128 # dictionary size
s = 10 # sparsity level
np.random.seed(1)
D = np.random.randn(m, n)
D = D / np.linalg.norm(D, axis=0, keepdims=True) # normalize dictionary
xt = np.zeros(n) # true signal
idx = np.random.randint(low=0, high=n, size=s) # support of xt
xt[idx] = np.random.rand(s)
y = D @ xt + 5e-2 * np.random.randn(m) # synthetic signal
xt = jax.device_put(xt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and ADMM solver object.
"""
lmbda = 1e-1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g_list = [lmbda * functional.L1Norm(), functional.NonNegativeIndicator()]
C_list = [linop.Identity((n)), linop.Identity((n))]
rho_list = [1.0, 1.0]
maxiter = 100 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=g_list,
C_list=C_list,
rho_list=rho_list,
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=MatrixSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
"""
Plot the recovered coefficients and signal.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((xt, solver.x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((D @ xt, y, D @ solver.x)).T,
title="Signal",
lgnd=("Ground Truth", "Noisy", "Recovered"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_admm.py
|
sparsecode_admm.py
|
r"""
Non-Negative Basis Pursuit DeNoising (ADMM)
===========================================
This example demonstrates the solution of a non-negative sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x} \|_2^2
+ \lambda \| \mathbf{x} \|_1 + I(\mathbf{x} \geq 0) \;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
$\mathbf{x}$ is the sparse representation, and $I(\mathbf{x} \geq 0)$
is the non-negative indicator.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, MatrixSubproblemSolver
from scico.util import device_info
"""
Create random dictionary, reference random sparse representation, and
test signal consisting of the synthesis of the reference sparse
representation.
"""
m = 32 # signal size
n = 128 # dictionary size
s = 10 # sparsity level
np.random.seed(1)
D = np.random.randn(m, n)
D = D / np.linalg.norm(D, axis=0, keepdims=True) # normalize dictionary
xt = np.zeros(n) # true signal
idx = np.random.randint(low=0, high=n, size=s) # support of xt
xt[idx] = np.random.rand(s)
y = D @ xt + 5e-2 * np.random.randn(m) # synthetic signal
xt = jax.device_put(xt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and ADMM solver object.
"""
lmbda = 1e-1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g_list = [lmbda * functional.L1Norm(), functional.NonNegativeIndicator()]
C_list = [linop.Identity((n)), linop.Identity((n))]
rho_list = [1.0, 1.0]
maxiter = 100 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=g_list,
C_list=C_list,
rho_list=rho_list,
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=MatrixSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
"""
Plot the recovered coefficients and signal.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((xt, solver.x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((D @ xt, y, D @ solver.x)).T,
title="Signal",
lgnd=("Ground Truth", "Noisy", "Recovered"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.898908 | 0.890913 |
r"""
Basis Pursuit DeNoising (APGM)
==============================
This example demonstrates the solution of the sparse coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x}
\|_2^2 + \lambda \| \mathbf{x} \|_1\;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
and $\mathbf{x}$ is the sparse representation.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
"""
Construct a random dictionary, a reference random sparse
representation, and a test signal consisting of the synthesis of the
reference sparse representation.
"""
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and AcceleratedPGM solver object.
"""
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot the recovered coefficients and convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_pgm.py
|
sparsecode_pgm.py
|
r"""
Basis Pursuit DeNoising (APGM)
==============================
This example demonstrates the solution of the sparse coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x}
\|_2^2 + \lambda \| \mathbf{x} \|_1\;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
and $\mathbf{x}$ is the sparse representation.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
"""
Construct a random dictionary, a reference random sparse
representation, and a test signal consisting of the synthesis of the
reference sparse representation.
"""
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and AcceleratedPGM solver object.
"""
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot the recovered coefficients and convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.890097 | 0.927034 |
r"""
TV-Regularized Abel Inversion
=============================
This example demonstrates a TV-regularized Abel inversion by solving the
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_1 \;,$$
where $A$ is the Abel projector (with an implementation based on a
projector from PyAbel :cite:`pyabel-2022`), $\mathbf{y}$ is the measured
data, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
desired image.
"""
import numpy as np
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
"""
Compute inverse Abel transform solution.
"""
x_inv = A.inverse(y)
"""
Set up the problem to be solved. Anisotropic TV, which gives slightly
better performance than isotropic TV for this problem, is used here.
"""
f = loss.SquaredL2Loss(y=y, A=A)
λ = 2.35e1 # L1 norm regularization parameter
g = λ * functional.L1Norm() # Note the use of anisotropic TV
C = linop.FiniteDifference(input_shape=x_gt.shape)
"""
Set up ADMM solver object.
"""
ρ = 1.03e2 # ADMM penalty parameter
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=snp.clip(x_inv, 0.0, 1.0),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_tv = snp.clip(solver.x, 0.0, 1.0)
"""
Show results.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12))
plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(
x_inv,
title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
x_tv,
title="TV-Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_abel_tv_admm.py
|
ct_abel_tv_admm.py
|
r"""
TV-Regularized Abel Inversion
=============================
This example demonstrates a TV-regularized Abel inversion by solving the
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_1 \;,$$
where $A$ is the Abel projector (with an implementation based on a
projector from PyAbel :cite:`pyabel-2022`), $\mathbf{y}$ is the measured
data, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
desired image.
"""
import numpy as np
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
"""
Compute inverse Abel transform solution.
"""
x_inv = A.inverse(y)
"""
Set up the problem to be solved. Anisotropic TV, which gives slightly
better performance than isotropic TV for this problem, is used here.
"""
f = loss.SquaredL2Loss(y=y, A=A)
λ = 2.35e1 # L1 norm regularization parameter
g = λ * functional.L1Norm() # Note the use of anisotropic TV
C = linop.FiniteDifference(input_shape=x_gt.shape)
"""
Set up ADMM solver object.
"""
ρ = 1.03e2 # ADMM penalty parameter
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=snp.clip(x_inv, 0.0, 1.0),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_tv = snp.clip(solver.x, 0.0, 1.0)
"""
Show results.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12))
plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(
x_inv,
title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
x_tv,
title="TV-Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.922426 | 0.939969 |
r"""
Non-negative Poisson Loss Reconstruction (APGM)
===============================================
This example demonstrates the use of class
[pgm.PGMStepSize](../_autosummary/scico.optimize.pgm.rst#scico.optimize.pgm.PGMStepSize)
to solve the non-negative reconstruction problem with Poisson negative
log likelihood loss
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \left ( A(\mathbf{x}) -
\mathbf{y} \log\left( A(\mathbf{x}) \right) + \log(\mathbf{y}!) \right
) + I(\mathbf{x}^{(0)} \geq 0) \;,$$
where $A$ is the forward operator, $\mathbf{y}$ is the
measurement, $\mathbf{x}$ is the signal reconstruction, and
$I(\mathbf{x}^{(0)} \geq 0)$ is the non-negative indicator.
This example also demonstrates the application of
[numpy.BlockArray](../_autosummary/scico.numpy.rst#scico.numpy.BlockArray),
[functional.SeparableFunctional](../_autosummary/scico.functional.rst#scico.functional.SeparableFunctional),
and
[functional.ZeroFunctional](../_autosummary/scico.functional.rst#scico.functional.ZeroFunctional)
to implement the forward operator
$A(\mathbf{x}) = A_0(\mathbf{x}^{(0)}) + A_1(\mathbf{x}^{(1)})$
and the selective non-negativity constraint that only applies to
$\mathbf{x}^{(0)}$.
"""
import jax
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import scico.numpy as snp
import scico.random
from scico import functional, loss, plot
from scico.numpy import BlockArray
from scico.operator import Operator
from scico.optimize.pgm import (
AcceleratedPGM,
AdaptiveBBStepSize,
BBStepSize,
LineSearchStepSize,
RobustLineSearchStepSize,
)
from scico.typing import Shape
from scico.util import device_info
from scipy.linalg import dft
"""
Construct a dictionary, a reference random reconstruction, and a test
measurement signal consisting of the synthesis of the reference
reconstruction.
"""
m = 1024 # signal size
n = 8 # dictionary size
n0 = 2
n1 = n - n0
# Create dictionary with bump-like features.
D = ((snp.real(dft(m))[1 : n + 1, :m]) ** 12).T
D0 = D[:, :n0]
D1 = D[:, n0:]
# Define composed operator.
class ForwardOperator(Operator):
"""Toy problem non-linear forward operator with different treatment
of x[0] and x[1].
Attributes:
D0: Matrix multiplying x[0].
D1: Matrix multiplying x[1].
"""
def __init__(self, input_shape: Shape, D0, D1, jit: bool = True):
self.D0 = D0
self.D1 = D1
output_shape = (D0.shape[0],)
super().__init__(
input_shape=input_shape,
input_dtype=snp.complex64,
output_dtype=snp.complex64,
output_shape=output_shape,
jit=jit,
)
def _eval(self, x: BlockArray) -> BlockArray:
return 10 * snp.exp(-self.D0 @ x[0]) + 5 * snp.exp(-self.D1 @ x[1])
x_gt, key = scico.random.uniform(((n0,), (n1,)), seed=12345) # true coefficients
A = ForwardOperator(x_gt.shape, D0, D1)
lam = A(x_gt)
y, key = scico.random.poisson(lam, shape=lam.shape, key=key) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the loss function and the regularization.
"""
f = loss.PoissonLoss(y=y, A=A)
g0 = functional.NonNegativeIndicator()
g1 = functional.ZeroFunctional()
g = functional.SeparableFunctional([g0, g1])
"""
Define common setup: maximum number of iterations and initial estimate of the solution.
"""
maxiter = 50
x0, key = scico.random.uniform(((n0,), (n1,)), key=key)
x0 = jax.device_put(x0) # Initial solution estimate
"""
Define plotting functionality.
"""
def plot_results(hist, str_ss, L0, xsol, xgt, Aop):
# Plot signal, coefficients and convergence statistics.
fig = plot.figure(
figsize=(12, 6),
tight_layout=True,
)
gs = gridspec.GridSpec(nrows=2, ncols=3)
fig.suptitle(
"Results for PGM Solver and " + str_ss + r" ($L_0$: " + "{:4.2f}".format(L0) + ")",
fontsize=16,
)
ax0 = fig.add_subplot(gs[0, 0])
plot.plot(
hist.Objective,
ptyp="semilogy",
title="Objective",
xlbl="Iteration",
fig=fig,
ax=ax0,
)
ax1 = fig.add_subplot(gs[0, 1])
plot.plot(
hist.Residual,
ptyp="semilogy",
title="Residual",
xlbl="Iteration",
fig=fig,
ax=ax1,
)
ax2 = fig.add_subplot(gs[0, 2])
plot.plot(
hist.L,
ptyp="semilogy",
title="L",
xlbl="Iteration",
fig=fig,
ax=ax2,
)
ax3 = fig.add_subplot(gs[1, 0])
plt.stem(snp.concatenate((xgt[0], xgt[1])), linefmt="C1-", markerfmt="C1o", basefmt="C1-")
plt.stem(snp.concatenate((xsol[0], xsol[1])), linefmt="C2-", markerfmt="C2x", basefmt="C1-")
plt.legend(["Ground Truth", "Recovered"])
plt.xlabel("Index")
plt.title("Coefficients")
ax4 = fig.add_subplot(gs[1, 1:])
plot.plot(
snp.vstack((y, Aop(xgt), Aop(xsol))).T,
title="Fit",
xlbl="Index",
lgnd=("y", "A(x_gt)", "A(x)"),
fig=fig,
ax=ax4,
)
fig.show()
"""
Use the default PGMStepSize object, set L0 based on the norm of the
forward operator, and set up the AcceleratedPGM solver object. Run the
solver and plot the reconstructed signal and convergence statistics.
"""
L0 = 1e3
str_L0 = "(Specifically chosen so that convergence occurs)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
)
str_ss = type(solver.step_size).__name__
print(f"Solving on {device_info()}\n")
print("============================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the BBStepSize object, set L0 to an arbitrary initial value, and
set up the AcceleratedPGM solver object. Run the solver and plot the
reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=BBStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the AdaptiveBBStepSize object, set L0 to an arbitrary initial
value, and set up the AcceleratedPGM solver object. Run the solver and
plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=AdaptiveBBStepSize(kappa=0.75),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the LineSearchStepSize object, set L0 to an arbitrary initial
value, and set up the AcceleratedPGM solver object. Run the solver and
plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=LineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the RobustLineSearchStepSize object, set L0 to an arbitrary
initial value, and set up the AcceleratedPGM solver object. Run the
solver and plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("=================================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_poisson_pgm.py
|
sparsecode_poisson_pgm.py
|
r"""
Non-negative Poisson Loss Reconstruction (APGM)
===============================================
This example demonstrates the use of class
[pgm.PGMStepSize](../_autosummary/scico.optimize.pgm.rst#scico.optimize.pgm.PGMStepSize)
to solve the non-negative reconstruction problem with Poisson negative
log likelihood loss
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \left ( A(\mathbf{x}) -
\mathbf{y} \log\left( A(\mathbf{x}) \right) + \log(\mathbf{y}!) \right
) + I(\mathbf{x}^{(0)} \geq 0) \;,$$
where $A$ is the forward operator, $\mathbf{y}$ is the
measurement, $\mathbf{x}$ is the signal reconstruction, and
$I(\mathbf{x}^{(0)} \geq 0)$ is the non-negative indicator.
This example also demonstrates the application of
[numpy.BlockArray](../_autosummary/scico.numpy.rst#scico.numpy.BlockArray),
[functional.SeparableFunctional](../_autosummary/scico.functional.rst#scico.functional.SeparableFunctional),
and
[functional.ZeroFunctional](../_autosummary/scico.functional.rst#scico.functional.ZeroFunctional)
to implement the forward operator
$A(\mathbf{x}) = A_0(\mathbf{x}^{(0)}) + A_1(\mathbf{x}^{(1)})$
and the selective non-negativity constraint that only applies to
$\mathbf{x}^{(0)}$.
"""
import jax
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import scico.numpy as snp
import scico.random
from scico import functional, loss, plot
from scico.numpy import BlockArray
from scico.operator import Operator
from scico.optimize.pgm import (
AcceleratedPGM,
AdaptiveBBStepSize,
BBStepSize,
LineSearchStepSize,
RobustLineSearchStepSize,
)
from scico.typing import Shape
from scico.util import device_info
from scipy.linalg import dft
"""
Construct a dictionary, a reference random reconstruction, and a test
measurement signal consisting of the synthesis of the reference
reconstruction.
"""
m = 1024 # signal size
n = 8 # dictionary size
n0 = 2
n1 = n - n0
# Create dictionary with bump-like features.
D = ((snp.real(dft(m))[1 : n + 1, :m]) ** 12).T
D0 = D[:, :n0]
D1 = D[:, n0:]
# Define composed operator.
class ForwardOperator(Operator):
"""Toy problem non-linear forward operator with different treatment
of x[0] and x[1].
Attributes:
D0: Matrix multiplying x[0].
D1: Matrix multiplying x[1].
"""
def __init__(self, input_shape: Shape, D0, D1, jit: bool = True):
self.D0 = D0
self.D1 = D1
output_shape = (D0.shape[0],)
super().__init__(
input_shape=input_shape,
input_dtype=snp.complex64,
output_dtype=snp.complex64,
output_shape=output_shape,
jit=jit,
)
def _eval(self, x: BlockArray) -> BlockArray:
return 10 * snp.exp(-self.D0 @ x[0]) + 5 * snp.exp(-self.D1 @ x[1])
x_gt, key = scico.random.uniform(((n0,), (n1,)), seed=12345) # true coefficients
A = ForwardOperator(x_gt.shape, D0, D1)
lam = A(x_gt)
y, key = scico.random.poisson(lam, shape=lam.shape, key=key) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the loss function and the regularization.
"""
f = loss.PoissonLoss(y=y, A=A)
g0 = functional.NonNegativeIndicator()
g1 = functional.ZeroFunctional()
g = functional.SeparableFunctional([g0, g1])
"""
Define common setup: maximum number of iterations and initial estimate of the solution.
"""
maxiter = 50
x0, key = scico.random.uniform(((n0,), (n1,)), key=key)
x0 = jax.device_put(x0) # Initial solution estimate
"""
Define plotting functionality.
"""
def plot_results(hist, str_ss, L0, xsol, xgt, Aop):
# Plot signal, coefficients and convergence statistics.
fig = plot.figure(
figsize=(12, 6),
tight_layout=True,
)
gs = gridspec.GridSpec(nrows=2, ncols=3)
fig.suptitle(
"Results for PGM Solver and " + str_ss + r" ($L_0$: " + "{:4.2f}".format(L0) + ")",
fontsize=16,
)
ax0 = fig.add_subplot(gs[0, 0])
plot.plot(
hist.Objective,
ptyp="semilogy",
title="Objective",
xlbl="Iteration",
fig=fig,
ax=ax0,
)
ax1 = fig.add_subplot(gs[0, 1])
plot.plot(
hist.Residual,
ptyp="semilogy",
title="Residual",
xlbl="Iteration",
fig=fig,
ax=ax1,
)
ax2 = fig.add_subplot(gs[0, 2])
plot.plot(
hist.L,
ptyp="semilogy",
title="L",
xlbl="Iteration",
fig=fig,
ax=ax2,
)
ax3 = fig.add_subplot(gs[1, 0])
plt.stem(snp.concatenate((xgt[0], xgt[1])), linefmt="C1-", markerfmt="C1o", basefmt="C1-")
plt.stem(snp.concatenate((xsol[0], xsol[1])), linefmt="C2-", markerfmt="C2x", basefmt="C1-")
plt.legend(["Ground Truth", "Recovered"])
plt.xlabel("Index")
plt.title("Coefficients")
ax4 = fig.add_subplot(gs[1, 1:])
plot.plot(
snp.vstack((y, Aop(xgt), Aop(xsol))).T,
title="Fit",
xlbl="Index",
lgnd=("y", "A(x_gt)", "A(x)"),
fig=fig,
ax=ax4,
)
fig.show()
"""
Use the default PGMStepSize object, set L0 based on the norm of the
forward operator, and set up the AcceleratedPGM solver object. Run the
solver and plot the reconstructed signal and convergence statistics.
"""
L0 = 1e3
str_L0 = "(Specifically chosen so that convergence occurs)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
)
str_ss = type(solver.step_size).__name__
print(f"Solving on {device_info()}\n")
print("============================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the BBStepSize object, set L0 to an arbitrary initial value, and
set up the AcceleratedPGM solver object. Run the solver and plot the
reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=BBStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the AdaptiveBBStepSize object, set L0 to an arbitrary initial
value, and set up the AcceleratedPGM solver object. Run the solver and
plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=AdaptiveBBStepSize(kappa=0.75),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the LineSearchStepSize object, set L0 to an arbitrary initial
value, and set up the AcceleratedPGM solver object. Run the solver and
plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=LineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the RobustLineSearchStepSize object, set L0 to an arbitrary
initial value, and set up the AcceleratedPGM solver object. Run the
solver and plot the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("=================================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
input("\nWaiting for input to close figures and exit")
| 0.944228 | 0.903847 |
r"""
CT Reconstruction with CG and PCG
=================================
This example demonstrates a simple iterative CT reconstruction using
conjugate gradient (CG) and preconditioned conjugate gradient (PCG)
algorithms to solve the problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, and
$\mathbf{x}$ is the reconstructed image.
"""
from time import time
import numpy as np
import jax
import jax.numpy as jnp
from xdesign import Foam, discrete_phantom
from scico import loss, plot
from scico.linop import CircularConvolve
from scico.linop.radon_astra import TomographicProjector
from scico.solver import cg
"""
Create a ground truth image.
"""
N = 256 # phantom size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure a CT projection operator and generate synthetic measurements.
"""
n_projection = N # matches the phantom size so this is not few-view CT
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = 1 / N * TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
r"""
Forward and back project a single pixel (Kronecker delta) to compute
an approximate impulse response for $\mathbf{A}^T \mathbf{A}$.
"""
H = CircularConvolve.from_operator(A.T @ A)
r"""
Invert in the Fourier domain to form a preconditioner $\mathbf{M}
\approx (\mathbf{A}^T \mathbf{A})^{-1}$. See
:cite:`clinthorne-1993-preconditioning` Section V.A. for more details.
"""
# γ limits the gain of the preconditioner; higher gives a weaker filter.
γ = 1e-2
# The imaginary part comes from numerical errors in A.T and needs to be
# removed to ensure H is symmetric, positive definite.
frequency_response = np.real(H.h_dft)
inv_frequency_response = 1 / (frequency_response + γ)
# Using circular convolution without padding is sufficient here because
# M is approximate anyway.
M = CircularConvolve(inv_frequency_response, x_gt.shape, h_is_dft=True)
r"""
Check that $\mathbf{M}$ does approximately invert $\mathbf{A}^T \mathbf{A}$.
"""
plot_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, axes = plot.subplots(nrows=1, ncols=3, figsize=(12, 4.5))
plot.imview(x_gt, title="Ground truth, $x_{gt}$", fig=fig, ax=axes[0], **plot_args)
plot.imview(
A.T @ A @ x_gt, title=r"$\mathbf{A}^T \mathbf{A} x_{gt}$", fig=fig, ax=axes[1], **plot_args
)
plot.imview(
M @ A.T @ A @ x_gt,
title=r"$\mathbf{M} \mathbf{A}^T \mathbf{A} x_{gt}$",
fig=fig,
ax=axes[2],
**plot_args,
)
fig.suptitle(r"$\mathbf{M}$ approximately inverts $\mathbf{A}^T \mathbf{A}$")
fig.tight_layout()
fig.colorbar(
axes[2].get_images()[0],
ax=axes,
location="right",
shrink=1.0,
pad=0.05,
label="Arbitrary Units",
)
fig.show()
"""
Reconstruct with both standard and preconditioned conjugate gradient.
"""
start_time = time()
x_cg, info_cg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=1e-5,
info=True,
)
time_cg = time() - start_time
start_time = time()
x_pcg, info_pcg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=2e-5, # preconditioning affects the problem scaling so tol differs between CG and PCG
info=True,
M=M,
)
time_pcg = time() - start_time
"""
Compare CG and PCG in terms of reconstruction time and data fidelity.
"""
f_cg = loss.SquaredL2Loss(y=A.T @ y, A=A.T @ A)
f_data = loss.SquaredL2Loss(y=y, A=A)
print(
f"{'Method':10s}{'Iterations':>15s}{'Time (s)':>15s}{'||ATAx - ATy||':>15s}{'||Ax - y||':>15s}"
)
print(
f"{'CG':10s}{info_cg['num_iter']:>15d}{time_cg:>15.2f}{f_cg(x_cg):>15.2e}{f_data(x_cg):>15.2e}"
)
print(
f"{'PCG':10s}{info_pcg['num_iter']:>15d}{time_pcg:>15.2f}{f_cg(x_pcg):>15.2e}"
f"{f_data(x_pcg):>15.2e}"
)
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_noreg_pcg.py
|
ct_astra_noreg_pcg.py
|
r"""
CT Reconstruction with CG and PCG
=================================
This example demonstrates a simple iterative CT reconstruction using
conjugate gradient (CG) and preconditioned conjugate gradient (PCG)
algorithms to solve the problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, and
$\mathbf{x}$ is the reconstructed image.
"""
from time import time
import numpy as np
import jax
import jax.numpy as jnp
from xdesign import Foam, discrete_phantom
from scico import loss, plot
from scico.linop import CircularConvolve
from scico.linop.radon_astra import TomographicProjector
from scico.solver import cg
"""
Create a ground truth image.
"""
N = 256 # phantom size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure a CT projection operator and generate synthetic measurements.
"""
n_projection = N # matches the phantom size so this is not few-view CT
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = 1 / N * TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
r"""
Forward and back project a single pixel (Kronecker delta) to compute
an approximate impulse response for $\mathbf{A}^T \mathbf{A}$.
"""
H = CircularConvolve.from_operator(A.T @ A)
r"""
Invert in the Fourier domain to form a preconditioner $\mathbf{M}
\approx (\mathbf{A}^T \mathbf{A})^{-1}$. See
:cite:`clinthorne-1993-preconditioning` Section V.A. for more details.
"""
# γ limits the gain of the preconditioner; higher gives a weaker filter.
γ = 1e-2
# The imaginary part comes from numerical errors in A.T and needs to be
# removed to ensure H is symmetric, positive definite.
frequency_response = np.real(H.h_dft)
inv_frequency_response = 1 / (frequency_response + γ)
# Using circular convolution without padding is sufficient here because
# M is approximate anyway.
M = CircularConvolve(inv_frequency_response, x_gt.shape, h_is_dft=True)
r"""
Check that $\mathbf{M}$ does approximately invert $\mathbf{A}^T \mathbf{A}$.
"""
plot_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, axes = plot.subplots(nrows=1, ncols=3, figsize=(12, 4.5))
plot.imview(x_gt, title="Ground truth, $x_{gt}$", fig=fig, ax=axes[0], **plot_args)
plot.imview(
A.T @ A @ x_gt, title=r"$\mathbf{A}^T \mathbf{A} x_{gt}$", fig=fig, ax=axes[1], **plot_args
)
plot.imview(
M @ A.T @ A @ x_gt,
title=r"$\mathbf{M} \mathbf{A}^T \mathbf{A} x_{gt}$",
fig=fig,
ax=axes[2],
**plot_args,
)
fig.suptitle(r"$\mathbf{M}$ approximately inverts $\mathbf{A}^T \mathbf{A}$")
fig.tight_layout()
fig.colorbar(
axes[2].get_images()[0],
ax=axes,
location="right",
shrink=1.0,
pad=0.05,
label="Arbitrary Units",
)
fig.show()
"""
Reconstruct with both standard and preconditioned conjugate gradient.
"""
start_time = time()
x_cg, info_cg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=1e-5,
info=True,
)
time_cg = time() - start_time
start_time = time()
x_pcg, info_pcg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=2e-5, # preconditioning affects the problem scaling so tol differs between CG and PCG
info=True,
M=M,
)
time_pcg = time() - start_time
"""
Compare CG and PCG in terms of reconstruction time and data fidelity.
"""
f_cg = loss.SquaredL2Loss(y=A.T @ y, A=A.T @ A)
f_data = loss.SquaredL2Loss(y=y, A=A)
print(
f"{'Method':10s}{'Iterations':>15s}{'Time (s)':>15s}{'||ATAx - ATy||':>15s}{'||Ax - y||':>15s}"
)
print(
f"{'CG':10s}{info_cg['num_iter']:>15d}{time_cg:>15.2f}{f_cg(x_cg):>15.2e}{f_data(x_cg):>15.2e}"
)
print(
f"{'PCG':10s}{info_pcg['num_iter']:>15d}{time_pcg:>15.2f}{f_cg(x_pcg):>15.2e}"
f"{f_data(x_pcg):>15.2e}"
)
input("\nWaiting for input to close figures and exit")
| 0.920222 | 0.973968 |
r"""
Complex Total Variation Denoising with PDHG Solver
==================================================
This example demonstrates solution of a problem of the form
$$\mathrm{argmin}_{\mathbf{x}} \; f(\mathbf{x}) + g(C(\mathbf{x})) \;,$$
where $C$ is a nonlinear operator, via non-linear PDHG
:cite:`valkonen-2014-primal`. The example problem represents total
variation (TV) denoising applied to a complex image with piece-wise
smooth magnitude and non-smooth phase. The appropriate TV denoising
formulation for this problem is
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda \| C(\mathbf{x}) \|_{2,1} \;,$$
where $\mathbf{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator that applies a
linear difference operator to the magnitude of a complex array. The
standard TV solution, which is also computed for comparison purposes,
gives very poor results since the difference is applied independently to
real and imaginary components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import PDHG
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.complex64, append=0)
solver_tv = PDHG(
f=f,
g=g,
C=C,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
solver_nltv = PDHG(
f=f,
g=g,
C=D,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="NL-TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="NL-TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_cplx_tv_pdhg.py
|
denoise_cplx_tv_pdhg.py
|
r"""
Complex Total Variation Denoising with PDHG Solver
==================================================
This example demonstrates solution of a problem of the form
$$\mathrm{argmin}_{\mathbf{x}} \; f(\mathbf{x}) + g(C(\mathbf{x})) \;,$$
where $C$ is a nonlinear operator, via non-linear PDHG
:cite:`valkonen-2014-primal`. The example problem represents total
variation (TV) denoising applied to a complex image with piece-wise
smooth magnitude and non-smooth phase. The appropriate TV denoising
formulation for this problem is
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda \| C(\mathbf{x}) \|_{2,1} \;,$$
where $\mathbf{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator that applies a
linear difference operator to the magnitude of a complex array. The
standard TV solution, which is also computed for comparison purposes,
gives very poor results since the difference is applied independently to
real and imaginary components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import PDHG
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.complex64, append=0)
solver_tv = PDHG(
f=f,
g=g,
C=C,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
solver_nltv = PDHG(
f=f,
g=g,
C=D,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="NL-TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="NL-TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.930387 | 0.929055 |
r"""
Complex Total Variation Denoising with NLPADMM Solver
=====================================================
This example demonstrates solution of a problem of the form
$$\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}\;
H(\mb{x}, \mb{z}) = 0 \;,$$
where $H$ is a nonlinear function, via a variant of the proximal ADMM
algorithm for problems with a non-linear operator constraint
:cite:`benning-2016-preconditioned`. The example problem represents
total variation (TV) denoising applied to a complex image with
piece-wise smooth magnitude and non-smooth phase. (This example is rather
contrived, and was not constructed to represent a specific real imaging
problem, but it does have some properties in common with synthetic
aperture radar single look complex data in which the magnitude has much
more discernible structure than the phase.) The appropriate TV denoising
formulation for this problem is
$$\argmin_{\mb{x}} \; (1/2) \| \mb{y} - \mb{x} \|_2^2 + \lambda
\| C(\mb{x}) \|_{2,1} \;,$$
where $\mb{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator that applies a
linear difference operator to the magnitude of a complex array.
This problem is represented in the form above by taking $H(\mb{x},
\mb{z}) = C(\mb{x}) - \mb{z}$. The standard TV solution, which is
also computed for comparison purposes, gives very poor results since
the difference is applied independently to real and imaginary
components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import function, functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import NonLinearPADMM, ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.complex64, append=0)
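# Illustrative note (added; behavior assumed to follow numpy.diff append
# semantics): with append=0, differences along an axis holding [a, b, c] are
# computed on [a, b, c, 0], giving [b - a, c - b, -c], so each difference axis has
# the same length as the input and the stacked outputs share a common shape.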
solver_tv = ProximalADMM(
f=f,
g=g,
A=C,
rho=1.0,
mu=8.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
print()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
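# Added note: D is the composition x -> C(|x|); it is non-linear only through the
# magnitude operation, which is why the non-linear proximal ADMM variant is needed.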
# Constraint function imposing z = D(x) constraint
H = function.Function(
(C.shape[1], C.shape[0]),
output_shape=C.shape[0],
eval_fn=lambda x, z: D(x) - z,
input_dtypes=(snp.complex64, snp.float32),
output_dtype=snp.float32,
)
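# Clarifying note (added): the first argument gives the input shapes of (x, z);
# C.shape is (output_shape, input_shape) for scico linear operators, so x takes the
# input shape of C and z its output shape. With H(x, z) = D(x) - z, the constraint
# H(x, z) = 0 enforces z = D(x), and minimizing f(x) + g(z) subject to it is
# equivalent to the TV formulation f(x) + λ ||C(x)||_{2,1} from the docstring.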
solver_nltv = NonLinearPADMM(
f=f,
g=g,
H=H,
rho=5.0,
mu=6.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="Standard TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="Magnitude TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="Standard TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="Magnitude TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit")
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_cplx_tv_nlpadmm.py
|
denoise_cplx_tv_nlpadmm.py
|
r"""
Complex Total Variation Denoising with NLPADMM Solver
=====================================================
This example demonstrates solution of a problem of the form
$$\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}\;
H(\mb{x}, \mb{z}) = 0 \;,$$
where $H$ is a nonlinear function, via a variant of the proximal ADMM
algorithm for problems with a non-linear operator constraint
:cite:`benning-2016-preconditioned`. The example problem represents
total variation (TV) denoising applied to a complex image with
piece-wise smooth magnitude and non-smooth phase. (This example is rather
contrived, and was not constructed to represent a specific real imaging
problem, but it does have some properties in common with synthetic
aperture radar single look complex data in which the magnitude has much
more discernible structure than the phase.) The appropriate TV denoising
formulation for this problem is
$$\argmin_{\mb{x}} \; (1/2) \| \mb{y} - \mb{x} \|_2^2 + \lambda
\| C(\mb{x}) \|_{2,1} \;,$$
where $\mb{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator consisting of
a linear difference operator applied to the magnitude of a complex array.
This problem is represented in the form above by taking $H(\mb{x},
\mb{z}) = C(\mb{x}) - \mb{z}$. The standard TV solution, which is
also computed for comparison purposes, gives very poor results since
the difference is applied independently to real and imaginary
components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import function, functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import NonLinearPADMM, ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.complex64, append=0)
solver_tv = ProximalADMM(
f=f,
g=g,
A=C,
rho=1.0,
mu=8.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
print()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
# Constraint function imposing z = D(x) constraint
H = function.Function(
(C.shape[1], C.shape[0]),
output_shape=C.shape[0],
eval_fn=lambda x, z: D(x) - z,
input_dtypes=(snp.complex64, snp.float32),
output_dtype=snp.float32,
)
solver_nltv = NonLinearPADMM(
f=f,
g=g,
H=H,
rho=5.0,
mu=6.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="Standard TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="Magnitude TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="Standard TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="Magnitude TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.944523 | 0.931711 |
Usage Examples
==============
Organized by Application
------------------------
Computed Tomography
^^^^^^^^^^^^^^^^^^^
- ct_abel_tv_admm.py
- ct_abel_tv_admm_tune.py
- ct_astra_noreg_pcg.py
- ct_astra_3d_tv_admm.py
- ct_astra_tv_admm.py
- ct_astra_weighted_tv_admm.py
- ct_svmbir_tv_multi.py
- ct_svmbir_ppp_bm3d_admm_cg.py
- ct_svmbir_ppp_bm3d_admm_prox.py
- ct_fan_svmbir_ppp_bm3d_admm_prox.py
- ct_astra_modl_train_foam2.py
- ct_astra_odp_train_foam2.py
- ct_astra_unet_train_foam2.py
Deconvolution
^^^^^^^^^^^^^
- deconv_circ_tv_admm.py
- deconv_tv_admm.py
- deconv_tv_padmm.py
- deconv_tv_admm_tune.py
- deconv_microscopy_tv_admm.py
- deconv_microscopy_allchn_tv_admm.py
- deconv_ppp_bm3d_admm.py
- deconv_ppp_bm3d_pgm.py
- deconv_ppp_dncnn_admm.py
- deconv_ppp_dncnn_padmm.py
- deconv_ppp_bm4d_admm.py
- deconv_modl_train_foam1.py
- deconv_odp_train_foam1.py
Sparse Coding
^^^^^^^^^^^^^
- sparsecode_admm.py
- sparsecode_conv_admm.py
- sparsecode_conv_md_admm.py
- sparsecode_pgm.py
- sparsecode_poisson_pgm.py
Miscellaneous
^^^^^^^^^^^^^
- demosaic_ppp_bm3d_admm.py
- superres_ppp_dncnn_admm.py
- denoise_l1tv_admm.py
- denoise_tv_admm.py
- denoise_tv_pgm.py
- denoise_tv_multi.py
- denoise_cplx_tv_nlpadmm.py
- denoise_cplx_tv_pdhg.py
- denoise_dncnn_universal.py
- diffusercam_tv_admm.py
- video_rpca_admm.py
- ct_astra_datagen_foam2.py
- deconv_datagen_bsds.py
- deconv_datagen_foam1.py
- denoise_datagen_bsds.py
Organized by Regularization
---------------------------
Plug and Play Priors
^^^^^^^^^^^^^^^^^^^^
- ct_svmbir_ppp_bm3d_admm_cg.py
- ct_svmbir_ppp_bm3d_admm_prox.py
- ct_fan_svmbir_ppp_bm3d_admm_prox.py
- deconv_ppp_bm3d_admm.py
- deconv_ppp_bm3d_pgm.py
- deconv_ppp_dncnn_admm.py
- deconv_ppp_dncnn_padmm.py
- deconv_ppp_bm4d_admm.py
- demosaic_ppp_bm3d_admm.py
- superres_ppp_dncnn_admm.py
Total Variation
^^^^^^^^^^^^^^^
- ct_abel_tv_admm.py
- ct_abel_tv_admm_tune.py
- ct_astra_tv_admm.py
- ct_astra_3d_tv_admm.py
- ct_astra_weighted_tv_admm.py
- ct_svmbir_tv_multi.py
- deconv_circ_tv_admm.py
- deconv_tv_admm.py
- deconv_tv_admm_tune.py
- deconv_tv_padmm.py
- deconv_microscopy_tv_admm.py
- deconv_microscopy_allchn_tv_admm.py
- denoise_l1tv_admm.py
- denoise_tv_admm.py
- denoise_tv_pgm.py
- denoise_tv_multi.py
- denoise_cplx_tv_nlpadmm.py
- denoise_cplx_tv_pdhg.py
- diffusercam_tv_admm.py
Sparsity
^^^^^^^^
- diffusercam_tv_admm.py
- sparsecode_admm.py
- sparsecode_conv_admm.py
- sparsecode_conv_md_admm.py
- sparsecode_pgm.py
- sparsecode_poisson_pgm.py
- video_rpca_admm.py
Machine Learning
^^^^^^^^^^^^^^^^
- ct_astra_datagen_foam2.py
- ct_astra_modl_train_foam2.py
- ct_astra_odp_train_foam2.py
- ct_astra_unet_train_foam2.py
- deconv_datagen_bsds.py
- deconv_datagen_foam1.py
- deconv_modl_train_foam1.py
- deconv_odp_train_foam1.py
- denoise_datagen_bsds.py
- denoise_dncnn_train_bsds.py
- denoise_dncnn_universal.py
Organized by Optimization Algorithm
-----------------------------------
ADMM
^^^^
- ct_abel_tv_admm.py
- ct_abel_tv_admm_tune.py
- ct_astra_tv_admm.py
- ct_astra_3d_tv_admm.py
- ct_astra_weighted_tv_admm.py
- ct_svmbir_tv_multi.py
- ct_svmbir_ppp_bm3d_admm_cg.py
- ct_svmbir_ppp_bm3d_admm_prox.py
- ct_fan_svmbir_ppp_bm3d_admm_prox.py
- deconv_circ_tv_admm.py
- deconv_tv_admm.py
- deconv_tv_admm_tune.py
- deconv_microscopy_tv_admm.py
- deconv_microscopy_allchn_tv_admm.py
- deconv_ppp_bm3d_admm.py
- deconv_ppp_dncnn_admm.py
- deconv_ppp_bm4d_admm.py
- diffusercam_tv_admm.py
- sparsecode_admm.py
- sparsecode_conv_admm.py
- sparsecode_conv_md_admm.py
- demosaic_ppp_bm3d_admm.py
- superres_ppp_dncnn_admm.py
- denoise_l1tv_admm.py
- denoise_tv_admm.py
- denoise_tv_multi.py
- video_rpca_admm.py
Linearized ADMM
^^^^^^^^^^^^^^^
- ct_svmbir_tv_multi.py
- denoise_tv_multi.py
Proximal ADMM
^^^^^^^^^^^^^
- deconv_tv_padmm.py
- denoise_tv_multi.py
- denoise_cplx_tv_nlpadmm.py
- deconv_ppp_dncnn_padmm.py
Non-linear Proximal ADMM
^^^^^^^^^^^^^^^^^^^^^^^^
- denoise_cplx_tv_nlpadmm.py
PDHG
^^^^
- ct_svmbir_tv_multi.py
- denoise_tv_multi.py
- denoise_cplx_tv_pdhg.py
PGM
^^^
- deconv_ppp_bm3d_pgm.py
- sparsecode_pgm.py
- sparsecode_poisson_pgm.py
- denoise_tv_pgm.py
PCG
^^^
- ct_astra_noreg_pcg.py
|
scico
|
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/index.rst
|
index.rst
|
| 0.707708 | 0.165863 |