url
stringlengths
14
1.76k
text
stringlengths
100
1.02M
metadata
stringlengths
1.06k
1.1k
https://share.cocalc.com/share/18a7b0c7670919c84dd41d8447d4dbd6eb63a0c0/Erros.sagews?viewer=share
CoCalc Public FilesErros.sagews Author: Leon Denis Views : 62 Compute Environment: Ubuntu 18.04 (Deprecated) Tratamento de Erros no Sage tg(x) Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> NameError: name 'tg' is not defined k+1 Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> NameError: name 'k' is not defined lista=[1,2,3];lista[3] Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> IndexError: list index out of range ([5,6,7]).union([2,3,4]) Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> AttributeError: 'list' object has no attribute 'union' 1/0 Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> File "sage/rings/integer.pyx", line 1843, in sage.rings.integer.Integer.__div__ (/projects/sage/sage-7.3/src/build/cythonized/sage/rings/integer.c:12742) raise ZeroDivisionError("rational division by zero") ZeroDivisionError: rational division by zero [1,2,3) Error in lines 0-1 Traceback (most recent call last): File 
"/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "<string>", line 1 [Integer(1),Integer(2),Integer(3)) ^ SyntaxError: invalid syntax for i in range(10): if i%2==0: print i Error in lines 1-3 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "<string>", line 3 print i ^ IndentationError: expected an indented block [1,2]+(1,2) Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> TypeError: can only concatenate list (not "tuple") to list for i in x: i=i+1 Error in lines 1-2 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> TypeError: 'sage.symbolic.expression.Expression' object is not iterable load('texto.py') Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_salvus.py", line 3391, in load exec 'salvus.namespace["%s"] = sage.structure.sage_object.load(*__args, **__kwds)'%t in salvus.namespace, {'__args':other_args, '__kwds':kwds} File "<string>", line 1, in <module> File "sage/structure/sage_object.pyx", line 992, in sage.structure.sage_object.load 
(/projects/sage/sage-7.3/src/build/cythonized/sage/structure/sage_object.c:11186) sage.repl.load.load(filename, globals()) File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/sage/repl/load.py", line 263, in load raise IOError('did not find file %r to load or attach' % filename) IOError: did not find file 'texto.py' to load or attach assume(x>0, x<0) Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/sage/symbolic/assumptions.py", line 515, in assume x.assume() File "sage/symbolic/expression.pyx", line 1743, in sage.symbolic.expression.Expression.assume (/projects/sage/sage-7.3/src/build/cythonized/sage/symbolic/expression.cpp:12576) raise ValueError("Assumption is %s" % str(s._sage_()[0])) ValueError: Assumption is inconsistent 20//2.3 Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> File "sage/structure/element.pyx", line 1896, in sage.structure.element.RingElement.__floordiv__ (/projects/sage/sage-7.3/src/build/cythonized/sage/structure/element.c:16647) return coercion_model.bin_op(self, right, floordiv) File "sage/structure/coerce.pyx", line 1043, in sage.structure.coerce.CoercionModel_cache_maps.bin_op (/projects/sage/sage-7.3/src/build/cythonized/sage/structure/coerce.c:8958) raise File "sage/structure/coerce.pyx", line 1039, in sage.structure.coerce.CoercionModel_cache_maps.bin_op (/projects/sage/sage-7.3/src/build/cythonized/sage/structure/coerce.c:8896) return PyObject_CallObject(op, xy) File "sage/structure/element.pyx", line 1895, in 
sage.structure.element.RingElement.__floordiv__ (/projects/sage/sage-7.3/src/build/cythonized/sage/structure/element.c:16622) return (<RingElement>self)._floordiv_(right) File "sage/structure/element.pyx", line 1908, in sage.structure.element.RingElement._floordiv_ (/projects/sage/sage-7.3/src/build/cythonized/sage/structure/element.c:16756) raise TypeError(arith_error_message(self, right, floordiv)) TypeError: unsupported operand parent(s) for '//': 'Real Field with 53 bits of precision' and 'Real Field with 53 bits of precision' var('4') Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_salvus.py", line 2922, in var return var0(*args, **kwds) File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_salvus.py", line 2884, in var0 v = sage.all.SR.var(name, **kwds) File "sage/symbolic/ring.pyx", line 759, in sage.symbolic.ring.SymbolicRing.var (/projects/sage/sage-7.3/src/build/cythonized/sage/symbolic/ring.cpp:9295) raise ValueError('The name "'+s+'" is not a valid Python identifier.') ValueError: The name "4" is not a valid Python identifier. sage: x=10 sage: for i in x: Error in lines 2-2 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "<string>", line 1 for i in x: ^ SyntaxError: unexpected EOF while parsing %md ## ** Tratando Exeções ** ** Tratando Exeções ** %md Imagine que um pesquisador necessite avaliar o inverso multiplicativo de valores contidos em uma certa lista de dados obtida atráves de um processo qualquer que inadivertidamente gerou valores iguais a zero. 
Naturalmente, o valor zero não possui inverso multiplicativo, portanto, um erro de exceção irá interronper o laço. dados=srange(-1.2,1,0.4, universe=QQ)+srange(-1,1,0.2, universe=QQ) print i, ' -> ', 1/i -6/5 -> -5/6 -4/5 -> -5/4 -2/5 -> -5/2 0 -> Error in lines 2-3 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 2, in <module> File "sage/rings/integer.pyx", line 1849, in sage.rings.integer.Integer.__div__ (/projects/sage/sage-7.3/src/build/cythonized/sage/rings/integer.c:12830) raise ZeroDivisionError("rational division by zero") ZeroDivisionError: rational division by zero %md A interrupção de uma rotina por erro de exceção pode ser bastante inconviniênte. Por exemplo, imagine que um pesquisador necessite avaliar o inverso multiplicativo de valores contidos em uma certa lista de dados obtida atráves de um processo qualquer que inadivertidamente gerou valores iguais a zero. É natural que o laço $\verb|for|$ a seguir seja interropido pela exceção $\verb|ZeroDivisionError|$ quando encontra o valor $\verb|i=0|$: A interrupção de uma rotina por erro de exceção pode ser bastante inconviniênte. Por exemplo, imagine que um pesquisador necessite avaliar o inverso multiplicativo de valores contidos em uma certa lista de dados obtida atráves de um processo qualquer que inadivertidamente gerou valores iguais a zero. 
É natural que o laço $\verb|for|$ a seguir seja interropido pela exceção $\verb|ZeroDivisionError|$ quando encontra o valor $\verb|i=0|$: dados=srange(-1.2,1,0.4, universe=QQ)+srange(0,1,0.3, universe=QQ) print i, ' -> ', 1/i -6/5 -> -5/6 -4/5 -> -5/4 -2/5 -> -5/2 0 -> Error in lines 2-3 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 2, in <module> File "sage/rings/integer.pyx", line 1849, in sage.rings.integer.Integer.__div__ (/projects/sage/sage-7.3/src/build/cythonized/sage/rings/integer.c:12830) raise ZeroDivisionError("rational division by zero") ZeroDivisionError: rational division by zero %md Para evitar interrupções na execução de seus comandos você deve tratar as execeções que podem surgir, isto é, %md Sage possui a estutura $\verb|try...except|$ permite que sua rotina seja executada até o final mesmo com a ocorrência de erros de exceção específicos. A sintaxe sintxe mais simplmes para o tratamento de erros de execeção é a que apresetamos a seguir: Sage possui a estutura $\verb|try...except|$ permite que sua rotina seja executada até o final mesmo com a ocorrência de erros de exceção específicos. 
A sintaxe sintxe mais simplmes para o tratamento de erros de execeção é a que apresetamos a seguir: for i in dados: try: print i, ' -> ', 1/i except ZeroDivisionError: print 'divisao por zero' -6/5 -> -5/6 -4/5 -> -5/4 -2/5 -> -5/2 0 -> divisao por zero 2/5 -> 5/2 4/5 -> 5/4 0 -> divisao por zero 3/10 -> 10/3 3/5 -> 5/3 9/10 -> 10/9 laço encerrado var('n') n int(1.5) 1 int(e) 2 int('1.0') Error in lines 1-1 Traceback (most recent call last): File "/projects/sage/sage-7.3/local/lib/python2.7/site-packages/smc_sagews/sage_server.py", line 957, in execute exec compile(block+'\n', '', 'single') in namespace, locals File "", line 1, in <module> ValueError: invalid literal for int() with base 10: '1.0'
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 4, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.19702686369419098, "perplexity": 9586.000270468474}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-40/segments/1600402127075.68/warc/CC-MAIN-20200930141310-20200930171310-00209.warc.gz"}
https://mathoverflow.net/questions/106719/nearly-constant-curvature-implies-nearly-isometric-to-a-space-form/106728
# Nearly constant curvature implies “nearly isometric” to a space form? It is well known that a Riemannian manifold with constant sectional curvature is a quotient of the Euclidean space, hyperbolic space or sphere. In particular we know what their metric looks like locally. My question is, is there a quantitative version of the above result? By this, I mean for example, given $\varepsilon>0$, there exists $\delta$ such that if $(M,g)$ satisfies $\left|Rm\right|_g<\delta$, does there exist a local diffeomorphism $\phi$ to $\mathbb R^n$ such that $|\phi_*g-g_0|_{g_0}< \varepsilon$, where $g_0$ is the standard metric on $\mathbb R^n$? - This question is two-sided, and I'm not sure what you mean by "local diffeomorphism" so I'll treat both aspects. There is a local and a global version : ## Local version : Q1 : Given a point $x$ in a Riemannian manifold $(M,g)$, can we find a constant curvature metric on a neighborhood of $x$ which is close to $g$ ? First remark : since we want a local statement, zero curvature is as good as constant curvature here. The answer to Q1 is "yes", without any restriction on the curvature. This can be seen using normal coordinates centered at $x$ : in these coordinates, the metric at $x$ is euclidean and its distortion from being euclidean as one moves away from $x$ can be controlled (using that the curvature is bounded in a neighborhood of $x$, and that the injectivity radius at $x$ is positive). Edit : The above statement is too complicated. As Anton says in his comment to Agol's answer, use the exponential map ! The pull back of $g$ by the exponential map defines a riemannian metric on some neighborhood of the origin in $T_xM$ which is equal to $g_x$ at $x$; by continuity, this pullback stays close to the euclidean metric $g_x$ on $T_xM$ and this does the job. We can refine the question then : Q1' : What can be said about the size of the neighborhood we obtained ? In this case we need to impose geometric restrictions on $(M,g)$. 
For instance, Cheeger and Anderson proved the following : For $n\in\mathbb{N}$, $k\in\mathbb{R}$, $i>0$ and $\varepsilon>0$, one can find $\delta>0$ such that in an $n$-manifold of Ricci curvature greater than $k$ and injectivity radius greater than $i$, any ball of radius $\delta$ admits a flat metric which is $\varepsilon$-close to $g$ in $C^0$-norm. See "$C^\alpha$-compactness for manifolds with Ricci curvature and injectivity radius bounded below." The proof uses more elaborate machinery than just normal coordinates : harmonic coordinates are used. If you stick to normal coordinates you can obtain a similar result but with stronger geometric assumptions. ## Global version : Q2 : Under which condition does a manifold with almost $k$ curvature admit a $C^0$-close metric of constant curvature $k$ ? If you consider large spheres, they have almost zero curvature but don't admit any flat metric, so you need to put some restrictions on the size of the manifolds. An example of a theorem you can get is the following : For any $n\in\mathbb{N}$, $k\in\mathbb{R}$, $V>0$, $D>0$ and $\varepsilon>0$, there is a $\delta>0$ such that any $n$-manifold $(M,g)$ of diameter less than $D$, volume more than $V$, and sectional curvature between $k-\delta$ and $k+\delta$ admits a metric of constant sectional curvature $k$ which is $\varepsilon$-close to $g$ in $C^0$-norm. The proof relies on the Cheeger-Gromov compactness theorem for sequences of Riemannian manifolds. A (really) sketchy proof goes like this : we argue by contradiction, you take a sequence $\delta_i$ going to $0$, and you assume you can find a sequence of manifolds $(M_i,g_i)$ satisfying the hypothesis of the theorem with $\delta=\delta_i$ and not satisfying the conclusion of the theorem. 
Then up to a subsequence, the sequence has a limit which is a manifold of constant curvature $k$; by the very definition of Cheeger-Gromov convergence, this implies that for some $i$ large enough, $M_i$ admits a constant curvature $k$ metric $\varepsilon$-close to $g_i$, a contradiction. The lower bound on the volume is necessary (at least in the $k=0$ case): the so-called "infranilmanifolds" admit metrics of curvature as close as wanted to $0$ with diameter bounded above but no flat metric. For the $k=1$ case, the bound on the diameter is unnecessary because of Myers' theorem. For the $k=-1$ case, I don't know if the hypothesis can be weakened. - For $k=-1$ you do not need the lower volume bound, but you need the upper diameter bound. See Pinching constants for hyperbolic manifolds. by Gromov and Thurston math.psu.edu/petrunin/teach-old/minicourse-china-2008/… – Anton Petrunin Sep 9 '12 at 16:15 Thanks! It's funny that the situation is opposite to that of positive curvature. Another question came to my mind about the positive case : if we assume that $M$ is simply connected, then the lower bound on the volume isn't necessary (Klingenberg's Lemma). With Synge, it also shows that the volume is not needed in even dimensions. What about odd dimensions ? – Thomas Richard Sep 9 '12 at 16:27 Thanks for the very detailed reply! The question in the "global version" comes close to what's in my mind. Can you suggest a reference for me to look up your stated (or similar) result? (I am indeed not very familiar with Cheeger-Gromov compactness, but I can take it for granted and take a look at the proof of these types of results. ) – Kwong Sep 10 '12 at 13:40 In fact, almost everything is in the proof of the Cheeger-Gromov compactness theorem. The only additional observation is to show that the limit space has constant curvature, which is not that obvious because the limit space is only a $C^{1,\alpha}$ riemannian manifold. 
The trick is to see that the comparison results with spaceforms of constant curvature $k$ hold in both directions, and to use this to build a local isometry with a constant curvature space. For the proof of Cheeger-Gromov compactness, see the papers by Greene and Wu, and by S. Peters. I don't remember the titles, I'll check. – Thomas Richard Sep 11 '12 at 17:52 There's an unpublished preprint of Tian which gives a criterion for when a manifold that is close to being Einstein may actually be deformed to being Einstein (see Theorem 6.1 of his paper - sorry, this copy has only odd pages!). In 3 dimensions, this means that if one is close to being hyperbolic in his sense, then there is a deformation of the metric to a hyperbolic metric. There's also the $1/4$ pinching theorem of Brendle-Schoen. - Ian, the answer to Kwong's question is "exponential map" and you are answering a different (and more advanced) question. – Anton Petrunin Sep 9 '12 at 13:19 @Anton : I didn't see your comment when I was writing my answer. – Thomas Richard Sep 9 '12 at 13:27 For the global question, and hyperbolic metrics, in dimension > 3 this is a result of Gromov, stated in his 1978 JDG paper, and in dimension 3 it is an unpublished result of Daryl Cooper, from the late nineties, and Gromov, independently, so while Tian might have a more general result, he is far from the first. -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9713099002838135, "perplexity": 206.90902354161832}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-26/segments/1466783392069.78/warc/CC-MAIN-20160624154952-00092-ip-10-164-35-72.ec2.internal.warc.gz"}
https://mathematica.stackexchange.com/questions/38992/how-do-i-construct-a-non-linear-model-from-experimental-data
How do I construct a non-linear model from experimental data? I want to find a function $w=f(x,y,z)$ that fits experimentally determined values of w. For example, assume x = {1, 1, 1, 2, 2, 2} y = {1, 2, 3, 1, 2, 3} z = {1, 2, 3, 4, 5, 6} and experiments have given w = {.531, .341, .163, .641, .713, -0.40e-1} I want to fit this data according to this relationship: w = x^a + (b x^2)/y + c y z How can I find the constants a, b and c and estimate the error the model produces? • NonlinearModelFit will help you out. You have to take care with your symbol definitions, though, if the data you are passing to NonlinearModelFit is in the symbol x, then the value should not also be called x. Dec 17 '13 at 21:42 This is certainly answered elsewhere on the site, but I can't find a simple example. 1. Ensure proper formatting If you wish to have your experimental data in the symbols x, y, and z, then these symbols cannot show up in your model. Adjust the model slightly such as this: x = {1, 1, 1, 2, 2, 2} y = {1, 2, 3, 1, 2, 3} z = {1, 2, 3, 4, 5, 6} w = {.531, .341, .163, .641, .713, -0.040} model = x1^a + (b x1^2)/y1 + c y1 z1 Also make sure that your numbers are interpreted as numbers by Mathematica. The last term in w needs to be fixed. The documentation for NonlinearModelFit is pretty good and provides you with examples as well as the summary statistics that can be easily extracted after the fitting is complete. NonlinearModelFit wants the data entered in the format {{xi, yi, zi, wi}, {...},...} which can be achieved easily with Transpose nlm = NonlinearModelFit[Transpose[{x,y,z,w}],model,{a,b,c},{x1,y1,z1}] 3. View Results By assigning NonlinearModelFit to a symbol, you can easily access summary statistics as well as the information in which you are interested: nlm["BestFitParameters"] nlm["ParameterTable"] • Hey, man! You should teach science! :) Dec 17 '13 at 22:57
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.38848698139190674, "perplexity": 703.9333120914628}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-39/segments/1631780057615.3/warc/CC-MAIN-20210925082018-20210925112018-00124.warc.gz"}
http://math.stackexchange.com/questions/145239/a-dynamic-programming-problem-with-continuous-states-and-observations
# A dynamic programming problem with continuous states and observations I have a dynamic programming expressed in the following Bellman backup equation form, $$V(\boldsymbol{\theta},T)=\max_{i \in N} \mathbb{E} \left[ x_i + V(\boldsymbol{\theta}_{x_i}, T-1) \right]$$ where $\theta_i$ is the expectation of $x_i$, and the $\boldsymbol{\theta}$ vector is updated with observation $x_i$ using Bayes rules. So the backup could be expanded recursively as, \begin{align} V(\boldsymbol{\theta},T)&=\max_{i \in N} \theta_i + \int p(x_i) V(\boldsymbol{\theta}_{x_i}, T-1) dx\\ &=\max_{i \in N} \theta_i + \int p(x_i) \max_{j \in N} \left( \theta_{j,x_i} + V(\boldsymbol{\theta}_{x_i, x_j}, T-2) \right) dx\\ &=\dots \end{align} The integral part, for example, $$\int p(x) \max_{j\in N} \theta_{j, x} dx$$ is complicated because the integrand is changing with different $x$, although $\theta$ is simply a linear function of $x$. I can surely find a section where certain $\theta$ should be choosed via max operator for the above equation, by solving a group of inequalities. But for the whole $T$ iteration it becomes impossible. I don't know how I can solve this equation, or approximate the optimal solution. Any help is appreciated! Thanks! - An inequity is an injustice; you mean inequalities. –  joriki May 15 '12 at 0:35 @joriki Typo edited. Thanks. –  shuaiyuancn May 16 '12 at 8:07
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 1, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9988376498222351, "perplexity": 1424.1569844381352}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-32/segments/1438042987034.19/warc/CC-MAIN-20150728002307-00290-ip-10-236-191-2.ec2.internal.warc.gz"}
https://www.math.nyu.edu/dynamic/calendars/seminars/analysis-seminar/1279/
# Analysis Seminar #### Instability, Index Theorems, and Exponential Dichotomy of Hamiltonian PDEs Speaker: Chongchun Zeng, Georgia Tech Location: Warren Weaver Hall 1302 Date: Thursday, April 6, 2017, noon Synopsis: Motivated by the stability/instability analysis of coherent states (standing waves, traveling waves, etc.) in nonlinear Hamiltonian PDEs such as BBM, GP, and 2-D Euler equations, we consider a general linear Hamiltonian system $$u_t = JL u$$ in a real Hilbert space $$X$$ -- the energy space. The main assumption is that the energy functional $$\frac 12 \langle Lu, u\rangle$$ has only finitely many negative dimensions -- $$n^-(L) < \infty$$. Our first result is an $$L$$-orthogonal decomposition of $$X$$ into closed subspaces so that $$JL$$ has a nice structure. Consequently, we obtain an index theorem which relates $$n^-(L)$$ and the dimensions of subspaces of generalized eigenvectors of some eigenvalues of $$JL$$, along with some information on such subspaces. Our third result is the linear exponential trichotomy of the group $$e^{tJL}$$. This includes the nonexistence of exponential growth in the finite co-dimensional invariant center subspace and the optimal bounds on the algebraic growth rate there. Next we consider the robustness of the stability/instability under small Hamiltonian perturbations. In particular, we give a necessary and sufficient condition on whether a purely imaginary eigenvalues may become hyperbolic under small perturbations. Finally we revisit some nonlinear Hamiltonian PDEs. This is a joint work with Zhiwu Lin.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9534855484962463, "perplexity": 470.4902471047502}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257650685.77/warc/CC-MAIN-20180324132337-20180324152337-00100.warc.gz"}
http://www.zora.uzh.ch/16558/
# Formation and accretion history of terrestrial planets from runaway growth through to late time: implications for orbital eccentricity - Zurich Open Repository and Archive Morishima, R; Schmidt, M W; Stadel, J; Moore, B (2008). Formation and accretion history of terrestrial planets from runaway growth through to late time: implications for orbital eccentricity. Astrophysical Journal, 685(2):1247-1261. ## Abstract Remnant planetesimals might have played an important role in reducing the orbital eccentricities of the terrestrial planets after their formation via giant impacts. However, the population and the size distribution of remnant planetesimals during and after the giant impact stage are unknown, because simulations of planetary accretion in the runaway growth and giant impact stages have been conducted independently. Here we report results of direct N-body simulations of the formation of terrestrial planets beginning with a compact planetesimal disk. The initial planetesimal disk has a total mass and angular momentum as observed for the terrestrial planets, and we vary the width (0.3 and 0.5 AU) and the number of planetesimals (1000-5000). This initial configuration generally gives rise to three final planets of similar size, and sometimes a fourth small planet forms near the location of Mars. Since a sufficient number of planetesimals remains, even after the giant impact phase, the final orbital eccentricities are as small as those of the Earth and Venus. ## Abstract Remnant planetesimals might have played an important role in reducing the orbital eccentricities of the terrestrial planets after their formation via giant impacts. However, the population and the size distribution of remnant planetesimals during and after the giant impact stage are unknown, because simulations of planetary accretion in the runaway growth and giant impact stages have been conducted independently. 
Here we report results of direct N-body simulations of the formation of terrestrial planets beginning with a compact planetesimal disk. The initial planetesimal disk has a total mass and angular momentum as observed for the terrestrial planets, and we vary the width (0.3 and 0.5 AU) and the number of planetesimals (1000-5000). This initial configuration generally gives rise to three final planets of similar size, and sometimes a fourth small planet forms near the location of Mars. Since a sufficient number of planetesimals remains, even after the giant impact phase, the final orbital eccentricities are as small as those of the Earth and Venus. ## Citations 35 citations in Web of Science® 33 citations in Scopus® ## Altmetrics Detailed statistics Item Type: Journal Article, refereed, original work 07 Faculty of Science > Institute for Computational Science 530 Physics English October 2008 06 Mar 2009 10:36 05 Apr 2016 13:06 Institute of Physics Publishing 0004-637X Publisher DOI. An embargo period may apply. https://doi.org/10.1086/590948 http://arxiv.org/abs/0806.1689
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9642753601074219, "perplexity": 2020.7978258671155}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-17/segments/1492917124478.77/warc/CC-MAIN-20170423031204-00597-ip-10-145-167-34.ec2.internal.warc.gz"}
http://matthew-brett.github.io/teaching/fdr.html
$$\newcommand{L}[1]{\| #1 \|}\newcommand{VL}[1]{\L{ \vec{#1} }}\newcommand{R}[1]{\operatorname{Re}\,(#1)}\newcommand{I}[1]{\operatorname{Im}\, (#1)}$$ # Thresholding with false discovery rate¶ Written with J-B Poline. The false discovery rate is a different type of correction than family-wise correction. Instead of controlling for the risk of any tests falsely being declared significant under the null hypothesis, FDR will control the number of tests falsely declared significant as a proportion of the number of all tests declared significant. A basic idea on how the FDR works is the following. We have got a large number of p values from a set of individual tests. These might be p values from tests on a set of brain voxels. We are trying to find a p value threshold $$\theta$$ to do a reasonable job of distinguishing true positive tests from true negatives. p values that are less than or equal to $$\theta$$ are detections and $$\theta$$ is a detection threshold. We want to choose a detection threshold that will only allow a small number of false positive detections. A detection can also be called a discovery; hence false discovery rate. For the FDR, we will try to find a p value within the family of tests (the set of p values), that we can use as a detection threshold. Let’s look at the p value for a particular test. Let’s say there are $$N$$ tests, indexed with $$i \in 1 .. N$$. We look at a test $$i$$, and consider using the p value from this test as a detection threshold; $$\theta = p(i)$$. The expected number of false positives (FP) in N tests at this detection threshold would be: $E(FP) = N p(i)$ For example, if we had 100 tests, and the particular p value $$p(i)$$ was 0.1, then the expected number of false positive detections, thresholding at 0.1, is 0.1 * 100 = 10.
Let’s take some data from a random normal distribution to illustrate: >>> from __future__ import print_function, division >>> import numpy as np >>> import matplotlib.pyplot as plt Hint If running in the IPython console, consider running %matplotlib to enable interactive plots. If running in the Jupyter Notebook, use %matplotlib inline. >>> np.random.seed(42) # so we always get the same random numbers >>> N = 100 >>> z_values = np.random.normal(size=N) Turn the Z values into p values: >>> import scipy.stats as sst >>> normal_distribution = sst.norm(loc=0,scale=1.) #loc is the mean, scale is the standard deviation. >>> # The normal CDF >>> p_values = normal_distribution.cdf(z_values) To make it easier to show, we sort the p values from smallest to largest: >>> p_values = np.sort(p_values) >>> i = np.arange(1, N+1) # the 1-based i index of the p values, as in p(i) >>> plt.plot(i, p_values, '.') [...] >>> plt.xlabel('$i$') <...> >>> plt.ylabel('p value') <...> (png, hires.png, pdf) Notice the (more or less) straight line of p value against $$i$$ index in this case, where there is no signal in the random noise. We want to find a p value threshold $$p(i)$$ where there is only a small proportion of false positives among the detections. For example, we might accept a threshold such that 5% of all detections (discoveries) are likely to be false positives. If $$d$$ is the number of discoveries at threshold $$\theta$$, and $$q$$ is the proportion of false positives we will accept (e.g. 0.05), then we want a threshold $$\theta$$ such that $$E(FP) / d < q$$ where $$E(x)$$ is the expectation of $$x$$, here the number of FP I would get on average if I was to repeat my experiment many times. So - what is $$d$$ in the plot above? Now that we have ordered the p values, for any index $$i$$, if we threshold at $$\theta \le p(i)$$ we will have $$i$$ detections ($$d = i$$). Therefore we want to find the largest $$p(i)$$ such that $$E(FP) / i < q$$.
We know $$E(FP) = N p(i)$$ so we want the largest $$p(i)$$ such that: $N p(i) / i < q \implies p(i) < q i / N$ Let’s take $$q$$ (the proportion of false discoveries = detections) as 0.05. We plot $$q i / N$$ (in red) on the same graph as $$p(i)$$ (in blue): >>> q = 0.05 >>> plt.plot(i, p_values, 'b.', label='$p(i)$') [...] >>> plt.plot(i, q * i / N, 'r', label='$q i / N$') [...] >>> plt.xlabel('$i$') <...> >>> plt.ylabel('$p$') <...> >>> plt.legend() <...> (png, hires.png, pdf) Our job is to look for the largest $$p(i)$$ value (blue dot) that is still underneath $$q i / N$$ (the red line). The red line $$q i / N$$ is the acceptable number of false positives $$q i$$ as a proportion of all the tests $$N$$. Further to the right on the red line corresponds to a larger acceptable number of false positives. For example, for $$i = 1$$, the acceptable number of false positives $$q * i$$ is $$0.05 * 1$$, but at $$i = 50$$, the acceptable number of expected false positives $$q * i$$ is $$0.05 * 50 = 2.5$$. Notice that, if only the first p value passes threshold, then $$p(1) < q \space 1 \space / \space N$$. So, if $$q = 0.05$$, $$p(1) < 0.05 / N$$. This is the Bonferroni correction for $$N$$ tests. The FDR becomes more interesting when there is signal in the noise. In this case there will be p values that are smaller than expected on the null hypothesis. This causes the p value line to start below the diagonal on the ordered plot, because of the high density of low p values. >>> N_signal = 20 >>> N_noise = N - N_signal >>> noise_z_values = np.random.normal(size=N_noise) >>> # Add some signal with very low z scores / p values >>> signal_z_values = np.random.normal(loc=-2.5, size=N_signal) >>> mixed_z_values = np.sort(np.concatenate((noise_z_values, signal_z_values))) >>> mixed_p_values = normal_distribution.cdf(mixed_z_values) >>> plt.plot(i, mixed_p_values, 'b.', label='$p(i)$') [...] >>> plt.plot(i, q * i / N, 'r', label='$q i / N$') [...] 
>>> plt.xlabel('$i$') <...> >>> plt.ylabel('$p$') <...> >>> plt.legend() <...> (png, hires.png, pdf) The interesting part is the beginning of the graph, where the blue p values stay below the red line: >>> first_i = i[:30] >>> plt.plot(first_i, mixed_p_values[:30], 'b.', label='$p(i)$') [...] >>> plt.plot(first_i, q * first_i / N, 'r', label='$q i / N$') [...] >>> plt.xlabel('$i$') <...> >>> plt.ylabel('$p$') <...> >>> plt.legend() <...> (png, hires.png, pdf) We are looking for the largest $$p(i) < qi/N$$, which corresponds to the last blue point below the red line. >>> below = mixed_p_values < (q * i / N) # True where p(i)<qi/N >>> max_below = np.max(np.where(below)[0]) # Max Python array index where p(i)<qi/N >>> print('p_i:', mixed_p_values[max_below]) p_i: 0.00323007466783 >>> print('i:', max_below + 1) # Python indices 0-based, we want 1-based i: 9 The Bonferroni threshold is: >>> 0.05 / N 0.0005 In this case, where there is signal in the noise, the FDR threshold adapts to the presence of the signal, by taking into account that some values have small enough p values that they can be assumed to be signal, so that there are fewer noise comparisons to correct for, and the threshold is correspondingly less stringent. As the FDR threshold becomes less stringent, the number of detections increases, and the expected number of false positive detections increases, because the FDR controls the proportion of false positives in the detections. In our case, the expected number of false positives in the detections is $$q i = 0.05 * 9 = 0.45$$. In other words, at this threshold, we have a 45% chance of seeing a false positive among the detected positive tests. So, there are a number of interesting properties of the FDR - and some not so interesting if you want to do brain imaging. 
• In the case of no signal at all, the FDR threshold will be the Bonferroni threshold • Under some conditions (see Benjamini and Hochberg, JRSS-B 1995), the FDR threshold can be applied to correlated data • FDR is an “adaptive” threshold Not so “interesting” • FDR can be very variable • When there are lots of true positives, and many detections, the number of false positive detections increases. This can make FDR detections more difficult to interpret.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6976743340492249, "perplexity": 1123.745837671846}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-13/segments/1490218189534.68/warc/CC-MAIN-20170322212949-00182-ip-10-233-31-227.ec2.internal.warc.gz"}
http://mathhelpforum.com/pre-calculus/201833-logarithmic-expressions.html
# Math Help - Logarithmic expressions 1. ## Logarithmic expressions Attached Thumbnails 2. ## Re: Logarithmic expressions Note that $\log_8 {64} = 2$. Also, $\log_8 {64} = 3 \log_8 {4}$ by a logarithmic identity. Therefore $3 \log_8 {4} = 2 \Rightarrow \log_8 {4} = \frac{2}{3}$. 3. ## Re: Logarithmic expressions There is no exponent 3 though. How did you end up with log64 base 8? 4. ## Re: Logarithmic expressions I'm using 64 because $64 = 4^3 = 8^2$. 5. ## Re: Logarithmic expressions Originally Posted by dan713 $x = {\log _8}(4)\; \Rightarrow {8^x} = 4\; \Rightarrow \;x = \frac{2}{3}$. $x = {\log _8}(4)\; \Rightarrow {8^x} = 4\; \Rightarrow \;x = \frac{2}{3}$. That is correct but it might not be immediately apparent to dan713 that x = 2/3. Unless you note that $\sqrt[3]{8^2} = 4$, or you write it as $2^{3x} = 2^2$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 8, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9939131140708923, "perplexity": 5827.027998457858}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-22/segments/1464049276564.72/warc/CC-MAIN-20160524002116-00180-ip-10-185-217-139.ec2.internal.warc.gz"}
https://www.physicsforums.com/threads/help-with-an-integral.17706/
# Help with an integral 1. Apr 2, 2004 ### MathNerd $$\int \frac { dx } { ( a + x )^2 + sin^{2} ( \pi x ) }$$ where a is just an arbitrary constant... Last edited by a moderator: Apr 2, 2004 2. Apr 2, 2004 Oh my. That is ugly. What kind of evil person is making you do that? 3. Apr 3, 2004 ### meister The Integrator can't do it, therefore it's impossible. (I know this isn't true, but when Mathematica can't do it I get kind of scared.) http://integrals.wolfram.com/index.en.cgi It just spits this out: http://integrals.wolfram.com/graphics.cgi?format=StandardForm&FontSize=Medium&expr=1/Sqrt%5B%28a%2Bx%29%5E2%20%2B%20%28Sin%5BPi%2Ax%5D%29%5E2%5D [Broken] Last edited by a moderator: May 1, 2017 4. Apr 3, 2004 ### ShawnD Maple 8 says it cannot be done. 5. Apr 3, 2004 ### Zurtex I can never get that to do a difficult integration when I have one. But yes this does look like a really nasty one. Edit: I put it in and got this: http://integrals.wolfram.com/graphics.cgi?format=StandardForm&FontSize=Medium&x=24&expr=1%20/%20%28%20%28a%20%2B%20x%29%5E2%20%2B%20sin%5E2%28%28Pi%29x%29%20%29&y=21 [Broken] Last edited by a moderator: May 1, 2017 6. Apr 3, 2004 Both my version of Mathematica and the Integrator return the integral. The first one you had, MathNerd, Mathematica could do, but this new one doesn't work out. 7. Apr 4, 2004 ### meister How do you get a sin^2 with no argument? Last edited by a moderator: May 1, 2017 8. Apr 4, 2004 ### himanshu121 U can calculate for certain sets of values of x where sin(pi x)=0 or 1 9. Apr 4, 2004 Yeah, really. The integral of a constant is a piece of cake. Similar Discussions: Help with an integral
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5727240443229675, "perplexity": 5680.381419543774}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-30/segments/1500549436316.91/warc/CC-MAIN-20170728002503-20170728022503-00229.warc.gz"}
https://www.physicsforums.com/threads/does-the-probability-collapse-theory-of-qm-imply-backward-in-time.861651/
I Does the probability collapse theory of QM imply backward in time 1. Mar 11, 2016 fredt17 In the thought experiment known as Schrodinger's cat a cat is placed in a sealed box, and its life or death is tied to an uncertain quantum event such as radioactive decay. If the radioactive particle decays, the cat dies. If not, nothing happens. According to probability collapse QM, as I understand it, the cat is in an uncertain state until we open the box and collapse the probability wave created by the quantum event. But what if we wait ten days to open the box? Will we discover that, if the cat died, its body has decomposed for up to ten days? But when did the ten days of decomposition occur (or become certain)? Does probability collapse theory claim that the death of the cat does not occur (or become certain) until we open the box and the decay too is uncertain even though the biological process will appear to have taken up to ten days? Or does the probability collapse theory of QM imply a backward in time causation? 2. Mar 11, 2016 Staff: Mentor Sure. They occured during the ten days - in parallel to the cat living there for 10 days. They became certain when you opened the box. Anyway, living systems are too complex for such a superposition to happen. With some atoms, that scenario is possible. No. 3. Mar 11, 2016 StevieTNZ In 'Quantum Enigma' by Bruce Rosenblum and Fred Kuttner, they describe a measurement occurring. Using your example, ten days after setting up the experiment, upon measurement the appropriate history of the quantum systems is created. In principle, macroscopic objects (despite experiencing decoherence, which does -not- cause a definite reality to arise [only -apparent- collapse) are quantum systems also. 4. Mar 12, 2016 the_pulp Depending on the interpretation. In time symmetric interpretation the answer is yes. The beta decay interact with a decomposed cat and that decomposed cat transforms continuously backward in time into a present alive cat. 
Of course I'm not being too precise but this is the general idea within this interpretation. The "no" answer is also possible and it is related to the more traditional collapse interpretation. I tend to like more the time symmetric interpretation because it preserves locality and determinism (but loses causality -as the cat "first decomposes and then goes back to life backward in time", you know what I mean-). Anyway it is just a matter of taste. 5. Mar 12, 2016 the_pulp Sorry I wrote something wrong. What I tried to say, generally speaking, is that we open the box and we interact with a decomposed dead cat which transforms continuously backward in time in a dead not decomposed cat which interacts with beta decay and transforms backward in time in a present alive cat. Sorry!! 6. Mar 12, 2016 Demystifier No, we are in an uncertain state. The cat is in a certain state. 7. Mar 12, 2016 fredt17 : Well, my understanding (like the pulp and StevieTNZ) is that the point of the Schrodinger's Cat experiment was to tie a macroscopic event (such as the death of a cat) to a quantum event, and so the complexity of the biological event is irrelevant. Thus, the probability theory of QM does imply backward in time causation, or perhaps more accurately, that time is suspended in the quantum system until we measure the system. Have I got that right? 8. Mar 12, 2016 David Lewis The backward-in-time interpretation for Schroedinger's Cat Paradox was detailed in a book by John Gribbin (Schroedinger's Kittens). 9. Mar 12, 2016 Staff: Mentor It is still relevant. You get decoherence. The time-symmetric interpretation is one of many. You do not need backwards causation and most interpretations do not have that. 10. Mar 13, 2016 AlexCaledin Perhaps it's better to think of the quantum system as of an essentially spatiotemporal (existing in its space-time) object ? It can be considered as a superposition of histories - until it (or our uncertainty?)
is reduced by observation to one "actual" decoherent history. Our problem seems to be the habit of imposing temporal evolution on Nature too much... Last edited: Mar 13, 2016 11. Mar 13, 2016 Demystifier That is right only in the interpretation of QM that stipulates that everything (not only time) is suspended until we measure the system. 12. Mar 13, 2016 eloheim Which interpretation is this? Doesn't evolution within the isolated system continue until you open the box? 13. Mar 14, 2016 Demystifier Draft saved Draft deleted Similar Discussions: Does the probability collapse theory of QM imply backward in time
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8066986799240112, "perplexity": 1778.2973847943813}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-43/segments/1508187825147.83/warc/CC-MAIN-20171022060353-20171022080353-00300.warc.gz"}
https://mathzsolution.com/compact-sets-are-closed/
# Compact sets are closed? I feel really ignorant in asking this question but I am really just don’t understand how a compact set can be considered closed. By definition of a compact set it means that given an open cover we can find a finite subcover the covers the topological space. I think the word “open cover” is bothering me because if it is an open cover doesn’t that mean it consists of open sets in the topology? If that is the case how can we have a “closed compact set”? I know a topology can be defined with the notion of closed sets rather than open sets but I guess I am just really confused by this terminology. Please any explanation would be helpful to help clear up this confusion. Thank you! I think that what you’re missing is that an open cover of a compact set can cover more than just that set. Let $X$ be a topological space, and let $K$ be a compact subset of $X$. A family $\mathscr{U}$ of open subsets of $X$ is an open cover of $K$ if $K\subseteq\bigcup\mathscr{U}$; it’s not required that $K=\bigcup\mathscr{U}$. You’re right that $\bigcup\mathscr{U}$, being a union of open sets, must be open in $X$, but it needn’t be equal to $K$. For example, suppose that $X=\Bbb R$ and $K=[0,3]$; the family $\{(-1,2),(1,4)\}$ is an open cover of $[0,3]$: it’s a family of open sets, and $[0,3]\subseteq(-1,2)\cup(1,4)=(-1,4)$. And yes, $(-1,4)$ is certainly open in $\Bbb R$, but $[0,3]$ is not. Note, by the way, that it’s not actually true that a compact subset of an arbitrary topological space is closed. For example, let $\tau$ be the cofinite topology on $\Bbb Z$: the open sets are $\varnothing$ and the sets whose complements in $\Bbb Z$ are finite. It’s a straightforward exercise to show that every subset of $\Bbb Z$ is compact in this topology, but the only closed sets are the finite ones and $\Bbb Z$ itself. Thus, for example, $\Bbb Z^+$ is a compact subset that isn’t closed.
{"extraction_info": {"found_math": true, "script_math_tex": 26, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9215176105499268, "perplexity": 55.263594546376915}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-40/segments/1664030337490.6/warc/CC-MAIN-20221004085909-20221004115909-00185.warc.gz"}
https://rdrr.io/cran/robsel/f/inst/doc/using-robsel.Rmd
# Using the robsel package In robsel: Robust Selection Algorithm knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) This vignette illustrates the basic usage of the robsel package to estimate the regularization parameter for Graphical Lasso. ## Data We use a 100-by-5 matrix generated from a normal distribution. x <- matrix(rnorm(100*5),ncol=5) ## Using robsel functions ### Estimate of the regularization parameter for Graphical Lasso The function robsel estimates $\lambda$, a regularization parameter for Graphical Lasso at a prespecified confidence level $\alpha$. library(robsel) lambda <- robsel(x) lambda ### Graphical Lasso algorithm with $\lambda$ from Robust Selection The function robsel.glasso returns an estimate of a sparse inverse covariance matrix using Graphical Lasso with the regularization parameter estimated from Robust Selection. A <- robsel.glasso(x) A ### Use RobSel with multiple prespecified confidence levels We can use multiple $\alpha$ simultaneously with Robust Selection. alphas <- c(0.1, 0.5, 0.9) lambdas <- robsel(x, alphas) lambdas robsel.fits <- robsel.glasso(x, alphas) robsel.fits ## Try the robsel package in your browser Any scripts or data that you put into this service are public. robsel documentation built on May 25, 2021, 5:08 p.m.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.37416142225265503, "perplexity": 18250.04913632333}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623487616657.20/warc/CC-MAIN-20210615022806-20210615052806-00076.warc.gz"}
http://talkstats.com/search/583729/
# Search results 1. ### Logistic Regression: Evaluate model & Check for Multicollinearity Thank you! Where can I find the calibration and c-statistics? 2. ### Mean center binary categorical variables for logistic regression? I am wondering if mean-centering is possible, makes sense and needs to be or should not be done for a logistic regression with interaction term and only binary categorical variables. They all have values of 0 or 1. Is it technically possible to mean center (because there is no mean for... 3. ### Logistic Regression: Evaluate model & Check for Multicollinearity Hello, I have conducted a logistic regression with a binary categorical outcome and a binary categorical moderator + binary categorical independent variable (or more specific: two independent variables and in the logistic regression entered the interaction factor). How do I now check how good... 4. ### Using PROCESS macro vs. logistic regression Ok, thank you for your answer. I was not sure if it yields the same results as in the PROCESS tool that officially measures moderation effects. Do you have any info about that? 5. ### Using PROCESS macro vs. logistic regression Hi all, I am trying to investigate a moderation effect of a binary categorical moderator variable on the effect of another independent variable (categorical, binary) on a categorical binary outcome variable. With binary categorical I mean that the variables can have basically two values (0 and...
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8445343375205994, "perplexity": 1602.3953087126254}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-26/segments/1560628000266.39/warc/CC-MAIN-20190626094111-20190626120111-00212.warc.gz"}
https://study.com/academy/answer/a-2-50-kg-fireworks-shell-is-fired-straight-up-from-a-mortar-and-reaches-a-height-of-110-m-a-neglecting-air-resistance-a-poor-assumption-but-we-will-make-it-for-this-example-calculate-the-shell-s-velocity-when-it-leaves-the-mortar-b-the-mortar.html
# A 2.50 kg fireworks shell is fired straight up from a mortar and reaches a height of 110. m. (a)... ## Question: A 2.50 kg fireworks shell is fired straight up from a mortar and reaches a height of 110. m. (a) Neglecting air resistance (a poor assumption, but we will make it for this example), calculate the shell's velocity when it leaves the mortar. (b) The mortar itself is a tube 0.450 m long. Calculate the average acceleration of the shell in the tube as it goes from zero to the velocity found in (a). (c) What is the average net force on the shell in the mortar? How does this force compare to the weight of the shell? ## Acceleration: Assume a particle on which a force is imposed and the velocity of the particle changes constantly. In this scenario, the rate of change of velocity for the particle would be termed as its acceleration. Given data: • Mass of the shell, {eq}m = 2.50 \ kg {/eq} • Height, {eq}h = 110 \ m {/eq} • Length of the barrel, {eq}d = 0.450 \ m {/eq} • Initial speed of the shell in the barrel, {eq}u = 0 {/eq} Part (a): Let the launch speed of the shell be v. From the conservation law of mechanical energy, {eq}\begin{align*} \frac{1}{2}mv^{2} &= mgh\\ \Rightarrow \ v &= \sqrt{2gh}\\ v &= \sqrt{2 \times 9.80 \times 110}\\ v &= 46.43 \ m/s.\\ \end{align*} {/eq} Part (b): The average acceleration of the shell can be given as, {eq}\begin{align*} a &= \frac{v^{2}-u^{2}}{2d}\\ a &= \frac{46.43^{2}-0^{2}}{2 \times 0.450}\\ a &= 2395.27 \ m/s^{2}.\\ \end{align*} {/eq} Part (c): The average net force on the shell can be given as, {eq}\begin{align*} F &= ma\\ F &=2.50 \times 2395.27 \\ F &= 5988.18 \ \rm N.\\ \end{align*} {/eq} On comparing this force with the weight of the shell, {eq}\begin{align*} \frac{F}{W} &= \frac{5988.18 }{2.50 \times 9.80}\\ F &= 244.41 W.\\ \end{align*} {/eq}
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 1.000004768371582, "perplexity": 3028.9997328279783}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-24/segments/1590347388758.12/warc/CC-MAIN-20200525130036-20200525160036-00111.warc.gz"}
http://stats.stackexchange.com/questions/41891/find-the-partial-correlation-coefficient-r-1p-2468
# Find the partial correlation coefficient $r_{1p.2468}.$ Suppose all the simple correlations between $x_i$ and $x_j$ are $r$ for all $i,j=1,2,\dots,p$, $i\neq j$, with $p>8$. Find the partial correlation coefficient $r_{1p.2468}.$ By definition, $$r_{1p.2468}=\frac{cov(e_{1.2468},e_{p.2468})}{\sqrt{var(e_{1.2468})}\sqrt{var(e_{p.2468})}}$$ How can I find $cov(e_{1.2468},e_{p.2468})$, $var(e_{1.2468})$, and $var(e_{p.2468})$ in terms of $r$?
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8662495017051697, "perplexity": 155.1374746629457}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2013-48/segments/1386164936474/warc/CC-MAIN-20131204134856-00059-ip-10-33-133-15.ec2.internal.warc.gz"}
http://www.reddit.com/r/math/comments/17n0ql/i_tried_to_explain_square_roots_to_my_five_year/
[–]Statistics 218 points219 points  (70 children) sorry, this has been archived and can no longer be voted on I taught my 5-year-old brother about the square root of negative one once. His kindergarten teacher was so confused. [–] 242 points243 points  (6 children) sorry, this has been archived and can no longer be voted on I taught my seven year old nephew about countable and uncountable sets. Took me about an hour. Now when someone says "Na-uh, you are times INFINITY!" He counters with something like "you are times the amount of numbers in the reals!" Hes a pretty nerdy kid. I like him. [–] 193 points194 points  (5 children) sorry, this has been archived and can no longer be voted on But he's leaving himself open to the classic "power set of the reals" rebuttal! [–]Dirty, Dirty Engineer 45 points46 points  (1 child) sorry, this has been archived and can no longer be voted on My Discrete Maths prof said that his daughter would often look at his notes. She once was drawing in her class, and her teacher said, "Your A is upside down, dear," to which she responded, "It's a Universal Quantifier!" [–] 8 points9 points  (0 children) sorry, this has been archived and can no longer be voted on The teacher's head promptly exploded. [–] 45 points46 points  (55 children) sorry, this has been archived and can no longer be voted on The teacher probably disregarded it as nonsense that kids say, like how if you flush the toilet three times in a row that ghosts will appear, etc. I wouldn't trust a 5 year old explaining math concepts to me if I were a kindergarten teacher. [–] 42 points43 points  (5 children) sorry, this has been archived and can no longer be voted on Especially since it probably wasn't explained in the clearest sense. [–] 60 points61 points  (4 children) sorry, this has been archived and can no longer be voted on So there's a number that's imaginary like Larry's friend but if you times it by itself you have one less can I go outside? 
[–] 11 points12 points  (3 children) sorry, this has been archived and can no longer be voted on The next time someone asks me about imaginary numbers, this is the explanation of j I'm giving them. [–]Analysis 0 points1 point  (2 children) sorry, this has been archived and can no longer be voted on What's the history of the i/j convention? I was taught that i was the "usual" complex number such that +/-i is the solution to x2 +1=0. I was also told that j was a hyper complex number (such as quaternion or tessarine). Just curious. [–] 8 points9 points  (0 children) sorry, this has been archived and can no longer be voted on For EE, my Diff Eq. textbook put it clearly and I'll paraphrase. Electrical engineers use j for the imaginary number, since they use i for current because current starts with c. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on Among other things, we use j in electrical engineering because i is already current. [–] 13 points14 points  (48 children) sorry, this has been archived and can no longer be voted on Really ought to be general knowledge, though. [–] 18 points19 points  (43 children) sorry, this has been archived and can no longer be voted on Why? How woud it be useful for kindergarten teaching to be able to calculate with complex numbers? I would prefer if they used that extra bit of cognitive capacity to learn more about child pedagogy. [–] 16 points17 points  (12 children) sorry, this has been archived and can no longer be voted on I'm preeeetty sure it ought to be general knowledge for an elementary teacher to have at LEAST passed and understood high school math,of which imaginary numbers are a part. Honestly, would you want a high school teacher who didn't understand calculus teaching algebra? No way - you want someone sufficiently skilled in at least the next "step up" because then they can sufficiently answer any questions and challenge the more advanced students. 
Similarly you don't want an elementary teacher who can't do algebra. Algebraic reasoning is essential to begin to develop in an elementary schooler, and a teacher needs to UNDERSTAND algebra in order to help the students along. [–] 8 points9 points  (1 child) sorry, this has been archived and can no longer be voted on Complex numbers are not in the high school curriculum all over the world, apparently. And I would much rather want a kindergarten teacher to be skilled in communicating algebraic reasoning to children, than having an intimate personal understanding of the subject. These are two different, albeit correlated, skills. If a kid asks a surprising question like, "I understand all this squaring business, and I get that the reverse operation is a square root, but what is the square root of a negative number?" I expect the unknowing teacher to say, "That's a really good question, and I don't know. I will look it up until tomorrow. Maybe you can ask your parents meanwhile, and if they know, you can teach me?" There's no reason to prepare kindergarten teachers for the most tricky questions the kids can possibly ask, when it's more effective to spend the time teaching them how to communicate the basic concepts so that the kids can figure out things on their own. If a kid happens to ask a tricky question, I'm sure the kindergarten teacher is as capable of learning the answer as they were before the kid asked the question. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Ah, yes. Unfortunately, most of the time, what happens is "You can't get a square root of a negative number, the end." because elementary teachers have a tendency to be math-phobic. But I definitely agree with you - the ideal for a elementary teacher who doesn't happen to know, is acknowledging the question and looking it up when you got home. 
[–] 2 points3 points  (9 children) sorry, this has been archived and can no longer be voted on But what exactly are imaginary numbers useful for? I remember learning a little about them in high school, but now I'm a sophomore in college, majoring in computer engineering, and they still haven't popped up again. I thought we'd use them at least once in calc or physics, but nope. [–] 11 points12 points  (0 children) sorry, this has been archived and can no longer be voted on Got to the computer engineering part and was a little surprised (they'll come at you soon enough). They're to transformations (like rotations, reflections, and your fancy shmancy transforms, etc) like natural numbers are to counting (for the most part). The only thing imaginary about them is some imaginary explanation for naming them that way. [–] 6 points7 points  (1 child) sorry, this has been archived and can no longer be voted on Computer engineer here, you should be taking an intro DSP course soon (or for that matter an intro circuits course). You'll either learn to love imaginary numbers or slowly be driven insane. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on I'm on the verge of finishing my lower-div requirements, so I expect them to come back soon. Looking forward to seeing them again. [–] 3 points4 points  (4 children) sorry, this has been archived and can no longer be voted on I've used them in physical chemistry (others take modern physics, but my second major is chemistry.) They're used for the wave function, which is essentially (someone correct me if I'm wrong) the spherical harmonic functions, analogous to sine and cosine. http://en.wikipedia.org/wiki/Spherical_harmonics Other places imaginary numbers are useful involve circuit analysis, though I'm not exactly sure how. Imaginary numbers show up in solutions to ODE's as well. 
[–] 1 point2 points  (3 children) sorry, this has been archived and can no longer be voted on You gonna need them in signal processing in electrical engineering a lot. I think I started to use them in the second year of my studies and almost never got rid of them anymore. Not that I minded them, the alternative looks horrible. [–] 0 points1 point  (2 children) sorry, this has been archived and can no longer be voted on Quantum Mechanics as well. Physics grad reporting in. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yeah, used them actually in Physics III when we looked at simple things in quantum mechanics. Pretty soon after looking at waves you need to use i and j to describe more complex wave forms. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yup yup - the first quarter of physical chemistry, for me. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on [–] 1 point2 points  (21 children) sorry, this has been archived and can no longer be voted on Didn't say anything about usefulness. But (I would assume) you'd be hard-pressed to find someone who knew about that but had never heard of complex numbers. [–] 7 points8 points  (19 children) sorry, this has been archived and can no longer be voted on You said it ought to be general knowledge. People have a tendency to think that generally unnecessary topics of their field ought to be general knowledge. I just wanted to see if you had an actual argument as for why complex numbers ought to be general knowledge, while something equally unnecessary, like the basic gaits of horses, a few common sailors knots, or the world record times in track and field, shouldn't. [–] 14 points15 points  (2 children) sorry, this has been archived and can no longer be voted on Unrelated, but each one of your "ought to be general knowledge"s are offset by one character, and looks really cool. 
Its an ought staircase. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on I wish they were on my end too. Screenshot? (That's called a "river" in typography by the way, and it's undesireable because it catches attention more than the actual text.) Edit: Never mind, another comment posted the same thing along with a screenshot. It could be RES that's making it. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on Thought it was just me. UV. [–] 7 points8 points  (6 children) sorry, this has been archived and can no longer be voted on To be fair, kindergarten teachers aren't only kindergarten teachers. They are also people, and parents, and have friends. Maybe complex numbers don't come up in life. But most people will never need to know what 563*394 is but they can probably still calculate it with some difficulty. Knowing how to multiply is a tool. To be able to use tools, you need to know some important concepts too. This is like that. You don't need to know how six-cylinder engines work either, but you might want to know a bit about it because when the salesman tries to sell you six spark plugs for a four-cylinder engine, you're getting ripped off. You might be interested in science news out of personal curiosity but don't have the tools to recognize the difference between the Heisenberg Uncertainty Principle and measurement uncertainties, which is actually a whole world of difference in understanding Physics. Maybe, you grow up and have a kid who learns this stuff and is interested in it, but you lack the capacity to communicate on that level. TLDR: Knowledge is awesome. Trying to think of reasons to not know something is idiotic. 
[–] 2 points3 points  (5 children) sorry, this has been archived and can no longer be voted on While we agree on knowledge being awesome, one of the goals of a grade school is to equip its pupils with a basic set of knowledge and skills necessary for living. One of those life-skills is a hunger for knowledge, and you can't instill that by teaching people random facts. I learned about imaginary numbers when I was young, and while I thought they were cool, imaginary numbers didn't empower me. Learning to build a simple oscillator circuit that flashed an LED was empowering -- here was six-year-old-me, making billions of little electrons doing my bidding. I felt more capable by learning to build things out of wood, and to cook, and to be able to handle my own money. Every fact that you learn has this potential, but it requires a meaningful context. Just knowing that an imaginary number is the square root of negative one isn't very powerful, and it doesn't become powerful until you can understand rotations in the complex plane, and in turn how those give rise to complex transformations. Most children are never going to get to the point that they need these tools, but we currently try to educate everybody as if they're going to be a rocket scientist, at the expense of teaching kids some very important lessons, like how to manage money and the effects of compound interest, or how to buy groceries and use them to cook, or how to critically analyze a speech, or how to understand concepts greater than yourself. All of these would be a better use of classroom time than complex numbers or calculus, and I say this as as a (somewhat former) kid that loved math, got a degree in it, and who spends a lot of his spare time writing computer software. It's not that we should't provide access to this material; if a child wants to learn about math, don't stop them! 
But I think we need a deeper focus on building effective citizens and individuals, rather than preparing everybody for advanced math or physics. [–] 0 points1 point  (3 children) sorry, this has been archived and can no longer be voted on That's what I meant. Sorry if I implied that simply memorizing an arbitrary fact was helpful. But everyone should learn the basic stuff. It's a disaster that quantum physics is not even touched on in high school. This is mostly likely the most important reason for there being an enormous number of misconceptions surrounding it. No one will need it, but knowing where the human race is at is still important if you wish to be part of the world today. Besides, teaching quantum specifically has a very important use in teaching: It breaks a student's preconceived notions of what is possible in reality and how a completely non-intuitive philosophy of science is most likely the truth about the universe. You are however, confusing education and training. Learning how to manage your money is "training". It's a particular skill that you learn and then you use it until it becomes obsolete or something else. Mathematics, on the other hand, is education. It provides you with the fundamentals and the foundations of the knowledge you'll require to be able to learn a huge ARRAY of skills much more easily because you get the fundamentals of how values, numbers, quantities, objects works together. This is much more useful because it never becomes obsolete, you can always use it to learn new skills. We are trying to educate children. Not teach them targeted lessons for what WE think is important. Think about how many skills that would've been considered important in the 80s are completely obsolete today. Think about how many kids who went school in the 80s are grown adults today. 
Hell, I am a 90s child and went to high school not more than 5 years ago - but if the teachers of that time were allowed to decide what is important for me to know, I would be useless today as a student. We educate with fundamentals because no matter what happens a student can adapt to the changes of the future. Further Reference: http://en.wikipedia.org/wiki/Half-life_of_knowledge [–] 0 points1 point  (2 children) sorry, this has been archived and can no longer be voted on I don't see any practical difference between education and training; you learn by doing, whether the subject is carpentry (building a chair) or topology (proving that a space is connected). We need to prepare kids for a future that we can't possibly even understand, which means giving them the skills they'll need to deal with the unknown. That means practical skills, like how to survive in the world on your own, as well as a toolkit of skills that empower children to make sense of and work with the world around them: critical thinking, problem deconstruction, and so on. Quantum physics is far from either of these. It's neither a practical survival skill (like money management), nor is it a skill that helps them to make sense of the world in any tangible way, because it's almost impossible to interact with quantum effects without expensive equipment, and yes, this includes the Casmir effect. For anybody other than the hardcore physics nerds, quantum physics doesn't give the kids any "eureka" moment that they can own. They don't get to experience the thrill of exploration, or see how turning the universe on its head leads to a clearer understanding. All they see is another book, telling them something as meaningful as "i = sqrt(-1)" If a child is interested in deep-diving into a sea of quarks and muons, by all means they should be supported in doing so, but as a part of the general curriculum, you'd be just as well off mandating a class in Aramaic literature. 
A class designed to break children of preconceived notions needs to present them with a playground of infinite possibility, where they are required to think far outside the box to come up with innovative solutions to tough problems. Such a class could be constructed from the fundamentals of almost any discipline, be it robotics, mathematics, physics, computer science, literature, or psychology. But it needs to be a playground that the kids can play in, a bucket of legos, rather than a prefab kit that can only fit together one way. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Wow. You said what I wanted to in a much better way. I just need to add that I'm not saying knowledge is dumb. I just don't think everyone should learn everything, because there's simply not space and time for that. People have to prioritise between all the things they coud learn. Society is where it is because people can specialise. A kindergarten teacher are a kindergarten teacher because they chose to focus and become specialised in care for children. Had they instead chosen to learn all about sailing, about combustion engines, about complex transformations, they might not have been as good of a kindergarten teacher, despite being an awesome polymath. Sure, if you have an interest in something or think you can gain from knowing it, nobody's stopping you. I just don't think anybody get to tell anybody else what they ought to know unless they can present a really good argument as for why everybody ought to know that -- how it is helpful in their lives. Polymaths are the ideals of the 16th century. Today, we are space monkeys. [–]Discrete Math 2 points3 points  (2 children) sorry, this has been archived and can no longer be voted on Complex numbers are generally taught in high school or so, right? I'd expect a kindergarten teacher to have graduated from high school. 
[–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on When i did high school, it was only taught in the very highest math class, and only people who did the 2nd highest would probably have any conceptual idea of what complex numbers are (roughly 10% of the school population, at a school that does reasonably well). Based on that, i see no reason why many primary school teachers would have heard of complex numbers. US school syllabus is probably different though. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yes, but where I live, you can choose to focus on different kinds of subjects in high school. People going into social sciences, which makes up for roughly 50% of the population or more, doesn't have to take as much maths. There are six common math courses for natural science students, of which social science people only take two or three. They learn to solve quadratic equations, and perhaps some trigonometry and a little about differentials, but that's it. [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on I just wanted to point out my eyes were really drawn to the cool diagonal pattern in your comment I've circled here. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on That is brilliant, that is. It doesn't look that way on my end, and it makes me a little sad. [–] 4 points5 points  (1 child) sorry, this has been archived and can no longer be voted on the basic gaits of horses Very necessary for a lot of literature. It's the same reason why the Bible is still relevant in my country. Very few still believe as of the previous generation, but the references are everywhere. a few common sailors knots Always useful! Just possessing the knowledge that there are knots with specific purposes is useful. Learn kids some knots, tell 'em they can find more on the internet. 
the world record times in track and field, shouldn't. Eh. Don't know about that. IMHO. The point being, for math, some qualitative knowledge of concepts can be useful. Knowing that math isn't limited by rules of what you can or can't do, but that math requires context, and that you can almost always invent more rules so that things make sense. Realizing that math is man-made, and that it is his to play with. Everything in mathematics is a choice. It took way too long for me to realize that. The rules we impose on math are there to keep things usable! It's not because the teacher says so, it's because sqrt(-1) apples doesn't make sense, and we haven't come to anything practical like that... yet! That's cool and exciting. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on I totally agree with you on that! I just don't think that the understanding you speak of magically comes once you know complex numbers. ;) As a side note, I have been lobbying personally quite hard for the understanding you speak of. I probably brought it up a little too often back when I attended high school, and I still speak to current high school students every now and then about their perceptions of maths and what they think it is. Personally, I think the reason people don't get that is because maths education is frakked up. Students are indeed having rules imposed on them, and not getting to experiment on their own, invent rules and realise limitations when they try to bring things back to the real world. [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on Not quite what I was going for. More like, "this is a reasonably commonly known thing, I would have expected this to be something elementary teachers would have heard of". [–] 4 points5 points  (0 children) sorry, this has been archived and can no longer be voted on Oh, alright then. I don't think it's that common to know, though. 
But it depends a little on where you look. At least where I live, roughly half of the population stops taking math classes before they get to complex numbers, so all they are taught is that there is no negative square. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on Primary school education as a degree offers nothing in the way of complex numbers (nor should it). The only chance most people would come up against them is if they ever did a reasonable level of maths at school, which isn't necessary and from experience, unlikely for primary school teachers. I think given their background, they'd be far more likely to know about child pedagogy than complex numbers. [–] 0 points1 point  (7 children) sorry, this has been archived and can no longer be voted on Perhaps not yet, but did you know at some point in history the idea of negative numbers was so abstract that most common folk couldn't explain them? How could one possibly have a negative number of something? I think in due time that not understanding what complex numbers are intuitively will be similar to someone of today's generation having no clue to what a negative number is. [–] 2 points3 points  (6 children) sorry, this has been archived and can no longer be voted on Not just negative numbers, but ancient Greeks really didn't consider zero a number. (Or, at least, as wikipedia reports, they were "unsure about the status of zero as a number".) I can kinda see how that mental block would exist... If you have 5 apples, and I take 5 apples from you, how many apples do you have? What do you mean how many? I don't have any apples! [–] 1 point2 points  (5 children) sorry, this has been archived and can no longer be voted on Exactly, it's difficult not to believe that one day complex numbers too will become just as intuitive to the point of not understanding how they ever were not. 
[–] 1 point2 points  (4 children) sorry, this has been archived and can no longer be voted on I'm not certain. Sure, everyone considers zero a number but that's just because they were taught that. The reason zero was under so much doubt before is because it is the odd one out in standard arithmetic; i.e. real numbers are closed under addition, subtraction, and multiplication; and division until we let zero into the bunch.... witness today how many people, some of them are even educated, rational adults, still try to justify x / 0 in terms of regular arithmetic. So we might get a common consensus of people parroting "i == sqrt(-1)" but never really getting it.... [–] 1 point2 points  (2 children) sorry, this has been archived and can no longer be voted on Fair enough but I think the OP's original point was that teachers should have a usable understanding of complex numbers. The core understanding of 0 and how it is similar/different to other numbers is still not so intuitive. But for most practical purposes the majority of people will know how to properly use it. Similarly, I'm guessing that the connection between rotations and complex numbers will become intuitive for practical purposes, even if people don't really understand it. [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on Understood. I agree that a teacher should understand the math of at least one level beyond what they are teaching. But even though most people know how to work with zero, it is only because we have had hundreds of years in perfecting how to teach it to them so they don't have to think about it - i.e.: by rote memorization. I foresee a similar future for i. 
Sure understanding it as rotations makes a lot of intuitive sense, but I think that if it passes into common knowledge, that just means we finally got good at teaching people by rote a few rules for working with complex numbers in some well defined, easy to recognize and apply cases, and that it doesn't necessarily mean it will pass into common understanding. Or maybe you are right. Only time will tell :) [–] 1 point2 points  (2 children) sorry, this has been archived and can no longer be voted on well yes, but what should be and what actually is are rarely the same I've worked with many elementary school teachers (I teach at an education school) I'm lucky that a good portion of them can simplify positive radicals [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on As in, sqrt(8) to 2*sqrt(2)? I'd consider that advanced in comparison. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on from an algebraic viewpoint yes from a conceptual one no sqrt(8) is asking someone what number times itself gives you 8? sqrt(-1) is asking what number times itself gives you negative 1, which violates a lot of the rules you'd learn before you get to it [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on pretty sure there's more important shit to know [–] 13 points14 points  (3 children) sorry, this has been archived and can no longer be voted on As if his imaginary friends were not enough. Also, how did you teach him that? I would have trouble explaining i to my imaginary n-year old (n < 15). [–]Logic 14 points15 points  (1 child) sorry, this has been archived and can no longer be voted on Try using this, it's fantastic. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on Fantastic read. Thanks for this. 
[–]Statistics 6 points7 points  (0 children) sorry, this has been archived and can no longer be voted on Good question. I was 10 at the time, and I myself learned it from my 14-year-old brother, who I assume learned about it in his math class (he was a year ahead in school, so in 10th grade at 14). As far as I was concerned it was just a magical number that was the square root of negative one, and I didn't really understand it any further than that. I knew about the multiplicative rules for squares, so I understood that e.g. 3i was the square root of -9, but I'm pretty sure I didn't know how to multiply general complex numbers or anything more complicated. I doubt my 5-year-old brother understood anything other than that it was the square root of -1. I'm not sure he even knew what that meant, but he knew how to repeat it. Around the same time my older brother taught me binary, which he had learned in his computer science course. He had been taught how to add and subtract binary numbers, and the two of us figured out how to do multiplication and division by analogy to the regular base-10 system. He also memorized the first 50 digits or so of pi, and we imitated him in that too. I had about 20 digits down when I was ten. In high school I memorized up to 100, but I can only recite the first 35 or so now. I have a photocopy of an assignment my younger sister turned in when she was in first grade, a drawing of horses with speech bubbles reciting the first 15 or so digits of pi (this was a drawing she did in class, so she had to have written it out from memory). When I was even younger--5 or 6--I learned that there were n(n-1)(n-2)...(2)(1) ways to put n objects in order, though I didn't know it was called a factorial or the notation for it. My older sister (7 years older than me) learned it at school, and I remember her talking to our mom about it and counting arrangements of 2, 3, 4 toys in a row. I grew up in a large family where our parents (successfully!) 
encouraged us to share what we learned at school and in books. I mostly picked up math because that was what I liked, but there were lots of other conversations going on about other subjects that I wasn't interested in. Some day I hope I can teach my kids to do the same... [–] 54 points55 points  (2 children) sorry, this has been archived and can no longer be voted on You are awesome. My dad taught me square roots when I was maybe 6 and it launched a lifelong interest in mathematics. It didn't take hold immediately, but the amazement I felt at how something could be easy in one direction (like squaring) but hard in the other (taking square roots) really stuck with me, and ultimately evolved into a passion for cryptography and computational complexity. Thanks for reminding me of my roots! [–] 32 points33 points  (0 children) sorry, this has been archived and can no longer be voted on Thanks for reminding me of my roots! Nice. :) [–][S] 7 points8 points  (0 children) sorry, this has been archived and can no longer be voted on Thanks. [–] 21 points22 points  (10 children) sorry, this has been archived and can no longer be voted on I x-posted this to r/funmath. I hope you don't mind- I'm definitely going to use this. [–][S] 18 points19 points  (0 children) sorry, this has been archived and can no longer be voted on I don't mind at all. I write stuff so people can read it. :) [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on I like this subreddit, I just subscribed to it. I hope it gets rolling. Little math tricks like some of these can be incredibly useful for napkin calculations in science. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on thanks. Please feel free to add stuff you like. [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on That's just a text post. You didn't post the actual link. 
D: [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on woops! thanks. Fixed it. [–] -3 points-2 points  (4 children) sorry, this has been archived and can no longer be voted on Wait, which maths is fun is highly subjective. How is the content meant to differ from /r/math? [–] 2 points3 points  (3 children) sorry, this has been archived and can no longer be voted on r/math seems to be more for technical stuff. r/funmath should be things that make math interesting or fun for you. Its focus isn't on mathematical rigor, but rather on mathematical fun. [–] -2 points-1 points  (2 children) sorry, this has been archived and can no longer be voted on But rigor is what I find fun. Don't worry, I'm merely being a pedantic ass :). [–] -1 points0 points  (1 child) sorry, this has been archived and can no longer be voted on You must be fun at parties. [–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on This is /r/math, not a party. [–] 59 points60 points  (14 children) sorry, this has been archived and can no longer be voted on Nice explanation. [–][S] 19 points20 points  (13 children) sorry, this has been archived and can no longer be voted on Thanks. :) [–] 25 points26 points  (1 child) sorry, this has been archived and can no longer be voted on Now he's ready for those imaginary negative blocks. [–] 6 points7 points  (0 children) sorry, this has been archived and can no longer be voted on Nah, he's ready for Cauchy-Goursat [–] 14 points15 points  (0 children) sorry, this has been archived and can no longer be voted on You can enhance it at some point and add 1 to 25 and start putting them around to see if you can make a square out of it. Obviously you can't, it's almost a square, but with a little extra. make him plug in sqrt(26) and he'll see 5.09 < explain that the 5 is that nice square, and that extra one you can't fit makes up all that extra junk. 
Now you can give him any number and he'll be able to tell make you a perfect square where the sides equal the first digit and the junk makes up the rest :D [–] -3 points-2 points  (8 children) sorry, this has been archived and can no longer be voted on Now ask him what the square root of 30 is. Or even 29 if you're in a bad mood. See if he really understood anything. [–][S] 24 points25 points  (6 children) sorry, this has been archived and can no longer be voted on He's still five. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on well, you an bridge to that step by "liquifying" the leftover block(s) until you get a nice proper square, and measuring the sides.... depends on the little tyke's imagination I suppose. But it might be worth a shot. [–] 1 point2 points  (3 children) sorry, this has been archived and can no longer be voted on OMG you're Dan Lewis? I really enjoy the emails from subbing with you. That egg in a egg thing was fascinating. [–][S] 3 points4 points  (2 children) sorry, this has been archived and can no longer be voted on I am Dan Lewis. Thanks. And it was. Eggs shouldn't do that. [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on please enlighten; what is with the egg in an egg? thanks [–][S] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on I know! Be evil for once! :D [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on I have a feeling the votes (and comments) on this post aren't just from /r/math subscribers. [–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on What about Right-Handed rules!? :-D [–] 9 points10 points  (1 child) sorry, this has been archived and can no longer be voted on Hey Dan! I get daily emails from you! You're great! 
[–][S] 8 points9 points  (0 children) sorry, this has been archived and can no longer be voted on Thanks! And to the rest of you, he's talking about this. [–] 9 points10 points  (2 children) sorry, this has been archived and can no longer be voted on I really think we can teach kids things at a much faster rate than we do now in schools. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on Just got out of high school, couldn't agree more. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on Theoretically you could, but it might result in more students being held back, and the teachers would actually have to know more, resulting in a deficit of teachers. Kids learning by rote is good for teachers who can only teach by rote. [–] 6 points7 points  (2 children) sorry, this has been archived and can no longer be voted on Sounds great and you could teach him about the approximate square roots of numbers that are not perfect squares using the same technique. For example, the square root of 20 is between 4 and 5, but closer to 4, because 20 squares won't make a square, but it is more than 16 and less than 25. Use 20 squares to show the relationship. [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on I still use this technique to this day. Also, let's say that you have a non-square integer x where a2 < x < b2, then the limit of x1/2 as x approaches infinity is (a + (x-a)/(x-b)). Pretty good for coming up with approximations of square roots of non-perfect squares. edit: realized thanks to another redditor that my limit is wrong. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on That would evaluate to a+1 [–] 20 points21 points  (14 children) sorry, this has been archived and can no longer be voted on If you can teach a 5 year old square roots. You can teach me Calc II in less than a week. 
Seriously, I have an upcoming exam. Help? [–] 26 points27 points  (4 children) sorry, this has been archived and can no longer be voted on Calc II? Is that the one that teaches integrals? Okay. Look at Paul's Notes and Khan Academy. Especially, from the latter, the part where he does questions from AP Calc BC AP Tests, since those have problems with more creative applications of functions and integrals. Also if there's a concept you don't understand, there's probably an MIT OpenCourseWare lecture for it. Take the MIT OCW exam on the topic as practice. [–] 3 points4 points  (0 children) sorry, this has been archived and can no longer be voted on Paul's Notes are the reason I didn't fail linear algebra and calculus too badly last semester... [–] 1 point2 points  (2 children) sorry, this has been archived and can no longer be voted on I used to look up YouTube videos, they helped a lot. [–] 8 points9 points  (1 child) sorry, this has been archived and can no longer be voted on Gotta emphasize Paul's Online Math Notes. They are very good. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on not at home. no res. saving for later. [–] 15 points16 points  (8 children) sorry, this has been archived and can no longer be voted on Do you kind of "get" the concepts when they're explained in class but tend to bomb the exams? If so, you don't need tutoring, you need practice. Math has more in common with learning to play the piano than studying for, say, a history class. Even the most brilliant piano prodigy can't just look at a piano and become a virtuoso; he or she has to practice... a lot. If it's the concepts themselves you're struggling with, try some YouTube lectures (including Khan Academy videos). Then come back to /r/learnmath and ask your specific questions. If you're having trouble with a specific problem, do as much as you can then post to /r/cheatatmathhomework. 
However: note that, unlike the tongue-in-cheek name of the subreddit, they will appreciate it if you demonstrate that you've given the problem a try and are stuck in a specific place. Good luck, and good studies! [–] 4 points5 points  (2 children) sorry, this has been archived and can no longer be voted on I was decent at Calc I. I understood the concepts. Got B's in most of my exams. So I lacked practice. Took a semester off, and got into Calc II. I had no memory of the concepts. So I tried to self teach them. While doing this, lost focus on class. My first exam is in a week. And I don't know how I am going to do. I wish I had some sort of recap of Calc I and the basis of Calc II. [–] 6 points7 points  (0 children) sorry, this has been archived and can no longer be voted on No need to wish. Head thee over to Khan Academy, MITs OCW lectures, and DEFINITELY patrickjmt's stuff. Absorb, practice, repeat; it's on you buddy! [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Great piano analogy. I wished I had understood this when I was still taking math classes. Lecture was always a breeze, but I couldn't figure out why that never fully transferred to tests. [–] 0 points1 point  (2 children) sorry, this has been archived and can no longer be voted on Where can I go to practice? I know Python, but it takes too long to write something to generate problems for me. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Khan Academy up through parts of calc 1. You might want to put up a general "where can I go for exercises" question here or in /r/learnmath. Could be interesting. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on [–] 6 points7 points  (1 child) sorry, this has been archived and can no longer be voted on Kids are awesome. 
[–] 10 points11 points  (19 children) sorry, this has been archived and can no longer be voted on Is that where the term "square root" actually comes from? Makes sense, never thought of it like that. However I have to say it bugs me how people are taught "roots", as if it's something very unique and strange (perhaps because we have a symbol for square root), when really it's just a fractional power. If you ask someone "what is 52" they probably remember it's 5*5 = 25. But then if you ask the square root of 25, they go off of memorization instead of thinking: well x2 = 25, solve for x. x = 251/2 = 5. Heaven forbid you ask for the cubed root of 125! I don't think people are taught fractional powers properly, instead they just learn square root and then any other root / fractional power is a scary notion because they don't know how to begin, when really it's just as easy as squared root. That being said: I think it would be interesting if you taught your five year old using that diagram how to multiple 5x5, and then how to inverse it 251/2. But I do love the explanation you gave, works well for power(1/2). I just have a 1.5y old but I am so eager to start teaching him math when he can understand it! [–]Number Theory 18 points19 points  (8 children) sorry, this has been archived and can no longer be voted on But then if you ask the square root of 25, they go off of memorization instead of thinking: well x2 = 25, solve for x. x = 251/2 = 5. Is 251/2 any easier to solve mentally than sqrt(25)? [–] 8 points9 points  (7 children) sorry, this has been archived and can no longer be voted on I'll answer it this way: Yes it is, because then if I ask someone to solve 251/3 they can use what they did in 251/2 to help solve the problem. 
If someone can solve sqrt(25) it doesn't help them to solve 251/3 because they don't understand sqrt(x) is x1/2 [–]Number Theory 2 points3 points  (5 children) sorry, this has been archived and can no longer be voted on I think that's valid, but the strength of the fractional notation is when using it in an algebraic expression that can then be simplified somehow. Seeing that ( x1/6 )3 = x1/2 is difficult when using the typical "root" symbols. But for solving these mentally, with actual values... I know I can't do them. 1231/2 ? You've got me. I'm sure there's a method for getting the first few significant digits at least, but not one that's commonly used. And in this instance, the fractional notation has little-to-no benefit over the root symbol, since I'm probably reaching for my calculator anyway. :) [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on actually, at least where i was from in the states, it's taught in school. briefly. but until recently I didn't know why it works the way that it does, and consequently, i have only very hazy half-recollections of ever doing it. edit yes, this is probably a special case for x1/2, I wonder if there's a general method that works in a similar mechanical fashion? [–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on The most general way to at least get close is to start breaking the numbers down into least common multiples. i.e., if you want 201/2 = (4*5)1/2 = 41/2 * 51/2 The trick to trying to do it quick in your head or on paper without going into complicated calculations would be to try to find integer roots. Square root is going to be the easiest because when you break numbers into least common multiples, you may have numbers with integer roots. With higher roots you have broken it down too far with LCM and have to improvise some. 
So if you wanted 91/3 breaking it into least common multiple you would have (3 * 3 * 3)1/3 which hopefully should pop out that 3 is what you are looking for. Or let's say you have 271/3 = (3 * 3 * 3 * 3)1/3 = (3 * 3 * 3)1/3 + 31/3 = 3 + 31/3 [–] 1 point2 points  (2 children) sorry, this has been archived and can no longer be voted on Well I'm not saying it will help them to mentally do it. However like you said, if someone is grabbing a calculator, they still have to know what to type in to find, say, the fourth root. They have to know to type in 1231/4. I just mean that someone wouldn't even know what to type in a calculator if it's not the sqrt button. [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on Except a lot of calculators have a x√ button that lets you type, for example, 3√64 to get 4. I guess it's missing from TI calculators though. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on TI calculators have this by pressing "2nd, math" [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Also, putting things in fractional powers is helpful (for me at least) when trying to integrate or derive equations [–] 9 points10 points  (0 children) sorry, this has been archived and can no longer be voted on However I have to say it bugs me how people are taught "roots", as if it's something very unique and strange (perhaps because we have a symbol for square root), when really it's just a fractional power. NO. NO. IT'S NOT OK WHEN MATH PEOPLE SAY THINGS LIKE THIS. ahem. Sorry. Seriously though, I feel that there's a tendency in math when someone has a question about some issue A) for people to go "oh what A) is is simple, it's a specific case of B)!" A lot of the time B is more difficult to understand than A and is arrived at by generalizing A. Like here. 
Telling a 5 year old that square roots are a specific case of fractional powers is a waste of time. Once you teach him square roots though, it would be quite possible to extend his understanding to include fractional powers and give him a deeper grasp of what's going on. [–] 5 points6 points  (7 children) sorry, this has been archived and can no longer be voted on Can you explain what it means to take something to a fractional power for us [–] 8 points9 points  (2 children) sorry, this has been archived and can no longer be voted on You can kinda work backwards by remembering that: an * am = an+m So, a1/2 * a1/2 = a1 And you already know sqrt(a) * sqrt(a) = a So now you know sqrt(a) = a1/2 And since multiplication is associative: a1/3 * a1/3 * a1/3 = a a1/3 = cuberoot(a) And if you add in this identity, you can also break apart non-unit fractions: anm = (an )m So, a2/3 = a2 * 1/3 = (a2 )1/3 [–] 13 points14 points  (1 child) sorry, this has been archived and can no longer be voted on It's a good way of thinking about it but I don't know how that would help to calculate it [–] 5 points6 points  (0 children) sorry, this has been archived and can no longer be voted on Another way to think about it is that a1/n is the solution to the polynomial equation xn - a = 0. There are a number of algorithms for finding the root of such a polynomial. Newton's method is one example. [–] 1 point2 points  (3 children) sorry, this has been archived and can no longer be voted on The best way to think about it is simply: what number do I have to multiply by itself X times to arrive at this value? So: 1251/3 is simply: What number do I multiply itself 3 times (i.e., x3 ) to arrive at 125? [–] 3 points4 points  (2 children) sorry, this has been archived and can no longer be voted on Yeah, but again, that doesn't really help to calculate it. 
[–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on well that's not what you asked for [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on explain what it means to take something to a fractional power to calculate it Those are two very different things. I explained what it means just like you asked. Being able to calculate non-trivial numbers is a whole different story. But you should understand the calculation is merely details. If I ask you to explain 381381/3 you should be able to tell me that means the result is a number which, if you multiply it by itself three times, will equal 38138. That's what is important. If you want to calculate it just put it in your calculator because that's not the important part. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yeah, more complicated analysis became much easier once I thought about it in fractional powers. It requires you however to understand powers and fractions already, which the 5 year old probably didn't. [–] 2 points3 points  (5 children) sorry, this has been archived and can no longer be voted on Works for cube roots too [–] 0 points1 point  (4 children) sorry, this has been archived and can no longer be voted on Was wondering when a dimension or two would be added. We can "see" 2D squares: 1, 4, 9, 16, 25 We can "see" 3D cubes: 1, 8, 27, 64, 125 4D would be: 1, 16, 81... Hang on... 16 things in 4D? What does that mean? [–] 1 point2 points  (3 children) sorry, this has been archived and can no longer be voted on I guess you could just have n n-by-n-by-n cubes? And say it represents the cube at different times. 
[–] 3 points4 points  (0 children) sorry, this has been archived and can no longer be voted on Not really at different times - although time is often referred to as the "4th dimension", (I prefer to think of it as the "0th Dimension") here we're talking physical dimensions, like hypercubes (of which there are some fun ways of representing in both 2 and 3D). [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on Time is irrelevant, just show them that each n-by-n square is a slice out of the n-by-n-by-n cube. Once they understand that, tell them the n-by-n-by-n cubes are slices out of the n-by-n-by-n hypercube. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on his 5 yo is probably pretty bright, so many he would understand "hypercube" but I think time is more intuitive. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on How about circles instead of little cubes/squares to ensure the point is not missed or confused? Cool. I'll take this up with my 6 year old. [–] 2 points3 points  (0 children) sorry, this has been archived and can no longer be voted on You could use a similar method to teach him some intuition about prime numbers. A composite number is equivalent to a collection of equally sized 'unit squares' that can be arranged into a rectangle with sides greater than 1. Anything else is prime or unity. [–] 2 points3 points  (1 child) sorry, this has been archived and can no longer be voted on I remember pestering my parents about square roots and they never gave me an answer. [–] 3 points4 points  (0 children) sorry, this has been archived and can no longer be voted on I remember my father forcing advanced math down my throat from a very early age. Decades later I got a degree in math and then decades after that I realized I never wanted one. So, you know, it goes both ways. 
[–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Great parenting. My dad also began to teach me math at an early age. He came up with a new card game, an symplified version of Pasur. Here's how to play it: (i) One standard pack of 52 cards and 2, 3 or 4 players who take turns being dealer. Object of the game is to get the most number of cards. (ii) Four cards face-down to each player and four face-up to form the “pool” in the middle of the table. If one of the cards in the pool is a Jack or a ten, it gets cut back into the deck and is replaced with a new card, and if this is a Jack or a ten as well or if there are multiple Jacks or ten in pool, dealer reshuffles and deals again. (iii) Beginning at dealer’s left, players take turns playing cards to the table until there are no cards left in their hands. Dealer then deals four more cards to each player (but not to the pool) and play continues until the deck is exhausted. A play consists of playing one card in one of two ways: (1) Either by adding it to the pool of face-up cards (2) Or by using that card to pick up one or more cards in the pool. A player may not add a card to the pool if that card is capable of picking up one or more cards in the pool. The player must pick up the cards or play a different card. (iv) Cards may be picked up as follows: (1) Number cards (including Aces) pick up one or more other cards with which they combine to form a sum of ten. (2) A King picks up one King, a Queen picks up one Queen. (3) A Jack picks up all Jacks and number cards on the table, but not Kings and Queens. (4) When a player only has a single card left in the last hand of the deck, all remaining cards in the pool are picked up when the player is able to match: a) a king-king b) queen-queen c) combine to form a sum of 10. (v) Whoever has the most number of cards win the game. It's a fairly easy game, that I've played since I'm 3 years old. 
I think this is a great way to introduce your child to counting and operations. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on This is excellent. Well done! [–] 0 points1 point  (1 child) sorry, this has been archived and can no longer be voted on Dan, I hope you know that your daily emails are better than the whole of /r/TIL [–][S] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Thanks! But /r/TIL is pretty stellar. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on you should explain multiplying first (like you did, fill out a row of length n, then a column of length m, based at a corner, and fill in, now count!). Then dividing (x/m = find the length of "the" row of a rectangle with x boxes in it where "the" column has m boxes). And then finally, explain that the square root of x is equivalent to finding the square (rectangle where length of "the" row and "the" column" are equal) with x total boxes. [–] 0 points1 point  (2 children) sorry, this has been archived and can no longer be voted on I'm probably going down the wrong avenue when imagining visual representations to solve math, but... So when I first began learning square roots, I learnt it exactly as you illustrated. When I first saw cube-roots, I already knew the properties of a cube so I used the same concept from square-roots to understand it. But how would you illustrate fourth-roots or roots greater than 3 for that matter? [–]Applied Math 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on You just say "the pattern continues the same way, but I can't draw it anymore." :) [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on You can pull in time for the fourth dimension (eg. 
they stay there for N days - how many blocks will someone count if they count each block every day), though that just pushes the problem one dimension on. However, you could also show that the cube can be "flattened" by cutting each square layer out and putting them beside each other, and there will be the same number of blocks in all the the flattened squares as there are in the cube. (And similarly, you can cut each line out of the square and each block out of a line without changing the total number of blocks). Then you can say that, while it might be hard to visualise a 4d shape, if you had one you could similarly flatten it by cutting it into N cubes, which normally connect together in this 4th dimension just like the squares do in the 3rd. You explain that you're splitting up into a bunch of 3d cubes so you can show it in a 3d world (or further splitting each into squares to fit onto 2d paper). [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Another part of the understanding I suppose is to be able to go in both directions, X2 and X1/2 [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on You are awesome! I subscribed to Now I Know a few months ago and I always love receiving it. Keep it up! [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Good job. I wish someone had done this for me when I was a child. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Thanks for sharing. My guy is a bit young for this, but I am trying to keep tabs on all kinds of ways to make math interesting to him. cheers [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on I wish my dad did this to me when I was that young. 
[–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yeah but now your kid is going to be all what when you start talking about square roots of non integers and negative numbers. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Yep nice one, this is how they were explained to me all those years ago. That's actually why they are called 'Square' numbers. [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on I remember learning it as simply the opposite of squaring a number, and not having much trouble with that. My dad used a similar drawing to teach me the concept of square meters as a unit of measure for surfaces. For some reason I was having a hard time making the logical connection between square roots and square meters until I saw it drawn like this. (actually my dad used sugar cubes on a table surface, then proceeded to stack some and explain cubic meters as well) [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Congratulations on giving your son a wonderful formative experience! This may be (way) too much too soon, but I wonder if he is ready for a brush with the Pythagorean theorem? He already knows a bit about using the calculator, and maybe you can show him how to use a ruler. Here is what I have in mind: Lay a ruler diagonally across a standard rectangular sheet of paper and make a mark on, say, the left edge and one on the bottom edge. Now measure the distances along the sides from the marks to the corner (use the metric decimal system!). Maybe you have already showed him how squaring is the opposite of taking the square root. Anyway, together you can use the calculator to compute the square root of the sum of the squares of the two sides. Then you can measure the diagonal distance with the ruler and see that the measured distance is just what you calculated. 
I'm not sure about the proper order of presentation for maximum effect. Do you measure the diagonal first, and then see the distance pop up on the calculator, or do you calculate it first and then measure it? By the way, you ought to think about making videos of these experiences. I see huge quantities of various sorts of karma in your future! p.s. You're obviously doing GREAT, but don't push too hard or you risk ruining everything (e.g. the case of William James Sidis). [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on You seem like a really good dad. [–]Probability 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on Really interesting. I was taught it the much less intuitive way as "the way to get the answer to Pythagorean Theorem". [–] 0 points1 point  (5 children) sorry, this has been archived and can no longer be voted on sounds great, but I'm worried about the trend now that we verify our own correctness (or even just teach our kids to verify their own) by pushing buttons into a machine and reading the answer. yes for 5yo's and calculators that works fine, but when that kid grows up and lives in a world full of technology, he won't know how to verify correctness without a computer, and more to the point he won't be very good at figuring out which things he can trust and which he can't. Unless he has a really awesome dad that shows him how things work behind the scenes, which this kind probably does, but I would say most don't. ninja edit: not saying that everyone should know how calculators (and by extension, computers) work, but they should be able to perform the same calculation by hand and understand what it means, at least for simple cases. Doing surface-integrals by hand you do once as a student and after that leave it to the computer... 
[–] 7 points8 points  (4 children) sorry, this has been archived and can no longer be voted on I think you're reading into this wrong what they're doing here is coming up with math to verify the button not using a button to verify the math [–] 1 point2 points  (3 children) sorry, this has been archived and can no longer be voted on really? And to demonstrate we were right, I asked him to put the number 25 into his calculator and hit the square root button. When the five popped up, he screamed “WE’RE RIGHT! IT’S FIVE!” the button is a shortcut, not a verifier. [–] 2 points3 points  (2 children) sorry, this has been archived and can no longer be voted on but this all started because the kid found a button, and asked what the button did the button became the motivator for the knowledge all of the math that was done, was done because the kid wanted to know what the button meant the quote you picked is neither of them using the button to see that they took the square root properly it's them checking that the activity they did, represents what the button does [–] 1 point2 points  (1 child) sorry, this has been archived and can no longer be voted on fair enough. [–] 1 point2 points  (0 children) sorry, this has been archived and can no longer be voted on I teach math to future teachers...I may have never studied my multilinear algebra well enough...but I try to notice certain subtleties I'm also very anticalculator...but in this scenario I deem it harmless [–] 0 points1 point  (0 children) sorry, this has been archived and can no longer be voted on High school math teacher here. Nice job. I like that you drew blocks, I have used actual blocks with kids who did not understand the process. Favorite part... roots -> big trees. [–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on Slightly off topic I guess, but coincidentally I had just come across this picture while cleaning up my harddrive about 20 minutes before seeing this. 
[–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on I taught the basics of calculus to a few third graders. Kids are smarter than you think. [–] -1 points0 points  (0 children) sorry, this has been archived and can no longer be voted on You are my hero. When I have kids I'm going to have my wife pissed from doing this.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.44904473423957825, "perplexity": 1339.8811100915848}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-41/segments/1410657129229.10/warc/CC-MAIN-20140914011209-00233-ip-10-196-40-205.us-west-1.compute.internal.warc.gz"}
https://stacks.math.columbia.edu/tag/01CX
Definition 17.24.9. Let $(X, \mathcal{O}_ X)$ be a ringed space. The Picard group $\mathop{\mathrm{Pic}}\nolimits (X)$ of $X$ is the abelian group whose elements are isomorphism classes of invertible $\mathcal{O}_ X$-modules, with addition corresponding to tensor product. In your comment you can use Markdown and LaTeX style mathematics (enclose it like $\pi$). A preview option is available if you wish to see how it works out (just click on the eye in the toolbar).
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 2, "x-ck12": 0, "texerror": 0, "math_score": 0.9648633003234863, "perplexity": 370.1134911831945}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-49/segments/1637964362891.54/warc/CC-MAIN-20211203151849-20211203181849-00637.warc.gz"}
http://mathhelpforum.com/geometry/25167-circle-theorems-print.html
# Circle theorems • Dec 21st 2007, 09:43 AM Geometor Circle theorems Hi, could anybody help me with this? the question: what is the distance from the centre (O) to C http://img45.imageshack.us/img45/678/21870685kw9.png if you need any clarification just ask Any help appreciated! • Dec 21st 2007, 10:09 AM Plato 1 Attachment(s) Looking at the modified drawing, recall the secant theorem. (AC)(BC)=(EC)(DC). Then solve for x. • Dec 21st 2007, 10:28 AM Soroban Hello, Geometor! I have a solution . . . hope it's acceptable. Quote: What is the distance from the centre (O) to C? http://img45.imageshack.us/img45/678/21870685kw9.png Let $x \:=\:OC.$ Draw an altitude from $O$ to $AB$; call it $OD.$ In right triangle $ODA,\:OA = 5,\:AD = 4\quad\Rightarrow\quad \cos A \:=\:\frac{4}{5}$ Law of Cosines: . $OC^2 \;=\;AC^2 + OA^2 - 2(AC)(OA)\cos A$ So we have: . $x^2 \;=\;18^2 + 5^2 - 2(18)(5)\left(\frac{4}{5}\right) \;=\;205$ Therefore: . $x\;=\;\sqrt{205}$ • Dec 21st 2007, 10:33 AM Geometor thank you plato! and soroban! however i find using plato's secant theorem being the easier: since: (AC)(BC)=(EC)(DC). (18)(10)=(10+x)(x) =x^2+10x x^2+10x-180=0 (-10 +/- sq.root 82) / 2 and since we need a positive value we do: (-10 + sq.root 82) / 2 = 9.317821063.... and add the 5cm radius to get OC = 14.3cm(1dp) =sq. root 205 as soroban said :D Thanks for the help! • Dec 21st 2007, 12:05 PM JaneBennet Quote: Originally Posted by Soroban Hello, Geometor! I have a solution . . . hope it's acceptable. Let $x \:=\:OC.$ Draw an altitude from $O$ to $AB$; call it $OD.$ In right triangle $ODA,\:OA = 5,\:AD = 4\quad\Rightarrow\quad \cos A \:=\:\frac{4}{5}$ Law of Cosines: . $OC^2 \;=\;AC^2 + OA^2 - 2(AC)(OA)\cos A$ So we have: . $x^2 \;=\;18^2 + 5^2 - 2(18)(5)\left(\frac{4}{5}\right) \;=\;205$ Therefore: . $x\;=\;\sqrt{205}$ That is also my method except that I didn’t use trigonometry. By Pythagoras’ theorem on triangle ODA, OD = $\sqrt{5^2-4^2}$ = 3 cm. 
By Pythagoras’ theorem on triangle ODC, OC = $\sqrt{(4+10)^2+3^2}=\sqrt{205}$ cm. Quote: Originally Posted by Geometor thank you plato! and soroban! however i find using plato's secant theorem being the easier: since: (AC)(BC)=(EC)(DC). (18)(10)=(10+x)(x) =x^2+10x x^2+10x-180=0 (-10 +/- sq.root 82) / 2 and since we need a positive value we do: (-10 + sq.root 82) / 2 = 9.317821063.... and add the 5cm radius to get OC = 14.3cm(1dp) =sq. root 205 as soroban said Thanks for the help! You made a mistake there; it should be 820, not 82. It’s also easier to make mistakes with your calculations using the secant method. My recommendation: use the method that Soroban and I used. :rolleyes: • Dec 21st 2007, 12:58 PM Geometor haha thanks for pointing it out :D i wrote 820 in my calculations though phew • Dec 21st 2007, 04:04 PM loui1410 Quote: Originally Posted by Soroban Hello, Geometor! I have a solution . . . hope it's acceptable. Let $x \:=\:OC.$ Draw an altitude from $O$ to $AB$; call it $OD.$ In right triangle $ODA,\:OA = 5,\:AD = 4\quad\Rightarrow\quad \cos A \:=\:\frac{4}{5}$ Law of Cosines: . $OC^2 \;=\;AC^2 + OA^2 - 2(AC)(OA)\cos A$ So we have: . $x^2 \;=\;18^2 + 5^2 - 2(18)(5)\left(\frac{4}{5}\right) \;=\;205$ Therefore: . $x\;=\;\sqrt{205}$ How do you know AD=4? • Dec 21st 2007, 04:08 PM JaneBennet Because OAB is an isosceles triangle (so the perpendicular from O to AB bisects AB). • Dec 21st 2007, 04:09 PM loui1410 Oh right, sorry :o it's 2:07 AM here lol • Dec 21st 2007, 05:25 PM Plato Quote: Originally Posted by JaneBennet My recommendation: use the method that Soroban and I used. Reading the title of the posting “Circle Theorems” why would you recommend against using a very basic theorem about circles? The approach that you advocate is not unique to theorems about circles but rather belongs to the general discussion about triangles. • Dec 21st 2007, 06:04 PM JaneBennet I didn’t realize you had to use the theorem to solve this problem. My apologies.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 26, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9321427345275879, "perplexity": 4371.221648990578}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-50/segments/1480698541864.24/warc/CC-MAIN-20161202170901-00139-ip-10-31-129-80.ec2.internal.warc.gz"}
https://huggingface.co/naver-clova-ix/donut-base-finetuned-docvqa
# Donut (base-sized model, fine-tuned on DocVQA) Donut model fine-tuned on DocVQA. It was introduced in the paper OCR-free Document Understanding Transformer by Geewook Kim et al. and first released in this repository. Disclaimer: The team releasing Donut did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description Donut consists of a vision encoder (Swin Transformer) and a text decoder (BART). Given an image, the encoder first encodes the image into a tensor of embeddings (of shape batch_size, seq_len, hidden_size), after which the decoder autoregressively generates text, conditioned on the encoding of the encoder. ## Intended uses & limitations This model is fine-tuned on DocVQA, a document visual question answering dataset. We refer to the documentation which includes code examples. ### BibTeX entry and citation info @article{DBLP:journals/corr/abs-2111-15664, author = {Geewook Kim and Teakgyu Hong and Moonbin Yim and Jinyoung Park and Jinyeong Yim and Wonseok Hwang and Sangdoo Yun and Dongyoon Han and Seunghyun Park}, title = {Donut: Document Understanding Transformer without {OCR}}, journal = {CoRR}, volume = {abs/2111.15664}, year = {2021}, url = {https://arxiv.org/abs/2111.15664}, eprinttype = {arXiv}, eprint = {2111.15664}, timestamp = {Thu, 02 Dec 2021 10:50:44 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2111-15664.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} }
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7714160084724426, "perplexity": 26270.284418440475}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-14/segments/1679296949009.11/warc/CC-MAIN-20230329151629-20230329181629-00522.warc.gz"}
https://stats.stackexchange.com/questions/147490/sequential-probability-ratio-test-or-other-sequential-sampling-techniques-for
# Sequential Probability Ratio Test (or other Sequential Sampling techniques) for testing difference I have the results from running two algorithms and I want to be able to say that there is, say, a 95% probability that one of the sets of results is different to the other where different means A > B or B > A ("better" or "worse" in practical terms). Basically I want to try and reject the null hypothesis that both sets of results are drawn from the same distribution in the same manner as a 2 tailed T Test or Wilcoxon test (yes I know there is a slight difference in the null hypothesis between parametric and non parametric but that's not important right now). I want to do this with sequential sampling in which you run an initial, for example, 20 runs, carry out the test, and if there's no significant difference yet you run another run of each and repeat. The sequential sampling technique I can find the most information on is the Sequential Probability Ratio Test: Although if people know of an alternative way of achieving the same basic goal of minimizing number of runs to prove significance that would also be helpful. For SPRT Log L(...)/L(....) in those slides is the log likelihood ratio and my problem is I have no idea how to calculate it. It seems to be the probability of your data given the alternative hypothesis divided by the prob given the null hypothesis - but when your alternative hypothesis is just that the two data sets are different there's not enough info for you to actually calculate this probability. I'm getting the feeling that I was misled to believe SPRT was designed for this sort of hypothesis testing. So if anyone would be so kind as to either confirm that this isn't what SPRT is for or give me a concrete example for how to do this with SPRT or suggest an alternative then any info appreciated! I should point out that in this case I am assuming that you have a rough idea of the distribution of the data (e.g. normally distributed). Many thanks! 
• For Binomial data, formulas and a simple worked example are provided by William Q. Meeker Jr., A Conditional Sequential Test for the Equality of Two Binomial Proportions. Appl. Stat. (1981) 30, No. 2, pp 109-115 (available at JSTOR). I recently applied this to a Web "AB test" involving very long sequences; it performed as claimed and was reasonably efficient to compute. – whuber Apr 21, 2015 at 15:54
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.885973334312439, "perplexity": 256.2847708259822}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662521041.0/warc/CC-MAIN-20220518021247-20220518051247-00261.warc.gz"}
http://mismatchtea.co.uk/blog/2014/substitution-bingo
# substitution bingo 26 Jan 2014 This short post is the first of 2014; having moved house and changed school this is the first opportunity I have had to write this up. ### basic game The lesson simply starts as a game of bingo - played on a 3 by 3 grid with students picking their nine numbers from 1 to 30. This slide can be used as a template and displays the four letters that will be used for substitution. Here is a set of numbers to call out at random, with students first aiming for a line (any row, column or diagonal) and then a full house. ### the rest of the lesson At this point I tell the students that I created the numbers in September 2009 (the truth) and that I am bored of calling out the same numbers every time I do this lesson (not quite so true, I’m just happy that I’ve already done the work). I challenge students to create a new set of numbers and tell them that we will play a game at the end of the lesson with someone’s numbers. As a class we choose four new letters with values between 2 and 9. They then write out numbers 1 to 30 in three columns of ten and we find a few examples to start them off. I find that telling them we will use their numbers at the end of the lesson gives them ownership of the task and hence the motivation to be thorough and accurate. ### differentiation I’ve done this lesson many times with a large range of abilities. It can be differentiated by • only allowing letters in calculations (harder) or allowing letters and numbers • making the substitutions more difficult, e.g., $x = 7.2$, $y = 5$ • restricting the number of operations or types of operation • selecting the fourth number based on the choice for the first three numbers (e.g. if the students choose 7, 8, 9 then selecting a 2 may be more useful to them than selecting 7).
{"extraction_info": {"found_math": true, "script_math_tex": 2, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.4596686363220215, "perplexity": 573.2100120483482}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-05/segments/1516084893629.85/warc/CC-MAIN-20180124090112-20180124110112-00404.warc.gz"}
https://pub.uni-bielefeld.de/publication/2316147
# Linear hyperfinite Lévy integrals Herzberg F (2008) Working Papers. Institute of Mathematical Economics; 404. Bielefeld: IMW. Working Paper | Published | English Author Abstract This article shows that the nonstandard approach to stochastic integration with respect to (C^2 functions of) Lévy processes is consistent with the classical theory of pathwise stochastic integration with respect to (C^2 functions of) jump-diffusions with finite-variation jump part. It is proven that internal stochastic integrals with respect to hyperfinite Lévy processes possess right standard parts, and that these standard parts coincide with the classical pathwise stochastic integrals, provided the integrator's jump part is of finite variation. If the integrator's Lévy measure is bounded from below, one can obtain a similar result for stochastic integrals with respect to C^2 functions of Lévy processes. As a by-product, this yields a short, direct nonstandard proof of the generalized Itô formula for stochastic differentials of smooth functions of Lévy processes. Keywords Publishing Year ISSN PUB-ID ### Cite this Herzberg F. Linear hyperfinite Lévy integrals. Working Papers. Institute of Mathematical Economics. Vol 404. Bielefeld: IMW; 2008. Herzberg, F. (2008). Linear hyperfinite Lévy integrals (Working Papers. Institute of Mathematical Economics, 404). Bielefeld: IMW. Herzberg, F. (2008). Linear hyperfinite Lévy integrals. Working Papers. Institute of Mathematical Economics, 404, Bielefeld: IMW. Herzberg, F., 2008. Linear hyperfinite Lévy integrals, Working Papers. Institute of Mathematical Economics, no.404, Bielefeld: IMW. F. Herzberg, Linear hyperfinite Lévy integrals, Working Papers. Institute of Mathematical Economics, vol. 404, Bielefeld: IMW, 2008. Herzberg, F.: Linear hyperfinite Lévy integrals. Working Papers. Institute of Mathematical Economics, 404. IMW, Bielefeld (2008). Herzberg, Frederik. Linear hyperfinite Lévy integrals. Bielefeld: IMW, 2008. Working Papers. 
Institute of Mathematical Economics. 404. All files available under the following license(s): This Item is protected by copyright and/or related rights. [...] Main File(s) File Name Access Level Open Access
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9408949017524719, "perplexity": 7646.072079304999}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-34/segments/1502886104204.40/warc/CC-MAIN-20170818005345-20170818025345-00635.warc.gz"}
https://kar.kent.ac.uk/view/publisher/Academic_Press_Ltd_Elsevier_Science_Ltd.default.html
# Browse by Publisher Group by: Item Type | Date | No Grouping Number of items: 17. Key, Alastair, Fisch, Michael, Eren, Metin I. (2018) Early stage blunting causes rapid reductions in stone tool performance. Journal of Archaeological Science, 91 . pp. 1-11. ISSN 0305-4403. (doi:10.1016/j.jas.2018.01.003) (KAR id:65824) Acquaye, Adolf, Feng, Kuishuang, Oppon, Eunice, Salhi, Said, Ibn-Mohammed, Taofeeq, Genovese, Andrea, Hubacek, Klaus (2016) Measuring the Environmental Sustainability Performance of Global Supply Chains: a Multi-Regional Input-Output analysis for Carbon, Sulphur Oxide and Water Footprints. Journal of Environmental Management, 187 . pp. 571-585. ISSN 0301-4797. (doi:10.1016/j.jenvman.2016.10.059) (KAR id:58296) Cantia, Catalin, Tunaru, Radu (2016) A factor model for joint default probabilities, pricing of CDS, index swaps and index tranches. Insurance: Mathematics and Economics, 72 . pp. 21-35. ISSN 0167-6687. (doi:10.1016/j.insmatheco.2016.10.004) (KAR id:57915) Alai, Daniel H., Landsman, Zinoviy, Sherris, Michael (2015) A multivariate Tweedie lifetime model: Censoring and truncation. Insurance: Mathematics and Economics, 64 . pp. 203-213. ISSN 0167-6687. (doi:10.1016/j.insmatheco.2015.05.011) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:48716) Key, Alastair J. M. (2014) Are bigger flakes always better? An experimental assessment of flake size variation on cutting efficiency and loading. Journal of Archaeological Science, 41 . pp. 140-146. ISSN 0305-4403. (doi:10.1016/j.jas.2013.07.033) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:50475) O'Hanley, J.R., Wright, J., Diebel, M., Fedora, M.A., Soucy, C.L. (2013) Restoring stream habitat connectivity: A proposed method for prioritizing the removal of resident fish passage barriers. 
Journal of Environmental Management, 125 . pp. 19-27. ISSN 0301-4797. (doi:10.1016/j.jenvman.2013.02.055) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:34016) Alai, Daniel H., Landsman, Zinoviy, Sherris, Michael (2013) Lifetime Dependence Modelling using a Truncated Multivariate Gamma Distribution. Insurance: Mathematics and Economics, 52 (3). pp. 542-549. ISSN 0167-6687. (doi:10.1016/j.insmatheco.2013.03.011) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:38167) O'Hanley, J.R. (2011) Open rivers: Barrier removal planning and the restoration of free-flowing rivers. Journal of Environmental Management, 92 (12). pp. 3112-3120. ISSN 0301-4797. (doi:10.1016/j.jenvman.2011.07.027) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:28205) Key, Alastair J. M., Lycett, Stephen J. (2011) Technology based evolution? A biometric test of the effects of handsize versus tool form on efficiency in an experimental cutting task. Journal of Archaeological Science, 38 (7). pp. 1663-1670. ISSN 0305-4403. (doi:10.1016/j.jas.2011.02.032) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:50473) Hardiman, Nigel, Burgin, Shelley (2010) Recreational impacts on the fauna of Australian coastal marine ecosystems. Journal of Environmental Management, 91 (11). pp. 2096-2108. ISSN 0301-4797. (doi:10.1016/j.jenvman.2010.06.012) (The full text of this publication is not currently available from this repository. 
You may be able to access a copy if URLs are provided) (KAR id:33067) Albrecher, Hansjoerg, Constantinescu, Corina, Pirsic, Gottlieb, Regensburger, Georg, Rosenkranz, Markus (2010) An algebraic operator approach to the analysis of Gerber-Shiu functions. Insurance: Mathematics and Economics, 46 (1). pp. 42-51. ISSN 0167-6687. (doi:10.1016/j.insmatheco.2009.02.002) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:29601) Chadwick, Alan V., Newport, Robert J., Pickup, David M., Wetherall, Karen, Moss, Rob M., Jones, M.A., Goatham, S.W., Skinner, T. (2008) Sulfur and iron speciation in recently recovered timbers of the Mary Rose revealed via X-ray absorption spectroscopy. Journal of Archaeological Science, 35 (5). pp. 1317-1328. ISSN 0305-4403. (doi:10.1016/j.jas.2007.09.007) (Access to this publication is currently restricted. You may be able to access a copy if URLs are provided) (KAR id:8531) Nanere, Marthin, Fraser, Iain M, Quazi, Ali M, d'Souza, Clare (2007) Environmentally adjusted productivity measurement: An Australian case study. Journal of Environmental Management, 85 (2). pp. 350-362. ISSN 0301-4797. (doi:10.1016/j.jenvman.2006.10.004) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:2659) Dobcsanyi, P., Preece, Donald A., Soicher, L.H. (2007) On balanced incomplete-block designs with repeated blocks. European Journal of Combinatorics, 28 (7). pp. 1955-1970. ISSN 0195-6698. (doi:10.1016/j.ejc.2006.08.007) (Access to this publication is currently restricted. You may be able to access a copy if URLs are provided) (KAR id:2070) Lemmens, Bas, Scheutzow, Michael, Sparrow, Colin (2007) Transitive actions of finite abelian groups of sup-norm isometries. European Journal of Combinatorics, 28 (4). pp. 1163-1179. ISSN 0195-6698. 
(doi:10.1016/j.ejc.2006.02.003) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:28442) Shaw, Peter J., Lyas, Joanne K., Maynard, Jon, Van Vugt, Mark (2007) On the relationship between set-out rates and participation ratios as a tool for enhancement of kerbside household waste recycling. Journal of Environmental Management, 83 (1). pp. 34-43. ISSN 0301-4797. (doi:10.1016/j.jenvman.2006.01.012) (The full text of this publication is not currently available from this repository. You may be able to access a copy if URLs are provided) (KAR id:2306) MacMillan, Douglas C. (2004) Tradeable hunting obligations - a new approach to regulating red deer numbers in the Scottish Highlands? Journal of Environmental Management, 71 (3). pp. 261-270. ISSN 0301-4797. (doi:10.1016/j.jenvman.2004.03.005) (Access to this publication is currently restricted. You may be able to access a copy if URLs are provided) (KAR id:8551) This list was generated on Thu Feb 2 02:03:58 2023 GMT.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8422420024871826, "perplexity": 15123.429133427207}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764499967.46/warc/CC-MAIN-20230202070522-20230202100522-00472.warc.gz"}
http://mathhelpforum.com/differential-geometry/125783-complex-integrals.html
# Math Help - complex integrals 1. ## complex integrals hello, I wonder if someone can help me solve: 1. what are the singular points of f(z) 2. how to solve the integral thanks Attached Thumbnails 2. Originally Posted by avazim hello, I wonder if someone can help me solve: 1. what are the singular points of f(z) 2. how to solve the integral thanks z = 0 is a pole of order 3. 3. I still don't quite understand why it is a pole of order 3. And after that, to solve the integral I just need to find 2*pi*i*RES(f,0); 0 as order 3 ? 4. Originally Posted by avazim I still don't quite understand why it is a pole of order 3. And after that, to solve the integral I just need to find 2*pi*i*RES(f,0); 0 as order 3 ? To see why z = 0 is a pole of order 3, substitute the Maclaurin series for sin z and simplify ....
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8048449754714966, "perplexity": 1807.812230862885}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-32/segments/1438042989331.34/warc/CC-MAIN-20150728002309-00136-ip-10-236-191-2.ec2.internal.warc.gz"}
http://math.stackexchange.com/questions/31749/explanation-of-notation-a-probability-space-equipped-with-measure-p/31754
# Explanation of notation: a probability space equipped with measure P( . ) In a lecture I attended today, the professor made an off-hand comment of: "Suppose we have the set $S_n$ of permutations of $\{1, 2, ..., n\}$, which we can think of as a probability space equipped with measure $P( . )$." I'm not sure what this means - does it mean we have a probability of picking a random permutation with some probability, or something different...? - I assume it means that the probability of picking a given permutation is $\frac{1}{n!}$. – Qiaochu Yuan Apr 8 '11 at 15:54 Any finite set $S$ can be equipped with a natural probability measure $P\$ by setting, for any subset $A\subseteq S$, $$P(A)={\mbox{number of elements in }A\over \mbox{number of elements in }S}.$$ This corresponds to selecting an item from $S$ uniformly or at random. I suspect that your professor was thinking of applying this idea to the set of permutations $S_n$. @Undercover Mathematician Yes, some people use a dot as a place holder for a variable. Occasionally you see $f(\cdot)$ instead of $f(x)$, for instance. – Byron Schmuland Apr 9 '11 at 13:37
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9862378239631653, "perplexity": 258.3140440315493}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-18/segments/1461860110356.23/warc/CC-MAIN-20160428161510-00188-ip-10-239-7-51.ec2.internal.warc.gz"}
http://www.maa.org/publications/periodicals/convergence/problems-another-time?page=9&device=mobile
# Problems from Another Time Individual problems from throughout mathematics history, as well as articles that include problem sets for students. I found a stone but did not weigh it; after I added to it 1/7 of its weight and then 1/11 of this new weight, I weighed the total at 1 mina. What was the weight of the stone? A merchant bought 50,000 pounds of pepper in Portugal for 10,000 scudi and paid a tax of 500 scudi. Find a number having remainder 29 when divided by 30 and remainder 3 when divided by 4. A man died leaving 3 sons, to whom he bequeathed his estate in the following manner: to the eldest he gave 184 dollars; to the second 155 dollars and to the third 96 dollars; I owe a man the following notes: one of $800 due May 16; one of $660 due on July 1; one of $940 due Sept. 29. He wishes to exchange them for two notes of $1200 each and wants one to fall due June 1. When should the other be due? The authors recount the 'great tale' of Napier's and Burgi's parallel development of logarithms and urge you to use it in class. An oblong garden is a half yard longer than it is wide and consists entirely of a gravel walk... A certain man had in his trade four weights with which he could weigh integral pounds from one up to 40. How many pounds was each weight? Given a semicircle, prove that if O is the circle's center, DO=OE. Discussion of 15th century French manuscript, with translation of its problems, including one with negative solutions
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.47649261355400085, "perplexity": 2425.278804488541}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-52/segments/1418802770815.118/warc/CC-MAIN-20141217075250-00113-ip-10-231-17-201.ec2.internal.warc.gz"}
https://training.digitalearthafrica.org/en/latest/session_5/02_vegetation_exercise.html
# Exercise: Vegetation change detection¶ ## Overview¶ In this exercise, we will create a notebook to detect vegetation change. To compare change, we need to have data from two different times: an older dataset, and a newer dataset. The notebook will include the following steps: • Calculate a vegetation index for the loaded data • Split the vegetation index data into half, based on when the data was collected — an older half and a newer half • Compute the mean composite for each half; and • Compare the older and newer averages to check for vegetation change. At the conclusion of this exercise, you will have performed a vegetation analysis which can be used to report on changes in the selected area. ## Set up notebook¶ In your Training folder, create a new Python 3 notebook. Name it Vegetation_exercise.ipynb. For more instructions on creating a new notebook, see the instructions from Session 2. In the first cell, type the following code and then run the cell to import necessary Python dependencies. import matplotlib.pyplot as plt %matplotlib inline import datacube from deafrica_tools.plotting import display_map from deafrica_tools.bandindices import calculate_indices Note As of June 2021, the deafrica_tools package has replaced the deprecated sys.path.append('../Scripts') file import. For more information on deafrica_tools, visit the DE Africa Tools module documentation. In this exercise, we import one new function, calculate_indices. Instead of calculating band indices such as NDVI by defining a formula, we can call upon preset index calculations in the Sandbox. calculate_indices contains definitions for over a dozen different indices, from NDVI to the Bare Soil Index (BSI) to Tasselled Cap Wetness (TCW), and can apply them to your dataset. Using this function to select the index we want might seem like a lot of effort. However, there are some benefits to using the calculate_indices function, as we will see in this exercise. 
• Reduce chances of error in typing out the formula, as they are already defined in the calculate_indices script — this is great for more complicated formulae • Compare different index results without manually defining many different formulae • Reduce the number of definitions you have to type — for instance, there is no need for red= or nir= as we used in Session 4 We will use calculate_indices after we load the dataset. ### Connect to the datacube¶ Enter the following code and run the cell to create our dc object, which provides access to the datacube. dc = datacube.Datacube(app="Vegetation_exercise") ### Select area of interest¶ We need to select an area of interest. Some areas to check for vegetation changes include mining sites, where there might be devegetation, or crops, where seasonal changes in vegetation greenness occur. Previously, we have provided a longitude range and a latitude range. However, it is more common to define a central point, and provide a buffer around it. We previously used the x=(lon1, lon2), y=(lat1, lat2) method (left) to define our area of interest. In this exercise, we will use the buffer zone method (right). The advantage is that you can define one buffer to use in all four directions, and it is much easier to explore different areas by changing the central point and/or the buffer width. We will be selecting an area around a centre point. Enter the following code and run the cell to select an area and time range of interest. The parameters are: • latitude: The latitude at the centre of your area of interest. • longitude: The longitude at the centre of your area of interest. • buffer: The number of degrees to load around the central latitude and longitude. • time: The date range to analyse. For reasonable results, the range should span at least two years to prevent detecting seasonal changes. • time_baseline: The date at which to split the total dataset into two non-overlapping samples. 
For this exercise, we choose a date halfway in our time range. Its value here, '2015-12-31', is halfway between '2013-01-01' and '2018-12-31' - the time range in time. So our two time periods will be 2013 to 2015, and 2016 to 2018. In the next cell, enter the following code, and then run it to select an area. # Define the area of interest latitude = 0.02 longitude = 35.425 buffer = 0.1 # Combine central lat,lon with buffer to get area of interest lat_range = (latitude-buffer, latitude+buffer) lon_range = (longitude-buffer, longitude+buffer) # Set the range of dates for the complete sample time = ('2013-01-01', '2018-12-01') # Set the date to separate the data into two samples for comparison time_baseline = '2015-12-31' In the next cell, enter the following code, and then run it to show the area on a map. Since we have defined our area using the variables lon_range and lat_range, we can use those instead of typing out (latitude-buffer, latitude+buffer) and (longitude-buffer, longitude+buffer) again. display_map(x=lon_range, y=lat_range) Since we want to look at a longer time range, Landsat 8 data is suitable. In the new cell below, enter the following code, and then run it to load Landsat 8 data. Notice lat_range, lon_range and time were all defined in the previous cell, so we can use them as variables here. Note If you are unsure where we defined lat_range, lon_range and time, scroll up to the previous cell and look for the lines starting with lat_range = ..., lon_range = ... and time = .... landsat_ds = load_ard( dc=dc, products=["ls8_sr"], lat=lat_range, lon=lon_range, time=time, output_crs="EPSG:6933", resolution=(-30, 30), align=(15, 15), group_by='solar_day', measurements=['nir', 'red', 'blue'], min_gooddata=0.7) The output from running the load_ard() function should include a statement that says Loading 46 time steps. Note As of June 2021, DE Africa Landsat data has been upgraded to Collection 2. Datacube names have been updated to ls5_sr, ls7_sr and ls8_sr. 
Deprecated naming conventions such as ls8_usgs_sr_scene will no longer work. For more information on Landsat Collection 2, visit the DE Africa Landsat documentation. ## Calculate indices¶ Now we need to calculate a vegetation index. Until now, we have used NDVI, which uses the ratio of the red and near-infrared (NIR) bands to identify live green vegetation. The formula is: \begin{aligned} \text{NDVI} = \frac{\text{NIR} - \text{Red}}{\text{NIR} + \text{Red}} \end{aligned} This time we will use the Enhanced Vegetation Index (EVI). EVI uses the red, near-infrared (NIR) and blue bands to identify vegetation, and is particularly sensitive to high biomass regions, which is why it can be superior to NDVI. The formula for EVI is more complicated than NDVI as it uses three different bands and some empirical scaling constants. \begin{aligned} \text{EVI} = \frac{2.5 \times (\text{NIR} - \text{Red})}{\text{NIR} + 6 \times \text{Red} - 7.5 \times \text{Blue} + 1} \end{aligned} Instead of typing out that whole formula, we can use the calculate_indices function to calculate EVI. calculate_indices requires three inputs: • The dataset name, e.g. landsat_ds • The name of the index to calculate, e.g. index='EVI' • The Landsat collection number, e.g. collection='c2' c2 stands for ‘Collection 2’, which is the currently-available Landsat collection of data as named by its publishers, US Geological Survey. In the next cell, enter the following code, and then run it to calculate the EVI vegetation index for this data. landsat_ds = calculate_indices(landsat_ds, index='EVI', collection='c2') This adds a variable called EVI to our landsat_ds dataset. ## Detect changes¶ We want to determine what changed in vegetation between the older and newer halves of the data. First, we will split the vegetation index data into the older half and newer half. 
Data that was collected in the first half of our time range (2013 to 2015) will go in the older half, and data collected in the second half (2016 to 2018) will be in the newer half. The split is done using sel() and slice(). • sel() stands for ‘selection’ and tells us we are taking a selection of the dataset. We have to define which coordinate we are selecting by. In this case, we will use the time coordinate. • slice() specifies which part of the coordinate we are taking. In this case, we want to slice time between 2013 – 2015, and then again 2016 – 2018. Recall we named the halfway point time_baseline. We use sel() and slice() to create two new datasets: • baseline_sample: EVI from 2013 to 2015 • postbaseline_sample: EVI from 2016 to 2018 To do this, enter the following code in the next cell. baseline_sample = landsat_ds.EVI.sel(time=slice(time[0], time_baseline)) postbaseline_sample = landsat_ds.EVI.sel(time=slice(time_baseline, time[1])) Here, using time[0] will give us the first date that we stored in the time variable ('2013-01-01'), and time[1] will give us the second date we stored in the time variable ('2018-12-01'). By using the time variable directly, we ensure that the code will work if those dates are changed and the notebook is rerun. Note Carefully check all your ., ,, () pairs, and ' ' pairs in the above code to avoid generating errors. ### Detect per-pixel changes¶ Now we have our ‘before’ and ‘after’ datasets, we can compare them for change in EVI. To do this, we will form a composite for each half of the dataset. Then we will calculate the differences in the average EVI for each pixel. The composite method we are using is the mean (average). In the next cell, enter the following code, and then run it to create mean composites for the two time periods. 
baseline_composite = baseline_sample.mean('time') postbaseline_composite = postbaseline_sample.mean('time') Now we need to subtract the first time period EVI mean composite, baseline_composite, from the second time period EVI mean composite, postbaseline_composite. This will determine the change in EVI between the two time periods. In the next cell, enter the following code, and then run it to determine the change in the vegetation index. diff_mean_composites = postbaseline_composite - baseline_composite In the next cell, enter the following code, and then run it to show the difference between the mean composites for the time periods. This will allow us to see where the vegetation index increased or decreased and by how much. plt.figure(figsize=(9, 8)) diff_mean_composites.plot.imshow(cmap='RdBu') plt.title("Mean Composite Difference (Older to Newer)") plt.show() Your plot should look like the image below. ### Interpreting the plot¶ In the code above, the colour for the plot is set using the cmap='RdBu' setting in the diff_mean_composites.plot.imshow() function call. Here, RdBu corresponds to a Red-Blue colour-map, with lower values appearing as red, and higher values appearing as blue. The areas in blue (positive change) correspond to vegetation increase as measured by EVI. There is more vegetation in these areas in the 2016-2018 sample than the 2013-2015 sample. The areas in red (negative change) correspond to vegetation decrease as measured by EVI. There is less vegetation in these areas in the 2016-2018 sample than the 2013-2015 sample. What conclusions can you draw about the changes in the landscape from this plot? What other information might you need to help you assess it? ## Conclusion¶ Congratulations! You have made your own vegetation change detection notebook. It is comparable to the existing Sandbox vegetation change detection notebook. The existing notebook may look daunting, but it includes many of the steps that you have just done! 
In addition to setting a ‘before’ and ‘after’ scene, the existing notebook: • Plots some true-colour maps (RGB) to inspect the area of interest • Quantifies change using statistical tests (Welch’s t-test for areas of unequal variance) • Identifies statistically significant change A difference plot, like the one we made above, is a good way to start. You can then decide if you need more complicated analysis or not. You now understand how to structure a complete case study — you can calculate a relevant band index and identify meaningful changes in that index over time. ## Optional activity¶ If you’re curious about how the existing case study works, you can open and run it in the Sandbox: 1. From the main Sandbox folder, open the Real_world_examples folder 2. Double-click the Vegetation_change_detection.ipynb notebook to open it The notebook has already been run, so you can read through it step by step. However, you may find it valuable to clear the outputs and run each cell step by step to see how it works. You can do this by clicking Kernel -> Restart Kernel and Clear All Outputs. When asked whether you want to restart the kernel, click Restart. There are many similarities between the notebook you built in this session, and the existing Sandbox notebook. Make a note of what is similar and what is different, and spend some time inspecting the different code. If you have any questions about how the existing notebook works, please ask the instructors during a Live Session.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.34174463152885437, "perplexity": 2139.893688690515}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623487635724.52/warc/CC-MAIN-20210618043356-20210618073356-00607.warc.gz"}
https://matplotlib.org/devdocs/gallery/text_labels_and_annotations/angle_annotation.html
# Scale invariant angle label# This example shows how to create a scale invariant angle annotation. It is often useful to mark angles between lines or inside shapes with a circular arc. While Matplotlib provides an Arc, an inherent problem when directly using it for such purposes is that an arc being circular in data space is not necessarily circular in display space. Also, the arc's radius is often best defined in a coordinate system which is independent of the actual data coordinates - at least if you want to be able to freely zoom into your plot without the annotation growing to infinity. This calls for a solution where the arc's center is defined in data space, but its radius in a physical unit like points or pixels, or as a ratio of the Axes dimension. The following AngleAnnotation class provides such solution. The example below serves two purposes: • It provides a ready-to-use solution for the problem of easily drawing angles in graphs. • It shows how to subclass a Matplotlib artist to enhance its functionality, as well as giving a hands-on example on how to use Matplotlib's transform system. If mainly interested in the former, you may copy the below class and jump to the Usage section. ## AngleAnnotation class# The essential idea here is to subclass Arc and set its transform to the IdentityTransform, making the parameters of the arc defined in pixel space. We then override the Arc's attributes _center, theta1, theta2, width and height and make them properties, coupling to internal methods that calculate the respective parameters each time the attribute is accessed and thereby ensuring that the arc in pixel space stays synchronized with the input points and size. For example, each time the arc's drawing method would query its _center attribute, instead of receiving the same number all over again, it will instead receive the result of the get_center_in_pixels method we defined in the subclass. 
# This method transforms the center in data coordinates to pixels via the
# Axes transform ax.transData. The size and the angles are calculated in a
# similar fashion, such that the arc changes its shape automatically when
# e.g. zooming or panning interactively.
#
# The functionality of this class allows to annotate the arc with a text.
# This text is an Annotation stored in an attribute ``text``. Since the
# arc's position and radius are defined only at draw time, we need to update
# the text's position accordingly. This is done by reimplementing the Arc's
# ``draw()`` method to let it call an updating method for the text.
#
# The arc and the text will be added to the provided Axes at instantiation:
# it is hence not strictly necessary to keep a reference to it.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
from matplotlib.transforms import IdentityTransform, TransformedBbox, Bbox


class AngleAnnotation(Arc):
    """
    Draws an arc between two vectors which appears circular in display space.
    """

    def __init__(self, xy, p1, p2, size=75, unit="points", ax=None,
                 text="", textposition="inside", text_kw=None, **kwargs):
        """
        Parameters
        ----------
        xy, p1, p2 : tuple or array of two floats
            Center position and two points. Angle annotation is drawn between
            the two vectors connecting *p1* and *p2* with *xy*, respectively.
            Units are data coordinates.

        size : float
            Diameter of the angle annotation in units specified by *unit*.

        unit : str
            One of the following strings to specify the unit of *size*:

            * "pixels": pixels
            * "points": points, use points instead of pixels to not have a
              dependence on the DPI
            * "axes width", "axes height": relative units of Axes width,
              height
            * "axes min", "axes max": minimum or maximum of relative Axes
              width, height

        ax : matplotlib.axes.Axes
            The Axes to add the angle annotation to.

        text : str
            The text to mark the angle with.

        textposition : {"inside", "outside", "edge"}
            Whether to show the text in- or outside the arc. "edge" can be
            used for custom positions anchored at the arc's edge.

        text_kw : dict
            Dictionary of arguments passed to the Annotation.

        **kwargs
            Further parameters are passed to matplotlib.patches.Arc. Use this
            to specify, color, linewidth etc. of the arc.
        """
        self.ax = ax or plt.gca()
        self._xydata = xy  # in data coordinates
        self.vec1 = p1
        self.vec2 = p2
        self.size = size
        self.unit = unit
        self.textposition = textposition

        # theta1/theta2 are the properties defined below, so the arc's angles
        # are computed in pixel space at construction time.
        super().__init__(self._xydata, size, size, angle=0.0,
                         theta1=self.theta1, theta2=self.theta2, **kwargs)

        self.set_transform(IdentityTransform())
        # NOTE(review): the published matplotlib example registers the patch
        # with ``self.ax.add_patch(self)`` at this point; the line appears to
        # have been dropped during text extraction, so it is restored here —
        # without it the arc is never drawn.
        self.ax.add_patch(self)

        self.kw = dict(ha="center", va="center",
                       xycoords=IdentityTransform(),
                       xytext=(0, 0), textcoords="offset points",
                       annotation_clip=True)
        self.kw.update(text_kw or {})
        self.text = ax.annotate(text, xy=self._center, **self.kw)

    def get_size(self):
        # Convert the stored size into pixels according to *unit*.
        factor = 1.
        if self.unit == "points":
            factor = self.ax.figure.dpi / 72.
        elif self.unit[:4] == "axes":
            b = TransformedBbox(Bbox.unit(), self.ax.transAxes)
            dic = {"max": max(b.width, b.height),
                   "min": min(b.width, b.height),
                   "width": b.width, "height": b.height}
            factor = dic[self.unit[5:]]
        return self.size * factor

    def set_size(self, size):
        self.size = size

    def get_center_in_pixels(self):
        """return center in pixels"""
        return self.ax.transData.transform(self._xydata)

    def set_center(self, xy):
        """set center in data coordinates"""
        self._xydata = xy

    def get_theta(self, vec):
        vec_in_pixels = self.ax.transData.transform(vec) - self._center
        # NOTE(review): this return statement is missing in the garbled copy
        # above; without it theta1/theta2 evaluate to None and the Arc cannot
        # be drawn. Restored from the published matplotlib example.
        return np.rad2deg(np.arctan2(vec_in_pixels[1], vec_in_pixels[0]))

    def get_theta1(self):
        return self.get_theta(self.vec1)

    def get_theta2(self):
        return self.get_theta(self.vec2)

    def set_theta(self, angle):
        pass

    # Redefine attributes of the Arc to always give values in pixel space
    _center = property(get_center_in_pixels, set_center)
    theta1 = property(get_theta1, set_theta)
    theta2 = property(get_theta2, set_theta)
    width = property(get_size, set_size)
    height = property(get_size, set_size)

    # The following two methods are needed to update the text position.
    def draw(self, renderer):
        self.update_text()
        super().draw(renderer)

    def update_text(self):
        c = self._center
        s = self.get_size()
        angle_span = (self.theta2 - self.theta1) % 360
        angle = np.deg2rad(self.theta1 + angle_span / 2)
        r = s / 2
        if self.textposition == "inside":
            r = s / np.interp(angle_span, [60, 90, 135, 180],
                              [3.3, 3.5, 3.8, 4])
        self.text.xy = c + r * np.array([np.cos(angle), np.sin(angle)])
        if self.textposition == "outside":
            def R90(a, r, w, h):
                if a < np.arctan(h/2/(r+w/2)):
                    return np.sqrt((r+w/2)**2 + (np.tan(a)*(r+w/2))**2)
                else:
                    c = np.sqrt((w/2)**2+(h/2)**2)
                    T = np.arcsin(c * np.cos(np.pi/2 - a + np.arcsin(h/2/c))/r)
                    xy = r * np.array([np.cos(a + T), np.sin(a + T)])
                    xy += np.array([w/2, h/2])
                    return np.sqrt(np.sum(xy**2))

            def R(a, r, w, h):
                aa = (a % (np.pi/4))*((a % (np.pi/2)) <= np.pi/4) + \
                     (np.pi/4 - (a % (np.pi/4)))*((a % (np.pi/2)) >= np.pi/4)
                return R90(aa, r, *[w, h][::int(np.sign(np.cos(2*a)))])

            bbox = self.text.get_window_extent()
            X = R(angle, r, bbox.width, bbox.height)
            trans = self.ax.figure.dpi_scale_trans.inverted()
            offs = trans.transform(((X-s/2), 0))[0] * 72
            self.text.set_position([offs*np.cos(angle), offs*np.sin(angle)])


# Usage
# -----
# Required arguments to AngleAnnotation are the center of the arc, *xy*, and
# two points, such that the arc spans between the two vectors connecting *p1*
# and *p2* with *xy*, respectively. Those are given in data coordinates.
# Further arguments are the *size* of the arc and its *unit*. Additionally, a
# *text* can be specified, that will be drawn either in- or outside of the
# arc, according to the value of *textposition*.

fig, ax = plt.subplots()
fig.canvas.draw()  # Need to draw the figure to define renderer
ax.set_title("AngleLabel example")

# Plot two crossing lines and label each angle between them with the above
# AngleAnnotation tool.
center = (4.5, 650)
p1 = [(2.5, 710), (6.0, 605)]
p2 = [(3.0, 275), (5.5, 900)]
line1, = ax.plot(*zip(*p1))
line2, = ax.plot(*zip(*p2))
point, = ax.plot(*center, marker="o")

am1 = AngleAnnotation(center, p1[1], p2[1], ax=ax, size=75, text=r"$\alpha$")
am2 = AngleAnnotation(center, p2[1], p1[0], ax=ax, size=35, text=r"$\beta$")
am3 = AngleAnnotation(center, p1[0], p2[0], ax=ax, size=75, text=r"$\gamma$")
am4 = AngleAnnotation(center, p2[0], p1[1], ax=ax, size=35, text=r"$\theta$")

# Showcase some styling options for the angle arc, as well as the text.
p = [(6.0, 400), (5.3, 410), (5.6, 300)]
ax.plot(*zip(*p))
am5 = AngleAnnotation(p[1], p[0], p[2], ax=ax, size=40, text=r"$\Phi$",
                      linestyle="--", color="gray", textposition="outside",
                      text_kw=dict(fontsize=16, color="gray"))


# AngleLabel options
# ------------------
# The *textposition* and *unit* keyword arguments may be used to modify the
# location of the text label, as shown below:

# Helper function to draw angle easily.
def plot_angle(ax, pos, angle, length=0.95, acol="C0", **kwargs):
    # NOTE(review): the definition of ``vec2`` was lost in the garbled copy
    # above, where it was used undefined; restored from the published
    # matplotlib example. It is the unit vector of the angled leg.
    vec2 = np.array([np.cos(np.deg2rad(angle)), np.sin(np.deg2rad(angle))])
    xy = np.c_[[length, 0], [0, 0], vec2*length].T + np.array(pos)
    ax.plot(*xy.T, color=acol)
    return AngleAnnotation(pos, xy[0], xy[2], ax=ax, **kwargs)


fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
fig.suptitle("AngleLabel keyword arguments")
fig.canvas.draw()  # Need to draw the figure to define renderer

# Showcase different text positions.
ax1.margins(y=0.4)
ax1.set_title("textposition")
kw = dict(size=75, unit="points", text=r"$60°$")

am6 = plot_angle(ax1, (2.0, 0), 60, textposition="inside", **kw)
am7 = plot_angle(ax1, (3.5, 0), 60, textposition="outside", **kw)
am8 = plot_angle(ax1, (5.0, 0), 60, textposition="edge",
                 text_kw=dict(bbox=dict(boxstyle="round", fc="w")), **kw)
# NOTE(review): the ``arrowprops`` call below was truncated mid-argument in
# the garbled copy; the connectionstyle is reconstructed from the published
# matplotlib example — confirm against upstream.
am9 = plot_angle(ax1, (6.5, 0), 60, textposition="edge",
                 text_kw=dict(xytext=(30, 20), arrowprops=dict(
                     arrowstyle="->", connectionstyle="arc3,rad=-0.2")), **kw)

for x, text in zip([2.0, 3.5, 5.0, 6.5],
                   ['"inside"', '"outside"', '"edge"',
                    '"edge", custom arrow']):
    ax1.annotate(text, xy=(x, 0), xycoords=ax1.get_xaxis_transform(),
                 bbox=dict(boxstyle="round", fc="w"), ha="left", fontsize=8,
                 annotation_clip=True)

# Showcase different size units. The effect of this can best be observed by
# interactively changing the figure size.
ax2.margins(y=0.4)
ax2.set_title("unit")
kw = dict(text=r"$60°$", textposition="outside")

am10 = plot_angle(ax2, (2.0, 0), 60, size=50, unit="pixels", **kw)
am11 = plot_angle(ax2, (3.5, 0), 60, size=50, unit="points", **kw)
am12 = plot_angle(ax2, (5.0, 0), 60, size=0.25, unit="axes min", **kw)
am13 = plot_angle(ax2, (6.5, 0), 60, size=0.25, unit="axes max", **kw)

for x, text in zip([2.0, 3.5, 5.0, 6.5],
                   ['"pixels"', '"points"', '"axes min"', '"axes max"']):
    ax2.annotate(text, xy=(x, 0), xycoords=ax2.get_xaxis_transform(),
                 bbox=dict(boxstyle="round", fc="w"), ha="left", fontsize=8,
                 annotation_clip=True)

plt.show()

# References: this example shows the use of matplotlib.patches.Arc,
# matplotlib.transforms.IdentityTransform and Axes.annotate.
# Total running time of the script: ( 0 minutes 1.456 seconds)
# Gallery generated by Sphinx-Gallery
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.48238247632980347, "perplexity": 13640.339820759173}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764499888.62/warc/CC-MAIN-20230131154832-20230131184832-00188.warc.gz"}
https://koreauniv.pure.elsevier.com/en/publications/search-for-bsupsup-esupsup%CE%BDsubesub-and-bsupsup-%CE%BCsupsup%CE%BDsub%CE%BCsub-de
# Search for B+ → e+νe and B+ → μ+νμ decays using hadronic tagging Belle Collaboration Research output: Contribution to journalArticlepeer-review 11 Citations (Scopus) ## Abstract We present a search for the rare leptonic decays B+ → e+νe and B+ → μ+νμ, using the full Υ(4S) data sample of 772 × 106 B B¯ pairs collected with the Belle detector at the KEKB asymmetric-energy e+e- collider. One of the B mesons from the Υ(4S) → B B¯ decay is fully reconstructed in a hadronic mode, while the recoiling side is analyzed for the signal decay. We find no evidence of a signal in any of the decay modes. Upper limits of the corresponding branching fractions are determined as B(B+ → e+νe) < 3.5 × 10-6 and B(B+ → μ+νμ) < 2.7 × 10-6 at 90% confidence level. Original language English 052016 Physical Review D - Particles, Fields, Gravitation and Cosmology 91 5 https://doi.org/10.1103/PhysRevD.91.052016 Published - 2015 Mar 19 ## ASJC Scopus subject areas • Nuclear and High Energy Physics • Physics and Astronomy (miscellaneous)
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9273697733879089, "perplexity": 10961.706045388679}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-21/segments/1620243992159.64/warc/CC-MAIN-20210517084550-20210517114550-00461.warc.gz"}
http://math.stackexchange.com/questions/116881/evaluate-sum-n-1-infty-ln-left-frac7n17n-right
# Evaluate $\sum_{n=1}^{\infty }\ln \left (\frac{7^n+1}{7^n} \right )$ Evaluate $\sum_{n=1}^{\infty }\ln \left (\frac{7^n+1}{7^n} \right )$ . Found this question on Art of Problem Solving. It was stuck in the "solved" section, but I couldn't find a solution, and I myself am stumped. Apparently it could also be simplified to $\sum_{k=0}^{\infty }\frac{\left ( -1 \right )^{k+1}}{k\left ( 7^{k}-1 \right )}$ , but I don't follow this either. - AOPS say $$\sum_{n=1}^{\infty} \ln \left( \frac{7^n+1}{7^n} \right)$$ – user17762 Mar 6 '12 at 0:01 @SivaramAmbikasaran: I see. Very strange of OP to miss that! – Aryabhata Mar 6 '12 at 0:10 You'll want $k$ to go from $1$ to $\infty$, not from $0$. – Robert Israel Mar 6 '12 at 2:51 @SivaramAmbikasaran I'm new to LaTeX. Give me a bit of leniency. :P – badreferences Mar 6 '12 at 17:54 ## 2 Answers This is $$\log \prod_{n=1}^\infty (1 + 7^{-n}) = \log \phi(1/49) - \log \phi(1/7)$$ where $$\phi(q) = \prod_{n=1}^\infty (1 - q^n)$$ is the Euler function. I doubt that you can get a much simpler "closed form" than that. - Using the inequality $$\log (1+x) \ge x - \frac{x^2}{2}$$ we see that the series diverges: $$\log\left(\frac{7n + 1}{7n}\right) \ge \frac{1}{7n} - \frac{1}{98n^2}$$ EDIT: If the series is $\sum_{n=1}^{\infty}\log\left(\frac{7^n + 1}{7^n}\right)$ (as pointed out in the comments), then, using the Taylor series expansion of $\log(1+x)$: $$\log(1+x) = x - \frac{x^2}{2} + \frac{x^3}{3} - \dots$$ and the geometric series sum $$\sum_{n=1}^{\infty} r^n = \frac{r}{1-r}$$ we get the sum which you state. (Of course, that would need some justification, but I believe it is doable). -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8415527939796448, "perplexity": 527.0016659947404}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-30/segments/1469257823133.4/warc/CC-MAIN-20160723071023-00003-ip-10-185-27-174.ec2.internal.warc.gz"}
https://eprints.lancs.ac.uk/id/eprint/63441/
Q-ball formation in the wake of Hubble-induced radiative corrections Allahverdi, Rouzbeh and Mazumdar, Anupam and Ozpineci, Altug (2002) Q-ball formation in the wake of Hubble-induced radiative corrections. Physical Review D, 65 (12). ISSN 1550-7998 Preview PDF PhysRevD.65.125003.pdf - Published Version Abstract We discuss some interesting aspects of the $\rm Q$-ball formation during the early oscillations of the flat directions. These oscillations are triggered by the running of soft $({\rm mass})^2$ stemming from the nonzero energy density of the Universe. However, this is quite different from the standard $\rm Q$-ball formation. The running in presence of gauge and Yukawa couplings becomes strong if $m_{1/2}/m_0$ is sufficiently large. Moreover, the $\rm Q$-balls which are formed during the early oscillations constantly evolve, due to the redshift of the Hubble-induced soft mass, until the low-energy supersymmtery breaking becomes dominant. For smaller $m_{1/2}/m_0$, $\rm Q$-balls are not formed during early oscillations because of the shrinking of the instability band due to the Hubble expansion. In this case the $\rm Q$-balls are formed only at the weak scale, but typically carry smaller charges, as a result of their amplitude redshift. Therefore, the Hubble-induced corrections to the flat directions give rise to a successful $\rm Q$-ball cosmology. Item Type: Journal Article Journal or Publication Title: Physical Review D
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8407973647117615, "perplexity": 1858.213251685402}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-45/segments/1603107866404.1/warc/CC-MAIN-20201019203523-20201019233523-00499.warc.gz"}
http://www.koreascience.or.kr/article/ArticleFullRecord.jsp?cn=PMGHBJ_2009_v42n4_157
Photolytic Characteristics of Ni-TiO2 Composite Coating from Electroless Plating Title & Authors Photolytic Characteristics of Ni-TiO2 Composite Coating from Electroless Plating Choi, Chul-Young; Han, Gil-Soo; Jo, Il-Guk; Kim, Young-Seok; Kim, Yang-Do; Abstract Many fundamental studies have been carried out regarding waste water and hazardous gas treatments technologies using the photolysis effect of $\small{TiO_2}$. However, a permanent use of $\small{TiO_2}$ particles immobilized using organic or organic-inorganic binders is impossible. In this study, Ni-$\small{TiO_2}$ composite coating was produced by electroless plating to trap $\small{TiO_2}$ particles in the Ni coating layer. The electroless plating was performed in the bath solutions with three different concentrations of $\small{TiO_2}$ particles : 10 g/l, 20 g/l, and 40 g/l. The surface and photolytic characteristics of the coating layer was investigated by the use of SEM, a scratch tester, and an UV-Visible spectrophotometer. The results showed that the amounts of immobilized $\small{TiO_2}$ particles and the photolytic rate of the coating increased with the initial content of $\small{TiO_2}$ particles in the electroless bath. In addition, the photolytic rate of the Ni-$\small{TiO_2}$ composite coating was remarkably promoted by etching process in 10% HCl solution. Keywords Electroless plating;Photolysis;Ni-$\small{TiO_2}$;Composite coating; Language Korean Cited by 1. Construction of an evaluation system for selecting an appropriate waterproofing method for the roof of a building, Canadian Journal of Civil Engineering, 2012, 39, 12, 1264 References 1. K. N. Kim, D. H. Kim, T. K. Lee, Y. C. Kim, Y. G. Shul, Appl. Chem. Eng., 1 (1995) 681 2. H. S. Shim, J. L. Lim, J. KSEE, 17 (1995) 1079 3. J. J. Shah, H. B. Singh, Environ. Sci. Technol., 22 (1987) 843 4. K. D. Liu, Enaluation of VOC Managemen and Control. Industrial Pollution Prevention Control, 15 (1993) 48 5. H. D. Chun, J. KSEE, 16 (1994) 809 6. J. J. 
Shah, H. B. Singh, Environ. Sci. Technol., 22 (1988) 1381 7. 안복엽, 석상일, 서태수, 이동석, 김종부, 대한환경공학회지, 23(7) (2001) 1205 8. 주현규, 전명석, 이태규, 대한환경공학회지, 21 (1999) 6 9. D. H. Kim, T. K. Lee, K. B. Kim, S. W. Lee, Korean Journal of Materials Reaserch, 6 (1996) 3 10. J. P. Celis, J. R. Roos, C. Buelens, J. Electrochem. Soc., 134 (1987) 142
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 9, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5531781911849976, "perplexity": 22662.71631450434}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-34/segments/1534221209585.18/warc/CC-MAIN-20180814205439-20180814225439-00511.warc.gz"}
https://physicscatalyst.com/magnetism/diamagnetism.php
# Diamagnetism ## Diamagnetism • Diamagnetic effects occur in materials where the magnetic fields due to the electronic motions, i.e. orbiting and spinning, completely cancel each other • Thus for diamagnetic materials the intrinsic magnetic moment of every atom is zero, and such materials are only weakly affected by a magnetic field • The diamagnetic effect in a material is a result of the inductive action of the externally applied field on the molecular currents • To explain the occurrence of this effect, we first consider Lenz's law, according to which, whenever there is a change in the flux through a circuit, an induced current is set up to oppose the change in flux linked by the circuit • Here the circuit under consideration is the orbiting electrons in the atoms, ions or molecules constituting the material under consideration • We know that moving electrons are equivalent to a current, and where there is a current, there is a flux • On application of an external field, the current changes to oppose the change in flux, and this appears as a change in the frequency of the revolution • The change in frequency gives rise to magnetization, as a result of which each atom acquires an additional magnetic moment, aligned opposite to the external field causing it • It is this additional magnetic moment which gives the diamagnetic susceptibility its negative sign, which is of the order of 10-5 for most diamagnetic materials (e.g. bismuth, lead, copper, silicon, diamond, etc.) • All substances are diamagnetic, although diamagnetism may very frequently be masked by a stronger positive paramagnetic effect on the part of the external magnetic field and as a result of internal interactions • Diamagnetic susceptibility is independent of temperature, as thermal motion has very little effect on the electron orbits as long as it does not deform them Note to our visitors :- Thanks for visiting our website.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9352778196334839, "perplexity": 837.6938338492005}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-40/segments/1600400238038.76/warc/CC-MAIN-20200926071311-20200926101311-00105.warc.gz"}
https://www.physicsforums.com/threads/the-universes-size-always-infinite.507327/
# The universe's size : always infinite? 1. Jun 15, 2011 ### pedersean I came across a startling position on more than one occasion while reading "The Fabric of the Cosmos: Space, Time, and the Texture of Reality" by Brian Greene. The position is that our immeasurable universe is infinite. He continues by writing that any mathematical modification to the size of the universe will always result in infinity. Perhaps my understanding of infinity is misleading, but I've always held the position of infinity being obtainably intangible and impossible. Instead I prefer to hold that infinity is instead a special numerical placeholder of the extraordinarily large, and with subtracting from infinity a given tolerance becomes greater. Adding to infinity decreases that tolerance by bringing the actual number closer to its equivalent of the all powerful forever number. Am I nuts to argue that an expanding universe can not persist its size? 2. Jun 16, 2011 It basically depends on which side of the fence you are on. If you think that the universe was created by the big bang, then it is physically impossible to fit infinite mass inside a ball the size of a pea (yes I know, the laws of physics have been broken before). Then I would say that the universe is expanding and it does have its limits if you consider the theory that energy cannot be created or destroyed. If you are one of those people who say that the universe has been here since the dawn of time and will always be here, then no... the universe isn't expanding (somehow :P) as for the question is it infinite, I believe that rather than the universe being never ending, I think it's in a big loop (like how people thought that the earth was flat, but it's a sphere: if you go in one direction for long enough you will eventually arrive in the exact same place that you started from. ;) Live Long and Prosper \ m / 3. Jun 16, 2011 ### sicarius Infinity is a funny thing. For example, let's say you have an infinite amount of water. 
That means for every 1 oxygen atom you have 2 hydrogen atoms. While you obviosly have 2x as many hydrogen atoms, you at the same time have equal amounts, since there are infinite amounts of both hydrogen and oxygen. (1*∞)/3 = (2*∞)/3 If you take 1/2 of an infinite volume you still end up with an infinite volume. ∞/2 = ∞. Only if you divide an infinite volume infinitely do you get a finite number: (2*∞)/∞ = 2 So, if the universe is infinite it would have to be expanding infinitely fast, as there is an infinite amount of space to contribute to the expansion. But when you take an infinitely small portion of the universe (like what we can measure) that expansion rate can be finite. So if the universe were to gain 10% size over a given time, it could do so and still be infinite as 1.1*∞ = ∞. All this seems counter intuitive and hard to accept, but the math is the math. Without accepting infinity as a real possibility than things like a singularity become impossible. An infinite universe also nicely explains expansion. While the universe was a finite size each point had an infinite amount of energy contributing to its expansion. Only once it reached an infinite size would there be a finite amount of energy at each point and be able to start to cool down. Hope this helps. 4. Jun 16, 2011 ### BruceW The current theory of the universe says that the universe is of finite size. But there are models of the universe that say it could be infinite. At the moment, they think its finite. 5. Jun 16, 2011 ### DragonPetter Wouldn't an infinite universe imply infinite energy? For example, two objects infinitely away from each other would have infinite potential energy. What if 2 objects fall at each other from opposite "sides" of an infinite universe, would they accelerate more and more as they got closer to each other? 
Would they be infinitesimally approaching the speed of light as they accelerate closer to each other, or would it just take infinite time for this to ever happen anyway? 6. Jun 16, 2011 ### Haroldingo Well for all intents and purposes the universe is infinite, in the fact that if we travel from one end to the other we find ourselves back at the place from which we started. 7. Jun 16, 2011 ### WannabeNewton WMAP has found, to some degree of experimental error, that the universe is flat. This means that when one looks at the Friedmann model for a flat universe it is infinite in extent not finite. However, we can only view a finite portion (observable universe) of it because there are regions of the universe that are expanding faster than the speed of signals from those areas. 8. Jun 16, 2011 ### sicarius Yes an infinite universe implies infinite energy. If the universe was empty execpt for those two obejects then yes they theoretically would fall towards at speeds approaching the speed of light, and yes they would never actually reach eachother. It is also possible that gravitons from one object would never reach the other and the falling would never even begin. We don't know enough about gravity to say for sure. 9. Jun 16, 2011 ### Nano-Passion Wow, that was interesting. But can there be an infinite velocity? I mean, Einstein proved that the speed of light can not be breached. Unless you are telling me that space needs not follow that rule. If so it would be pose very interesting questions to space and its affects. 10. Jun 16, 2011 ### WannabeNewton Precisely, space does not follow that rule. 11. Jun 16, 2011 ### Nano-Passion Space is indeed interesting and mysterious. It really challenges your imagination and reasoning to the extreme. 0__o Too many people take the word "space" for granted. We all grew up in it. Space to most people is just the room in their kitchen. Most people think of space as nothing. 
But in physics space actually has a life and physics of its own. Wow, I only got to appreciate it when I started to deeply ponder.. I wonder how much we truly understand about it? 12. Jun 17, 2011 ### sicarius Thank you. The speed of light is a measure of movement through space, and that speed cannot be breached. Expanding space is not "moving through space" and does not hit the same limitations. 13. Jun 17, 2011 ### Nano-Passion Yes I've heard that before -- though I wonder about the mathematics behind these sort of things and I'm dumbfounded how we can use math to describe phenomena such as that. Unless there is no rigorous math to it and it is philosophical reasoning to the idea of expanding space. 14. Jun 17, 2011 ### sicarius I may be wrong here, but I think that it is more like they have not found any math that disallows this, not so much as they have mathematically proved it. 15. Jun 17, 2011 ### BruceW There is a rigorous mathematical explanation for the universe being able to expand faster than the speed of light. It is called general relativity. The universe is highly curved at large scale, which is why two faraway objects can be moving away from each other faster than the speed of light. 16. Jun 17, 2011 ### WannabeNewton The universe is flat, according to observations, at the large scale and two objects don't really move faster than each at the speed of light but rather the space between them expands faster than the speed of light. 17. Jun 17, 2011 ### ZapperZ Staff Emeritus Closed, pending moderation. Zz. 18. Jun 18, 2011 ### bcrowell Staff Emeritus Hi, pedersean, Welcome to PF! It would have been better to post this in Cosmology rather than in General Physics. Typically people who are most knowledgeable about a particular field will only pay attention to posts in the relevant forum. Because the discussion had drifted off track, the thread was temporarily locked. I've moved it to Cosmology and opened it back up again. 
This is not really right, and since Brian Greene is a competent physicist, I think probably what's happened is that you misinterpreted or oversimplified something he wrote. We have an entry on this topic in the cosmology FAQ: https://www.physicsforums.com/showthread.php?t=506986 We actually don't know whether the universe is spatially finite or spatially infinite. This sounds like another case where the message got garbled somewhere along the line. This would depend on what was meant by "mathematical modification." The Math FAQ has a good entry on infinity: https://www.physicsforums.com/showthread.php?t=507003 [Broken] The truth or falsehood of your statement would depend on what you meant by "obtainably intangible and impossible." This kind of statement really can't be decided, because it uses undefined terms like "all powerful forever number." The real number system doesn't include infinite numbers. The math FAQ entry gives some examples of number systems that do include infinite numbers. Not nuts, just incorrect :-) I'm not clear here on why you use the word "persist." Are you discussing the possibility that it would start out infinite and then become finite at some later time? (This would seem to go along with what you said above about "mathematical modification.") According to general relativity, if the universe is finite at one time, then it's finite at all earlier and later times; if it's infinite at one time, then it's infinite at all earlier and later times. This can be proved mathematically based on the Einstein field equations plus some other very reasonable physical assumptions that we have good reason to believe hold in our universe: http://arxiv.org/abs/gr-qc/9406053 The term for this is "topology change." This actually doesn't quite work in cosmology. There is no principle of conservation of energy in cosmology. 
We have a FAQ entry about this: https://www.physicsforums.com/showthread.php?t=506985 Mass and energy are equivalent in relativity, so we actually can't define the total mass of the universe (regardless of whether it's spatially finite or spatially infinite). However, we can discuss things like how many hydrogen atoms there are. "The size of a pea" would only apply to cosmologies that are spatially finite (and therefore spatially finite at all times). In these cosmologies, there is only a finite number of hydrogen atoms (or any other particle) in the universe. The thing to be careful about here is that unless you specify a particular number system (with certain axioms), these statements about arithmetic operations involving infinity are neither true not false. You also have to be careful about your implicit assumption that there is only one infinite number, which is not true in all number systems that include infinite numbers. The math FAQ entry does a good job of explaining this. This is sort of right, except that you haven't really defined what you meant by "infinitely fast." Maybe you mean the velocity of one galaxy relative to another galaxy that is at a cosmological distance from it? In this case, there is actually no uniquely defined way to talk about the velocity in GR. However, one reasonable way to talk about it is to let $v=\Delta L/\Delta t$, where L and t are the quantities defined in this cosmology FAQ entry: https://www.physicsforums.com/showthread.php?t=506990 In that case, v is finite for any two galaxies, but in an infinite universe there is no upper bound on v (and v can be greater than c). This is incorrect, because, as discussed above, GR says changes of topology aren't possible. Nope. The cosmology FAQ entry discusses this. No, the wrap-around thing would apply to a spatially finite universe (one with finite volume), but as explained in the FAQ, we don't know if it's spatially finite or spatially infinite. This is not quite right. 
As explained in the FAQ entry, the universe is within error bars of being flat. Therefore it could have either positive curvature (with finite spatial volume) or negative curvature (with infinite spatial volume). Sorry, but this is basically all wrong. It's not an question of space versus physical objects, it's a question of local versus global. Relativity only prohibits objects from zooming right past each other at >c. For cosmologically distant objects, velocity isn't even uniquely well defined (see above). It is rigorous math. It's how general relativity works. This is a common way of explaining it nonmathematically. Mathematically, "speed" is just not defined in this context, and expansion of space, although a possible verbal description, is not the only way of verbally describing the mathematics of an expanding universe. -Ben Last edited by a moderator: May 5, 2017 19. Jun 18, 2011 ### Bob3141592 No, not if the universe is a mixture of positive and negative energy, unevenly distributed on small scales. Then you could have an infinite universe with zero or really any finite amount of energy. 20. Jun 18, 2011 ### bcrowell Staff Emeritus
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.802178680896759, "perplexity": 633.2789494645596}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-43/segments/1539583512592.60/warc/CC-MAIN-20181020055317-20181020080817-00502.warc.gz"}
https://www.physicsforums.com/threads/velocity-from-position-vector-in-rotating-object.921143/
# Velocity from position vector in rotating object 1. Jul 26, 2017 ### Nikstykal 1. The problem statement, all variables and given/known data I am trying to solve for change in velocity for the center of a rim with respect to the contact patch of a tire that has some degree of camber. The equation finalized is shown in the image below, equation 2.6. http://imgur.com/a/oHucp 2. Relevant equations 3. The attempt at a solution I understand how to get the position vector shown in 2.5. The first part of 2.6 is just deriving 2.5 with respect to h. The 3rd and 4th terms are what confuse me. In regards to dj/dt = -wz i, I understand that the change in j with respect to time is directly related to the yaw moment (wz) but what is the mathematical reasoning for using the i unit vector? Further, the 4th term demonstrates that dk/dt = -γ' / cos2 γ j, showing that k = -tanγ. Sorry for the improper notation, was hoping to get further insight into how these terms are being derived. Last edited: Jul 26, 2017 2. Jul 26, 2017 ### Dr.D I can't see your figure, and without that, there is no good way to reply to you. Please get the figure into the post. 3. Jul 26, 2017 ### Nikstykal Sorry about that, should be fixed. Draft saved Draft deleted Similar Discussions: Velocity from position vector in rotating object
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8426638245582581, "perplexity": 924.190429708382}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-34/segments/1502886104612.83/warc/CC-MAIN-20170818063421-20170818083421-00686.warc.gz"}
https://www.physicsforums.com/threads/subsequence-converging.231439/
# Subsequence converging 1. Apr 26, 2008 ### Doom of Doom 1. The problem statement, all variables and given/known data Consider the sequence $$\left\{ x_{n} \right\}$$. Then $$x_{n}$$ is convergent and $$\lim x_{n}=a$$ if and only if, for every non-trivial convergent subsequence, $$x_{n_{i}}$$, of $$x_{n}$$, $$\lim x_{n_{i}}=a$$. 2. Relevant equations The definition of the limit of a series: $$\lim {x_{n}} = a \Leftrightarrow$$ for every $$\epsilon > 0$$, there exists $$N \in \mathbb{N}$$ such that for every $$n>N$$, $$\left| x_{n} - a \right| < \epsilon$$. 3. The attempt at a solution Ok, so I easily see how to show that it $$\lim {x_{n}} = a$$, then every convergent subsequence must also converge to $$a$$. But I'm stuck on how to show the other way. 2. Apr 26, 2008 ### Dick I would say, well isn't a_n a subsequence of itself? But you also said 'non-trivial'. I'm not sure exactly what that means, but can't you split a_n into two 'non-trivial' subsequences, which then converge, but when put together make all of a_n? 3. Apr 26, 2008 ### Doom of Doom Yeah, I asked my prof about this one. To him, apparently "non-trivial" just means that the subsequence is not equal to the original sequence. I don't think it actually has any bearing on the problem. The trick, he said, is that you have to consider every non-trivial (convergent) subsequence. I'm not sure I know what that means. 4. Apr 26, 2008 ### Dick Ok, then suppose a_n has two convergent subsequences with different limits. Then does a_n have a limit? Similar Discussions: Subsequence converging
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9883826375007629, "perplexity": 681.7856393897385}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-22/segments/1495463609404.11/warc/CC-MAIN-20170528004908-20170528024908-00015.warc.gz"}
https://eng.libretexts.org/Bookshelves/Electrical_Engineering/Introductory_Electrical_Engineering/Electrical_Engineering_(Johnson)/05%3A_Digital_Signal_Processing/5.12%3A_Discrete-Time_Systems_in_the_Time-Domain
# 5.12: Discrete-Time Systems in the Time-Domain $$\newcommand{\vecs}[1]{\overset { \scriptstyle \rightharpoonup} {\mathbf{#1}} }$$ $$\newcommand{\vecd}[1]{\overset{-\!-\!\rightharpoonup}{\vphantom{a}\smash {#1}}}$$$$\newcommand{\id}{\mathrm{id}}$$ $$\newcommand{\Span}{\mathrm{span}}$$ $$\newcommand{\kernel}{\mathrm{null}\,}$$ $$\newcommand{\range}{\mathrm{range}\,}$$ $$\newcommand{\RealPart}{\mathrm{Re}}$$ $$\newcommand{\ImaginaryPart}{\mathrm{Im}}$$ $$\newcommand{\Argument}{\mathrm{Arg}}$$ $$\newcommand{\norm}[1]{\| #1 \|}$$ $$\newcommand{\inner}[2]{\langle #1, #2 \rangle}$$ $$\newcommand{\Span}{\mathrm{span}}$$ $$\newcommand{\id}{\mathrm{id}}$$ $$\newcommand{\Span}{\mathrm{span}}$$ $$\newcommand{\kernel}{\mathrm{null}\,}$$ $$\newcommand{\range}{\mathrm{range}\,}$$ $$\newcommand{\RealPart}{\mathrm{Re}}$$ $$\newcommand{\ImaginaryPart}{\mathrm{Im}}$$ $$\newcommand{\Argument}{\mathrm{Arg}}$$ $$\newcommand{\norm}[1]{\| #1 \|}$$ $$\newcommand{\inner}[2]{\langle #1, #2 \rangle}$$ $$\newcommand{\Span}{\mathrm{span}}$$$$\newcommand{\AA}{\unicode[.8,0]{x212B}}$$ ##### Learning Objectives • Discrete-time systems allow for mathematically specified processes like the difference equation. A discrete-time signal $$s(n)$$ is delayed by $$n_0$$ samples when we write $$s(n-n_{0})$$ with $$n_0>0$$. Choosing $$n_0$$ to be negative advances the signal along the integers. As opposed to analog delays, discrete-time delays can only be integer valued. In the frequency domain, delaying a signal corresponds to a linear phase shift of the signal's discrete-time Fourier transform: $s(n-n_{0})\leftrightarrow e^{-(i2\pi fn_{0})}S(e^{i2\pi f}) \nonumber$ Linear discrete-time systems have the superposition property. $S\left ( a_{1}x_{1}(n)+a_{2}x_{2}(n) \right )=a_{1}S\left ( x_{1}(n) \right )+a_{2}S\left ( x_{2}(n) \right ) \nonumber$ A discrete-time system is called shift-invariant (analogous to time-invariant analog systems) if delaying the input delays the corresponding output. 
If $S\left ( x(n) \right )=y(n) \nonumber$ Then a shift-invariant system has the property $S\left ( x(n-n_{0}) \right )=y(n-n_{0}) \nonumber$ We use the term shift-invariant to emphasize that delays can only have integer values in discrete-time, while in analog signals, delays can be arbitrarily valued. We want to concentrate on systems that are both linear and shift-invariant. It will be these that allow us the full power of frequency-domain analysis and implementations. Because we have no physical constraints in "constructing" such systems, we need only a mathematical specification. In analog systems, the differential equation specifies the input-output relationship in the time-domain. The corresponding discrete-time specification is the difference equation. $y(n)=a_{1}y(n-1)+...+a_{p}y(n-p)+b_{0}x(n)+b_{1}x(n-1)+...+b_{q}x(n-q) \nonumber$ Here, the output signal $$y(n)$$ is related to its past values $y(n-l),\; l\in \left \{ 1,...,p \right \} \nonumber$ and to the current and past values of the input signal $$x(n)$$. The system's characteristics are determined by the choices for the number of coefficients $$p$$ and $$q$$ and the coefficients' values $\left \{ a_{1},...,a_{p} \right \}\; and\; \left \{ b_{0},b_{1},...,b_{q} \right \} \nonumber$ ##### Note There is an asymmetry in the coefficients: where is $$a_0$$? This coefficient would multiply the $$y(n)$$ term in the above equation. We have essentially divided the equation by it, which does not change the input-output relationship. We have thus created the convention that $$a_0$$ is always one. As opposed to differential equations, which only provide an implicit description of a system (we must somehow solve the differential equation), difference equations provide an explicit way of computing the output for any input. We simply express the difference equation by a program that calculates each output from the previous output values, and the current and previous inputs. 
Difference equations are usually expressed in software with for loops. A MATLAB program that would compute the first 1000 values of the output has the form for n=1:1000 y(n) = sum(a.*y(n-1:-1:n-p)) + sum(b.*x(n:-1:n-q)); end An important detail emerges when we consider making this program work; in fact, as written it has (at least) two bugs. What input and output values enter into the computation of y(1)? We need values for y(0), y(-1),..., values we have not yet computed. To compute them, we would need more previous values of the output, which we have not yet computed. To compute these values, we would need even earlier values, ad infinitum. The way out of this predicament is to specify the system's initial conditions: we must provide the p output values that occurred before the input started. These values can be arbitrary, but the choice does impact how the system responds to a given input. One choice gives rise to a linear system: Make the initial conditions zero. The reason lies in the definition of a linear system: The only way that the output to a sum of signals can be the sum of the individual outputs occurs when the initial conditions in each case are zero. ##### Exercise $$\PageIndex{1}$$ The initial condition issue resolves making sense of the difference equation for inputs that start at some index. However, the program will not work because of a programming, not conceptual, error. What is it? How can it be "fixed?" Solution The indices can be negative, and this condition is not allowed in MATLAB. To fix it, we must start the signals later in the array. ##### Example $$\PageIndex{1}$$ Let's consider the simple system having $$p = 1$$ and $$q = 0$$. $y(n)=ay(n-1)+bx(n) \nonumber$ To compute the output at some index, this difference equation says we need to know what the previous output y(n-1) and what the input signal is at that moment of time. 
In more detail, let's compute this system's output to a unit-sample input: $x(n)=\delta (n) \nonumber$ Because the input is zero for negative indices, we start by trying to compute the output at n = 0. $y(0)=ay(-1)+b \nonumber$ What is the value of y(-1)? Because we have used an input that is zero for all negative indices, it is reasonable to assume that the output is also zero. Certainly, the difference equation would not describe a linear system if the input that is zero for all time did not produce a zero output. With this assumption, y(-1) = 0, leaving y(0) = b. For n > 0, the input unit-sample is zero, which leaves us with the difference equation $\forall n,n> 0:\left ( y(n)=ay(n-1) \right ) \nonumber$ We can envision how the filter responds to this input by making a table. $y(n)=ay(n-1)+b\delta (n) \nonumber$ n x(n) y(n) -1 0 0 0 1 b 1 0 ba 2 0 ba^2 : 0 : n 0 ba^n Coefficient values determine how the output behaves. The parameter b can be any value, and serves as a gain. The effect of the parameter a is more complicated (see Table above). If it equals zero, the output simply equals the input times the gain b. For all non-zero values of a, the output lasts forever; such systems are said to be IIR (Infinite Impulse Response). The reason for this terminology is that the unit sample is also known as the impulse (especially in analog situations), and the system's response to the "impulse" lasts forever. If a is positive and less than one, the output is a decaying exponential. When a = 1, the output is a unit step. If a is negative and greater than -1, the output oscillates while decaying exponentially. When a = -1, the output changes sign forever, alternating between b and -b. More dramatic effects occur when |a| > 1; whether positive or negative, the output signal becomes larger and larger, growing exponentially. 
Positive values of ##### Exercise $$\PageIndex{1}$$ Note that the difference equation $y(n)=a_{1}y(n-1)+...+a_{p}y(n-p)+b_{0}x(n)+b_{1}x(n-1)+...+b_{q}x(n-q) \nonumber$ does not involve terms like $$y(n+1)$$ or $$x(n+1)$$ on the equation's right side. Can such terms also be included? Why or why not? Solution Such terms would require the system to know what future input or output values would be before the current value was computed. Thus, such terms can cause difficulties. ##### Example $$\PageIndex{1}$$: A somewhat different system has no "a" coefficients. Consider the difference equation $y(n)=\frac{1}{q}\left ( x(n)+...+x(n-q+1) \right ) \nonumber$ Because this system's output depends only on current and previous input values, we need not be concerned with initial conditions. When the input is a unit-sample, the output equals $\frac{1}{q}\; for\; n=\left \{ 0,...,q-1 \right \} \nonumber$ then equals zero thereafter. Such systems are said to be FIR (Finite Impulse Response) because their unit sample responses have finite duration. Plotting this response (Figure 5.12.2) shows that the unit-sample response is a pulse of width q and height 1/q. This waveform is also known as a boxcar, hence the name boxcar filter given to this system. We'll derive its frequency response and develop its filtering interpretation in the next section. For now, note that the difference equation says that each output value equals the average of the input's current and previous values. Thus, the output equals the running average of input's previous q values. Such a system could be used to produce the average weekly temperature (q = 7) that could be updated daily. ## Contributor • ContribEEOpenStax This page titled 5.12: Discrete-Time Systems in the Time-Domain is shared under a CC BY 1.0 license and was authored, remixed, and/or curated by Don H. 
Johnson via source content that was edited to the style and standards of the LibreTexts platform; a detailed edit history is available upon request.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8880831003189087, "perplexity": 552.5273732847138}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764500719.31/warc/CC-MAIN-20230208060523-20230208090523-00348.warc.gz"}
https://www.askmehelpdesk.com/mathematics/how-can-find-area-inner-circle-area-large-outer-circle-583845.html?s=626e1606a2595a557abdbd30af5e3013
Joe's centrepiece is a simple but very effective use of two circles and two regular hexagons rotated to give the effect of a medieval dial. The radius of the inner circle is 10 cm, half the length of the sides of the regular hexagon. AC is a side of one of the hexagons and BD is a side of the second, which is obtained from the first by rotation. (i) Find the area of the inner circle, giving your answer in terms of π. (ii) Find the area of the large outer circle also in terms of π and hence express the area of the inner circle as a percentage of the area of the large outer circle. Hint: Each hexagon can be divided into six congruent equilateral triangles, for example the triangle AMC is one of the six equilateral triangles that make up one hexagon and the triangle BMD is one of six equilateral triangles that make up the second hexagon.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8103160262107849, "perplexity": 454.4559555860714}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257648178.42/warc/CC-MAIN-20180323044127-20180323064127-00023.warc.gz"}
https://socratic.org/questions/5825dfb47c01491fc85a4392
Physics Topics # Question #a4392 Aug 22, 2017 #### Explanation: 1. What's electromagnetism and it's characteristics? 2. Define Ampere's swimming rule? 3. What's right hand grip rule? 4. What's magnetic lines of forces? 5. Solenoid? 6. How we can increase power of electromagnet? 7. Flemming's left hand rule? 8. Structure of Barlow's wheel? 9. Structure of electric motor? Differently ac and dc... 10. What's amperian loop? 11. What's magnetic retentivity and coercivity? 12. To make permanent magnet steel is used. Raw iron why not? 13. Can we define Barlow's wheel as a motor? 14. Why Barlow's wheel can not be rotated by ac current? 15. Difference between ordinary magnet and electric magnet? ##### Impact of this question 235 views around the world
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8005349040031433, "perplexity": 26187.415155150928}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-30/segments/1563195525136.58/warc/CC-MAIN-20190717101524-20190717123524-00221.warc.gz"}
https://asmedigitalcollection.asme.org/pressurevesseltech/article-abstract/131/2/021407/474980/Fatigue-Damage-Behavior-of-a-Structural-Component?redirectedFrom=fulltext
The common design practice of pressure vessels subjected to variable amplitude loading is based on the application of a linear damage summation rule, also known as the Palmgren–Miner’s rule. Even though damage induced by small stress cycles, below the fatigue limit, are often taken into account in design codes of practice by two-slope stress-life curves, the sequential effects of the load history have been neglected. Several studies have shown that linear damage summation rules can predict conservative as well as nonconservative lives depending on the loading sequence. This paper presents experimental results about the fatigue damage accumulation behavior of a structural component made of P355NL1 steel, which is a material usually applied for pressure vessel purposes. The structural component is a rectangular double notched plate, which was subjected to block loading. Each block is characterized by constant remote stress amplitude. Two-block sequences were applied for various combinations of remote stress ranges. Three stress ratios were considered, namely, $R=0$, $R=0.15$, and $R=0.3$. Also, constant amplitude fatigue data are generated for the investigated structural component. In general, the block loading illustrates that the fatigue damage evolves nonlinearly with the number of load cycles and is a function of the load sequence, stress levels, and stress ratios. In particular, a clear load sequence effect is verified for the two-block loading, with null stress ratio. For the other (higher) stress ratios, the load sequence effect is almost negligible; however the damage evolution still is nonlinear. This suggests an important effect of the stress ratio on fatigue damage accumulation. 1. European Committee for Standardization-CEN , 2002, EN 13445: Unfired Pressure Vessels, European Standard. 2. Miner , M. A. , 1945, “ Cumulative Damage in Fatigue ,” ASME J. Appl. Mech. , 67 , pp. A159 A164 . 0021-8936 3. De Jesus , A. M. P. , Ribeiro , A. S. , and Fernandes , A. 
A. , 2005, “ Finite Element Modeling of Fatigue Damage Using a Continuum Damage Mechanics Approach ,” ASME J. Pressure Vessel Technol. 0094-9930, 127 ( 2 ), pp. 157 164 . 4. Pereira , H. F. G. S. , De Jesus , A. M. P. , Fernandes , A. A. , and Ribeiro , A. S. , 2006, “ ,” Fifth International Conference on Mechanics and Materials in Design , Porto, Portugal, Jul. 24–26. 5. Manson , S. S. , and Halford , G. R. , 1986, “ Re-Examination of Cumulative Fatigue Damage Analysis—An Engineering Perspective ,” Eng. Fract. Mech. 0013-7944, 25 , pp. 539 571 . 6. Fatemi , A. , and Yang , L. , 1998, “ Cumulative Fatigue Damage and Life Prediction Theories: A Survey of the State of the Art for Homogeneous Materials ,” Int. J. Fatigue 0142-1123, 20 ( 1 ), pp. 9 34 . 7. Schijve , J. , 2003, “ Fatigue of Structures and Materials in the 20th Century and the State of the Art ,” Mater. Sci. , 39 ( 3 ), pp. 307 333 . 1068-820X 8. Lemaitre , J. , and Chaboche , J. -L. , 1990, Mechanics of Solid Materials , Cambridge University Press , Cambridge, UK . 9. Marco , S. M. , and Starkey , W. L. , 1954, “ A Concept of Fatigue Damage ,” Trans. ASME 0097-6822, 76 ( 4 ), pp. 627 632 . 10. Peterson , R. E. , 1959, “ Notch Sensitivity ,” Metal Fatigue , G. Sines and J. L. Waisman , eds., McGraw-Hill , New York , pp. 293 306 . 11. De Jesus , A. M. P. , Ribeiro , A. S. , and Fernandes , A. A. , 2006, “ Low Cycle Fatigue and Cyclic Elastoplastic Behaviour of the P355NL1 Steel ,” ASME J. Pressure Vessel Technol. 0094-9930, 128 ( 3 ), pp. 298 304 . 12. Pereira , H. F. G. S. , De Jesus , A. M. P. , Fernandes , A. A. , and Ribeiro , A. S. , 2007, “
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 3, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.607903778553009, "perplexity": 10233.767622799704}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-43/segments/1570986696339.42/warc/CC-MAIN-20191019141654-20191019165154-00099.warc.gz"}
https://mathoverflow.net/questions/20308/deciding-when-an-infinite-graph-is-connected
# Deciding when an infinite graph is connected I am interested in algorithms that help decide whether a countably infinite locally finite graph is connected. I think there is no algorithm that works for all graphs, e.g. no algorithm should work for an infinite chain with one edge removed. I care about a specific graph $\Gamma$ whose automorphism group acts with finite quotient, i.e. there are only finitely many orbits of vertices. Also $\Gamma$ can be realized as an explicit collection of points in $\mathbb R^n$, and there is an easily computable function $d:\mathbb R^n\times \mathbb R^n\to \mathbb R$ such that two vertices $v,w$ are adjacent if and only if $d(v,w)=0$. I hope to find an algorithm that can be implemented so that after some computer experiments I would have evidence that the graph is actually connected. • Igor, how about the following family of examples? \Gamma_n is the real line, with the usual graph structure, with every nth edge removed. It seems to me that these graphs are just as hard to distinguish from the real line as your example with just one edge removed, but the automorphism group now acts with finite quotient. – HJRW Apr 4 '10 at 15:45 • You need a way to bound the size of the quotient as Henry Wilton's comment shows. Another explanation: Consider all computable functions that can produce a graph whose nodes are indexed by $\mathbb Z$, and which is either real line or a segment $[-n,n]$ plus a bunch of isolated nodes ($n$ is arbitrary). You can not tell algorithmically functions of the first type from the second type, so you need a bound for $n$ as an input to your program. – Sergei Ivanov Apr 4 '10 at 16:19 • If you have only one graph, then there is a program. It is either "begin; print YES; end" or "begin; print NO; end", depending on your graph. – Sergei Ivanov Apr 4 '10 at 16:30 • Igor, when you "fix n" you are basically saying that you know the size of the quotient. That's why the quotient graph is relevant! 
– HJRW Apr 4 '10 at 16:31 • Actually the size of the quotient is not enough too. You cannot tell a line from a bunch of very long loops although both are homogeneous. – Sergei Ivanov Apr 4 '10 at 17:20 • Thanks! I really do not know much about the graph beyond what I stated. There is a closely related problem that is easier to explain. There the graph arises from a locally finite arrangement of hyperplanes in $\mathbb C^n$, where vertices correspond to hyperplanes, and two vertices are adjacent if and only if the hyperplanes intersect. This graph need not not locally finite though. – Igor Belegradek Apr 5 '10 at 0:37 • @damiano: complex hyperplanes have codimension $2$, so I do not see why connectedness comes for free. In truth I have thought more of the case when $\mathbb C^n$ is replaced with the unit ball in $\mathbb C^n$ with the complex hyperbolic metric, and in this case I have tons of examples of arrangements whose correspondning graphs aren't connected. – Igor Belegradek Apr 5 '10 at 1:00 • @damiano: actually you are right, this is what happens in $\mathbb C^n$, but things become different once we only pay attentions to intersections inside the unit ball, so in my comments above substitute $\mathbb C^n$ by the unit ball in $\mathbb C^n$ with the complex hyperbolic metric. – Igor Belegradek Apr 5 '10 at 1:19
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6855676770210266, "perplexity": 249.77547545420836}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-43/segments/1570986647517.11/warc/CC-MAIN-20191013195541-20191013222541-00051.warc.gz"}
https://www.itl.nist.gov/div898/handbook/pri/section3/pri3344.htm
5. Process Improvement 5.3. Choosing an experimental design 5.3.3. How do you select an experimental design? 5.3.3.4. Fractional factorial designs ## Fractional factorial design specifications and design resolution Generating relation and diagram for the 28-3 fractional factorial design We considered the 23-1 design in the previous section and saw that its generator written in "I = ... " form is {I = +123}. Next we look at a one-eighth fraction of a 28 design, namely the 28-3 fractional factorial design. Using a diagram similar to Figure 3.5, we have the following: FIGURE 3.6: Specifications for a 28-3 Design 28-3 design has 32 runs Figure 3.6 tells us that a 28-3 design has 32 runs, not including centerpoint runs, and eight factors. There are three generators since this is a 1/8 = 2-3 fraction (in general, a 2k-p fractional factorial needs p generators which define the settings for p additional factor columns to be added to the 2k-p full factorial design columns - see the following detailed description for the 28-3 design). How to Construct a Fractional Factorial Design From the Specification Rule for constructing a fractional factorial design In order to construct the design, we do the following: 1. Write down a full factorial design in standard order for k-p factors (8-3 = 5 factors for the example above). In the specification above we start with a 25 full factorial design. Such a design has 25 = 32 rows. 2. Add a sixth column to the design table for factor 6, using 6 = 345 (or 6 = -345) to manufacture it (i.e., create the new column by multiplying the indicated old columns together). 3. Do likewise for factor 7 and for factor 8, using the appropriate design generators given in Figure 3.6. 4. The resultant design matrix gives the 32 trial runs for an 8-factor fractional factorial design. (When actually running the experiment, we would of course randomize the run order. Design generators We note further that the design generators, written in `I = ...' 
form, for the principal 28-3 fractional factorial design are: { I = + 3456; I = + 12457; I = +12358 }. These design generators result from multiplying the "6 = 345" generator by "6" to obtain "I = 3456" and so on for the other two generators. "Defining relation" for a fractional factorial design The total collection of design generators for a factorial design, including all new generators that can be formed as products of these generators, is called a defining relation. There are seven "words", or strings of numbers, in the defining relation for the 28-3 design, starting with the original three generators and adding all the new "words" that can be formed by multiplying together any two or three of these original three words. These seven turn out to be I = 3456 = 12457 = 12358 = 12367 = 12468 = 3478 = 5678. In general, there will be (2p -1) words in the defining relation for a 2k-p fractional factorial. Definition of "Resolution" The length of the shortest word in the defining relation is called the resolution of the design. Resolution describes the degree to which estimated main effects are aliased (or confounded) with estimated 2-level interactions, 3-level interactions, etc. Notation for resolution (Roman numerals) The length of the shortest word in the defining relation for the 28-3 design is four. This is written in Roman numeral script, and subscripted as $$2_{IV}^{8-3}$$. Note that the 23-1 design has only one word, "I = 123" (or "I = -123"), in its defining relation since there is only one design generator, and so this fractional factorial design has resolution three; that is, we may write $$2_{III}^{3-1}$$. Diagram for a 28-3 design showing resolution Now Figure 3.6 may be completed by writing it as: FIGURE 3.7: Specifications for a 28-3, Showing Resolution IV Resolution and confounding The design resolution tells us how badly the design is confounded. Previously, in the 23-1 design, we saw that the main effects were confounded with two-factor interactions. 
However, main effects were not confounded with other main effects. So, at worst, we have 3=12, or 2=13, etc., but we do not have 1=2, etc. In fact, a resolution II design would be pretty useless for any purpose whatsoever! Similarly, in a resolution IV design, main effects are confounded with at worst three-factor interactions. We can see, in Figure 3.7, that 6=345. We also see that 36=45, 34=56, etc. (i.e., some two-factor interactions are confounded with certain other two-factor interactions) etc.; but we never see anything like 2=13, or 5=34, (i.e., main effects confounded with two-factor interactions). The complete first-order interaction confounding for the given 28-3 design The complete confounding pattern, for confounding of up to two-factor interactions, arising from the design given in Figure 3.7 is 34 = 56 = 78 35 = 46 36 = 45 37 = 48 38 = 47 57 = 68 58 = 67 All of these relations can be easily verified by multiplying the indicated two-factor interactions by the generators. For example, to verify that 38= 47, multiply both sides of 8=1235 by 3 to get 38=125. Then, multiply 7=1245 by 4 to get 47=125. From that it follows that 38=47. One or two factors suspected of possibly having significant first-order interactions can be assigned in such a way as to avoid having them aliased For this $$2_{IV}^{8-3}$$ fractional factorial design, 15 two-factor interactions are aliased (confounded) in pairs or in a group of three. The remaining 28 - 15 = 13 two-factor interactions are only aliased with higher-order interactions (which are generally assumed to be negligible). This is verified by noting that factors "1" and "2" never appear in a length-4 word in the defining relation. So, all 13 interactions involving "1" and "2" are clear of aliasing with any other two factor interaction. If one or two factors are suspected of possibly having significant first-order interactions, they can be assigned in such a way as to avoid having them aliased. 
Higher resolution designs have less severe confounding, but require more runs A resolution IV design is "better" than a resolution III design because we have less-severe confounding pattern in the 'IV' than in the 'III' situation; higher-order interactions are less likely to be significant than low-order interactions. A higher-resolution design for the same number of factors will, however, require more runs and so it is 'worse' than a lower order design in that sense. Resolution V designs for 8 factors Similarly, with a resolution V design, main effects would be confounded with four-factor (and possibly higher-order) interactions, and two-factor interactions would be confounded with certain three-factor interactions. To obtain a resolution V design for 8 factors requires more runs than the 28-3 design. One option, if estimating all main effects and two-factor interactions is a requirement, is a $$2_{V}^{8-2}$$ design. However, a 48-run alternative (John's 3/4 fractional factorial) is also available. There are many choices of fractional factorial designs - some may have the same number of runs and resolution, but different aliasing patterns. Note: There are other $$2_{IV}^{8-3}$$ fractional designs that can be derived starting with different choices of design generators for the "6", "7" and "8" factor columns. However, they are either equivalent (in terms of the number of words of length four) to the fraction with generators 6 = 345, 7 = 1245, 8 = 1235 (obtained by relabeling the factors), or they are inferior to the fraction given because their defining relation contains more words of length four (and therefore more confounded two-factor interactions). For example, the $$2_{IV}^{8-3}$$ design with generators 6 = 12345, 7 = 135, and 8 = 245 has five length-four words in the defining relation (the defining relation is I = 123456 = 1357 = 2458 = 2467 = 1368 = 123478 = 5678). 
As a result, this design would confound more two factor-interactions (23 out of 28 possible two-factor interactions are confounded, leaving only "12", "14", "23", "27" and "34" as estimable two-factor interactions). Diagram of an alternative way for generating the 28-3 design As an example of an equivalent "best" $$2_{IV}^{8-3}$$ fractional factorial design, obtained by "relabeling", consider the design specified in Figure 3.8. FIGURE 3.8: Another Way of Generating the 28-3 Design This design is equivalent to the design specified in Figure 3.7 after relabeling the factors as follows: 1 becomes 5, 2 becomes 8, 3 becomes 1, 4 becomes 2, 5 becomes 3, 6 remains 6, 7 becomes 4 and 8 becomes 7. Minimum aberration A table given later in this chapter gives a collection of useful fractional factorial designs that, for a given k and p, maximize the possible resolution and minimize the number of short words in the defining relation (which minimizes two-factor aliasing). The term for this is "minimum aberration". Design Resolution Summary Commonly used design Resolutions The meaning of the most prevalent resolution levels is as follows: Resolution III Designs Main effects are confounded (aliased) with two-factor interactions. Resolution IV Designs No main effects are aliased with two-factor interactions, but two-factor interactions are aliased with each other. Resolution V Designs No main effect or two-factor interaction is aliased with any other main effect or two-factor interaction, but two-factor interactions are aliased with three-factor interactions.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7657374143600464, "perplexity": 1304.5582677880518}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-50/segments/1606141732835.81/warc/CC-MAIN-20201203220448-20201204010448-00494.warc.gz"}
https://byjus.com/icse-class-8-maths-selina-solutions-chapter-14-linear-equations-in-one-variable/
# ICSE Class 8 Maths Selina Solutions for Chapter 14 Linear Equations in One Variable ICSE Class 8 Maths Chapter 14 Linear Equations in One Variable covers concepts which involve solving linear equations in one variable in contextual problems involving multiplication and division. The expressions with only one variable are called Linear Equations in one variable. Variables are the numbers mentioned in terms of alphabets. Few of the examples of linear equations in one variable are 2x = 8, 4y = 9, 3z = 7. This chapter will provide a foundation base for future complex topics related to equations. ICSE Class 8 students should solve all the questions mentioned in the exercises to clear their doubts. By solving these exercises students will gain confidence which will help them to score good marks in their ICSE Class 8 maths examination. Students can download the ICSE Class 8 Maths Selina Solutions Chapter 14 Linear Equations in One Variable from the provided link below. The Selina Solutions are prepared by the subject experts which are explained in step by step format for easy understanding of the students. ## Download ICSE Class 8 Maths Selina Solutions Chapter 14 Linear Equations in One Variable ICSE Class 8 Maths Chapter 14 Linear Equations in One Variable covers a total of 15 questions and the solutions of all these questions are provided below. Students of ICSE Class 8 can also download Selina Solutions of all the other chapters for Class 8 maths subjects by click on ICSE Selina Class 8 Maths Solution. 
### CHAPTER 14 – LINEAR EQUATIONS IN ONE VARIABLE Solve the following equations: Question 1 20 = 6+2x Solution:- Simplifying we get 20 = 6 + 2x 20 – 6 = 2x 14 = 2x 7 = x x = 7 Question 2 15 + x = 5x + 3 Solution:- Simplifying we get 15 – 3 = 5x – x 12 = 4x 3=x x=3 Question 3 $\frac{3 x+2}{x-6}=-7$ Solution:- By cross multiplying 3x + 2 = – 7(x-6) 3x + 2 = -7x + 42 3x + 7x = 42 – 2 10x = 40 x = 4 Question 4 3a – 4 = 2 (4 – a) Solution:- 3a – 4 = 8 – 2a 3a + 2a = 8 + 4 5a = 12 a = 2.4 Question 5 3(b – 4) = 2 (4 – b) Solution:- 3b-12=8-2b 3b+2b=8+12 5b=20 $b=\frac{20}{5}$ b=4 Question 6 $\frac{x+2}{9}=\frac{x+4}{11}$ Solution:- By cross multiplying 11(x+2)=9(x+4) 11x+22=9x+36 11x-9x=36-22 2x=14 $x=\frac{14}{2}$ ⇒ x=7 Question 7 $\frac{x-8}{5}=\frac{x-12}{9}$ Solution:- By cross multiplying 9(x-8)=5(x-12) 9x-72=5x-60 9x-5x=-60+72 4x=12 $x=\frac{12}{4}$ x=3 Question 8 5(8x + 3) = 9(4x + 7) Solution:- 40x+15=36x+63 40x-36x=63-15 4x=48 $x=\frac{48}{4}$ x=12 Question 9 3(x + 1) = 12 + 4 (x – 1) Solution:- 3(x+1)=12+4(x-1) 3x+3=12+4x-4 3x-4x=12-4-3 -x=5 ⇒x= -5 Question 10 $\frac{3 x}{4}-\frac{1}{4}(x-20)=\frac{x}{4}+32$ Solution:- $\frac{3 x}{4}-\frac{x}{4}+5=\frac{x}{4}+32$ $\frac{3 x}{4}-\frac{x}{4}-\frac{x}{4}=32-5$ $\frac{3 \mathrm{x}-\mathrm{x}-\mathrm{x}}{4}=27$ $\frac{x}{4}=27$ $x=27 \times 4$ x=108 Question 11. $3 a-\frac{1}{5}=\frac{a}{5}+5 \frac{2}{5}$ Solution:- $3 a-\frac{a}{5}=5 \frac{2}{5}+\frac{1}{5}$ $3 a-\frac{a}{5}=\frac{27}{5}+\frac{1}{5}$ (Multiplying each term by 5) $\Rightarrow 3 a \times 5-\frac{a}{5} \times 5=\frac{27}{5} \times 5+\frac{1}{5} \times 5$ 15a-a=27+1 14a=28 $a=\frac{28}{14}$ a=2 Question 12. $\frac{x}{3}-2 \frac{1}{2}=\frac{4 x}{9}-\frac{2 x}{3}$ Solution:- $\frac{x}{3}-\frac{5}{2}=\frac{4 x}{9}-\frac{2 x}{3}$ Since, L.C.M. 
of denominators 3, 2, 9 and 3=18 [Multiplying each term by 18] $\Rightarrow \quad \frac{x}{3} \times 18-\frac{5}{2} \times 18=\frac{4 x}{9} \times 18-\frac{2 x}{3} \times 18$ 6x-45=8x-12x 6x+12x-8x=45 18x-8x=45 10x=45 $x=\frac{45}{10}$ x=4·5 Question 13: $\frac{4(y+2)}{5}=7+\frac{5 y}{13}$ Solution:- $\frac{4 y+8}{5}=7+\frac{5 y}{13}$ $\frac{4 y+8}{5}=\frac{91+5 y}{13}$ (By cross multiplying) 13(4y+8)=5(91+5y) 52y+104=455+25y 52y-25y=455-104 27y=351 $y=\frac{351}{27}$ y=13 Question 14. $\frac{a+5}{6}-\frac{a+1}{9}=\frac{a+3}{4}$ Solution:- Since, L.C.M. of denominators 6, 9 and 4=36 Multiplying each term by $36 \Rightarrow \frac{a+5}{6} \times 36-\frac{a+1}{9} \times 36=\frac{a+3}{4} \times 36$ 6(a+5)-4(a+1)=9(a+3) 6a+30-4a-4=9a+27 6a-4a-9a=27-30+4 6a-13a=1 -7a=1 $a=-\frac{1}{7}$ Question 15: $\frac{2 x-13}{5}-\frac{x-3}{11}=\frac{x-9}{5}+1$ Solution:- $\frac{2 x-13}{5}-\frac{x-3}{11}=\frac{x-9}{5}+\frac{1}{1}$ Since, L.C.M. of denominators 5, 11, 5 and 1=55 $∴ \frac{2 x-13}{5} \times 55-\frac{x-3}{11} \times 55=\frac{x-9}{5} \times 55+\frac{1}{1} \times 55$ 11(2x-13)-5(x-3)=11(x-9)+55 22x-143-5x+15=11x-99+55 22x-5x-11x=-99+55+143-15 6x=84 $x=\frac{84}{6}$ x=14 ### ICSE Class 8 Maths Selina Solutions Chapter 14 – Linear Equations in One Variable Linear Equations in One Variable introduces students to the basic concepts of linear equations. It helps students to solve linear equations with the variable on one side and a number on the other side. It will help the students to learn different methods to solve linear equations. The chapter also includes problems, solved and unsolved exercises which makes it more interesting and fun to learn. Students of Class 8 ICSE can access the solutions for Physics, Chemistry, and Biology by clicking on ICSE Class 8 Selina Solutions. Keep learning and stay tuned for further updates on ICSE and other competitive exams.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.5555243492126465, "perplexity": 1018.9786228647977}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-47/segments/1573496670643.58/warc/CC-MAIN-20191121000300-20191121024300-00388.warc.gz"}
https://www.arxiv-vanity.com/papers/1008.4579/
Nonlinear $W_\infty$ as Asymptotic Symmetry of Three-Dimensional Higher Spin AdS Gravity Marc Henneaux   &   Soo-Jong Rey Université Libre de Bruxelles and International Solvay Institutes ULB-Campus Plaine CP231, 1050 Brussels, BELGIUM Centro de Estudios Científicos (CECS), Casilla 1469, Valdivia, CHILE School of Natural Sciences, Institute for Advanced Study, Princeton NJ 08540 USA School of Physics and Astronomy & Center for Theoretical Physics Seoul National University, Seoul 151-747 KOREA abstract We investigate the asymptotic symmetry algebra of (2+1)-dimensional higher spin, anti-de Sitter gravity. We use the formulation of the theory as a Chern-Simons gauge theory based on the higher spin algebra $hs(1,1)$. Expanding the gauge connection around asymptotically anti-de Sitter spacetime, we specify consistent boundary conditions on the higher spin gauge fields. We then study residual gauge transformation, the corresponding surface terms and their Poisson bracket algebra. We find that the asymptotic symmetry algebra is a nonlinearly realized $W_\infty$ algebra with classical central charges. We discuss implications of our results to quantum gravity and to various situations in string theory. ## 1 Introduction Higher spin (HS) anti-de Sitter (AdS) gravity [1, 2, 3] is an interesting extension of AdS Einstein-Hilbert gravity, whose various properties turn out to be highly nontrivial compared to the latter. This HS theory is also expected to be relevant to a variety of situations in string theory. For example, in Maldacena’s anti-de Sitter / conformal field theory (AdS/CFT) correspondence [4], one would like to understand the holographic dual of CFT at weak ’t Hooft coupling regime. CFTs in this regime are known to possess infinitely many towers of HS currents [5]. By holography, this would mean that the putative closed string dual is at small string tension or large spacetime curvature, and must contain infinitely many towers of HS gauge fields in addition to gravity. 
One expects that HS AdS gravity is the simplest framework for studying the AdS/CFT correspondence in this regime. In this context, asymptotic symmetry was studied extensively for AdS (black hole) spacetime as the holographic dual of symmetries of CFT at strong ’t Hooft coupling regime. An interesting question is whether the symmetry persists as the correspondence is interpolated to small ’t Hooft coupling regime and, if so, how we may identify it as an asymptotic symmetry of the holographic dual, HS AdS gravity. (2+1)-dimensional AdS gravity is particularly interesting since the theory is simple yet possesses a rich asymptotic symmetry [6] and provides a concrete framework for studying the AdS/CFT correspondence. It was shown in [6] that the asymptotic symmetry algebra is the infinite-dimensional conformal algebra in two dimensions, viz. two copies of the Virasoro algebra, with central charge $$c=\frac{3\ell}{2G}, \qquad (1.1)$$ where $\ell$ is the anti-de Sitter radius and $G$ is the Newton’s constant. Extension to (2+1)-dimensional AdS supergravities [7] was considered in [8, 9]. In this case, the asymptotic symmetry algebra is enlarged to appropriate extended superconformal algebras with quadratic nonlinearities in the currents [10, 11, 12, 13, 14, 15]. The purpose of this work is to report results on the asymptotic symmetry algebra of HS AdS gravity in (2+1) dimensional spacetime. The reason we focus on (2+1) dimensions is because the HS AdS gravity again takes a particularly simple form — it can be formulated as a Chern-Simons theory based on so-called infinite-dimensional HS algebra [16, 17]. This algebra contains $sl(2,\mathbb{R})$ as a subalgebra, and hence its Chern-Simons formulation automatically contains three-dimensional AdS gravity [7, 18]. 
After briefly reviewing the theory, we provide boundary conditions on the fields that are asymptotically invariant under an infinite-dimensional set of transformations that contains the conformal group at infinity, and whose generators are shown to close according to a classical nonlinear algebra. This algebra is an extension of the classical version of the algebras of [19]. Classical [20, 21] and quantum [22] nonlinear algebras have appeared previously, but unlike the classical algebra of [20, 21], the asymptotic algebra uncovered here has a nontrivial central charge set by the AdS radius scale measured in unit of the Newton’s constant. In particular, the central charge in the Virasoro subalgebra remains equal to that of the pure gravity (1.1). A more detailed presentation of our results as well as supersymmetric extensions will be presented in separate works [23]. ## 2 Higher Spin Anti-de Sitter Gravity We first recapitulate (2+1)-dimensional AdS Einstein-Hilbert gravity coupled to an infinite tower of HS gauge fields. It is well-known that the (2+1)-dimensional AdS Einstein-Hilbert gravity can be reformulated as a Chern-Simons gauge theory with gauge group . Following the pioneering work of Blencowe [16], an approach incorporating the HS gauge fields simply replaces the gauge gauge group by a suitable infinite-dimensional extension of it. In this work, we shall follow this approach. We should, however, emphasize that our analysis is strictly at classical level and there will make no difference between the Chern-Simons and the Einstein-Hilbert formulations. The action describing the HS extension is a difference of two Chern-Simons actions [16]: S[Γ,~Γ]=SCS[Γ]−SCS[~Γ] (2.1) where , are connections taking values in the algebra . This algebra is a ’higher spin algebra’ of the class introduced in [24]. Its properties needed for the present discussion are reviewed in Appendix A, to which we refer for notations and conventions. 
The connections contain all HS gauge field components as well as the metric and spin connection. In (2.1), is the Chern-Simons action, defined by SCS[Γ]=k4π∫MTr(Γ∧dΓ+23Γ∧Γ∧Γ) . (2.2) The 3-manifold is assumed to have topology where is a 2-manifold with at least one boundary on which we shall focus our analysis and which we refer as ’infinity’. The parameter is related to the (2+1)-dimensional Newton constant as , where is the AdS radius of curvature. It is well known that the HS theory (2.1) embeds the AdS gravity by truncation. Truncating the connections to the components and identifying them with the triad and the spin connection: Aai=ωai+1ℓeaiand~Aai=ωai−1ℓeai, (2.3) one finds that the action takes the form S[Γ,~Γ]=18πG∫Md3x(12eR+eℓ2+2LHS) , (2.4) the Einstein-Hilbert gravity with negative cosmological constant. The equations of motion read dea+ϵabcωb∧ec=0 dωa+12ϵabcωb∧ωc+12ℓ2ϵabceb∧ec=0. (2.5) The last term in (2.4) denotes contribution of higher spin fields. For instance, retaining the components as well and identifying them with Aabi=ωabi+1ℓeabi% and~Aabi=ωabi−1ℓeabi, (2.6) one easily find that the last term in (2.4) takes the form LHS=ϵabcea∧(ωbd∧ωce+ebd∧ece)ηde+eab∧(dωab+ϵdeaωd∧ωbe)] . (2.7) These are precisely the spin-3 field equations in the background of negative cosmological constant, expressed in the first-order formalism. We should, however, note that the HS theory (2.1) is not a smooth extrapolation of the AdS gravity - for example, integrating out the massless HS gauge fields does not lead to the AdS gravity in any direct and obvious way. ## 3 Asymptotic symmetries ### 3.1 boundary conditions and surface terms With Chern-Simons formulation of the (2+1)-dimensional HS AdS gravity at hand, we are ready to study global gauge symmetries at asymptotic infinity. We shall from now on focus on either chiral sector in (2.1). The analysis for the other chiral sector proceeds in exactly the same way. We shall also work in units of , unless otherwise stated. 
In the case of the (2+1)-dimensional AdS Einstein-Hilbert gravity, it was shown in [25, 8, 9] that the boundary conditions of [6] describing asymptotically AdS metrics is given in terms of the connections of the Chern-Simons formulation by A∼[−1r2πkL(ϕ,t)X11+rX22]dx+−[1rX122]dr (3.1) with the other chirality sector fulfilling a similar condition. Here, are chiral coordinates, and is an arbitrary function of and . We denoted the generators as . See appendix A.2 for our conventions and notations. It is convenient to eliminate the leading -dependence by performing the gauge transformation [25, 8, 9] Γi→Δi=Ω∂iΩ−1+ΩΓiΩ−1,~Γi→~Δi=Ω∂iΩ−1+Ω~ΓiΩ−1 , (3.2) where depends only on and is given by Ω=⎛⎝r1200r−12⎞⎠. (3.3) In the new connection , the only component that does not vanish asymptotically is , given by Δ∼X22−2πkL(ϕ,t)X11. (3.4) We see that the asymptotic boundary conditions are encoded entirely to the highest-weight component, spanned in the present case by the generator . We shall generalize these boundary conditions to HS gauge fields. In the conventions and notations of appendix A.3, we proceed by allowing non-zero components of the . Intuitively, it suffices to vary only the highest-weight components spanned by the generators whose indices are all , viz. . Thus, we require that, after gauge transformation (3.2), the connection behaves asymptotically as Δ∼X22−2πkLX11+122πkMX1111+% higher" . (3.5) Here, “higher” denotes terms involving the generators of higher spin () and , , … are arbitrary functions of and . The numerical factors are chosen to get correct normalization in the gauge functional (3.9) below. The boundary conditions are preserved by the residual gauge transformations δΔ=Λ′+[Δ,Λ] (3.6) that maintain the behavior at asymptotic infinity. Here, the prime denotes derivative with respect to . Recall that does not depend asymptotically on in order to preserve at asymptotic infinity, so the derivative with respect to is also the derivative with respect to . 
As shown in the next subsection, these asymptotic symmetries are spanned by the gauge parameter Λ=εX22+∑s≥2ηs+1X(0,2s)+λ . (3.7) Here, and are mutually independent arbitrary functions of . Also, involves only the generators with at least one index equal to (i.e., ) and is completely determined through the asymptotic conditions in terms of and . The lower order terms in take the form λ = (12ε′′−2πkεL+a(2,0))X11+(12ε′+a(1,1))X12 (3.8) +∑p≥1,q≥0,p+q=2k≥4A(p,q)X(p,q) , where and are determined by ’s (independent of ) and where the coefficients are also completely determined by and . Therefore, we see that the asymptotic symmetries are completely encoded to the independent functions and in the gauge parameter (3.7). We stress again that they are arbitrary functions of and thus arbitrary functions of at a given time . According to the general principle of gauge theory, these asymptotic symmetries are generated in the equal-time Poisson bracket by the spatial integral , where (i) are the Chern-Simons-Gauss constraints, equal to minus the factor of the temporal components of the connection in the action, and (ii) is a boundary term at asymptotic infinity chosen such that the variation of the generator contains only un-differentiated field variations under the given boundary conditions [26]. This is the requirement that has well-defined functional derivatives. Applying this procedure and using the fact that the generators (which are the only ones that appear in except for ) are paired in the scalar product with , one gets G[Λ]=∮dϕ (εL+ηM+⋯) , (3.9) up to bulk terms that vanish on-shell. Here, is abbreviation of and the ellipses denote contribution of HS terms involving for . The normalization factors in (3.5) were chosen chosen so as not to have factors in (3.9). In the next section, we shall show that the asymptotic symmetry generated by is a nonlinearly realized algebra with classical central charges. 
### 3.2 general structure of symmetry transformations In the previous subsection, we argued that the gauge parameter generating the asymptotic symmetry algebra takes the form of (3.7) where is given by (3.8). Here, we prove this and further identify the general structure generating the sought-for HS symmetry algebra. The condition that , with given by (3.5) and given by (3.6), should take the same form as leads to conditions on the gauge parameter . This gauge parameter has a priori the general form (3.7) but with not yet known. We want to prove that the conditions on yield no restriction on and , while completely determine in terms of and . To that end, we first observe that reads asymptotically δΔ=−2πkδLX11+122πkδMX1111+More" , (3.10) where “More” denotes terms involving the generators of higher orders (i.e., ). Thus, involves only the generators (no index ). We must therefore require that all terms proportional to the generators with at least one index equal to should cancel in (3.6). To analyze this requirement, it is useful to have a notation that counts the number of indices and in the generators. Therefore, we rewrite as Δ=X22+∑k≥1N(2k,0)X(2k,0) (3.11) where the coefficients and are evidently proportional to and , respectively. We also rewrite as Λ = ∑k≥1ρ(0,2k)X(0,2k)+∑k≥1ρ(1,2k−1)X(1,2k−1)+∑k≥1ρ(2,2k−2)X(2,2k−2) + ∑k≥2ρ(3,2k−3)X(3,2k−3)+∑k≥2ρ(4,2k−4)X(4,2k−4) + ∑k≥3ρ(5,2k−5)X(5,2k−5)+⋯ The first term in this expansion is a rewriting of part, while is the sum of all the other terms. The idea now is to investigate consequences of the requirement that all terms proportional to the generators with at least one index equal to ought to cancel in (3.6) by examining (i) first the terms containing the generators with no index equal to 1 in (viz. all indices equal to 2), (ii) next those with only one index equal to 1, (iii) next those with only two indices equal to 1, etc. 
A simple calculation shows that the coefficient of in (no indices equal to 1) is given by c(0,2k)∼ρ′(0,2k)+ρ(1,2k−1)+f0(ρ(0,2i),N(2j,0)) , (3.12) As our goal is to explicitly indicate how the structure emerges, we presented the terms only schematically by dropping numerical factors. The term is an infinite sum of bilinears in the ’s and the ’s. The first contribution to comes from the bracket of with (one s replaced by one ), while the second contribution to arises from the bracket which is the only bracket in yielding generators with no index equal to . This bracket yields other generators as well, but they only contribute to the equations at the subsequent levels. Thus, we can regard the condition as determining the coefficients of in in terms of the ’s and the ’s of the connection . Note that even though is an infinite sum, there is only a finite number of terms involving a given because one must have for the bracket to yield a non vanishing term involving (). It is also easy to check that is explicitly given by c(0,2)=ρ′(0,2)−2ρ(1,1)+more" , (3.13) where numerical factors are reinstated and “more” denotes terms independent of . The condition then implies the expression (3.8) for the coefficient of in . The next step is to examine the coefficient of in (only one index equal to ). By a similar reasoning, one finds c(1,2k−1)∼ρ′(1,2k−1)+ρ(2,2k−2)+f1(ρ(0,2i),ρ(1,2l−1),N(2j,0)). (3.14) Therefore, the requirement determines the ’s in terms of the ’s and the ’s. Since the ’s are functions of the ’s that have been determined at the previous step, the ’s are determined in terms of the ’s. Note again that even though there is an infinite number of terms in because of , there is only a finite number of terms containing a given . One finds in particular that takes the schematic form c(1,1)=ρ′(1,1)−ρ(2,0)+N(2,0)ρ(0,2)+more" (3.15) so that the equation implies the expression (3.8) for the coefficient of in . 
The triangular pattern of the procedure is now evident and proceeds similarly at the next levels. One determines in this fashion recursively not only the coefficients , but also , , viz. the complete functional form of , in terms of the coefficients ’s, which remain unconstrained. The procedure terminates once one has imposed the conditions . Consequently, there is no condition imposed on . Rather, the coefficient determines the variation of the connection through . Notice that the procedure introduces nonlinearities through the ’s. We have thus established that the gauge parameter generating asymptotic symmetry takes precisely the form given in (3.7) and (3.8). ## 4 Nonlinear W∞ Symmetry Algebra As we explained above, the variations of the coefficients of the connection under the asymptotic symmetries are given by the equation δN(2k,0)=c(2k,0) , (4.1) where the are the unconstrained coefficients of the generator in . The recursive method explained in the previous section enables to determine these coefficients in terms of the independent parameters ’s parametrizing the asymptotic symmetry. We have recalled in the previous section that the coefficients of the connection are themselves the generators of the gauge transformations and hence of the asymptotic symmetries. In fact, in more compact notations, (3.9) has the form G[Λ]=∮dϕ(∑k≥1ρ(0,2k)N(2k,0)) (4.2) up to bulk terms that vanish on-shell. Again, for clarity, we kept the expression schematic regarding normalization of the generators. They will not affect foregoing argument and result. (We shall work out the normalization explicitly in the next section for the truncation to ). In general, the variation of any phase-space function under the gauge transformation with parameter is equal to where is classical Poisson bracket. Thus, in the present case, we have δN(2k,0)={N(2k,0)(ϕ),∫dϕ′(∑m≥1ρ(0,2m)(ϕ′)N(2m,0)(ϕ′))}PB . 
(4.3) This observation enables us to read the Poisson bracket commutators of the ’s in (4.2) among themselves from their variations (4.1) 111If one drops the bulk terms as can be done by fixing the gauge in the bulk, the Poisson bracket in question is the corresponding Dirac bracket. The form of the symmetry algebra does not depend on how one fixes the gauge because the generators are first class.: {N(2k,0)(ϕ),∫dϕ′(∑m≥1ρ(0,2m)(ϕ′)N(2m,0)(ϕ′))}PB=c(2k,0)(ϕ) , (4.4) where we have made it explicit for the angular dependence at a fixed time. By identifying the coefficients of the arbitrary parameter on both sides of this equation, one can read off the Poisson brackets {N(2k,0)(ϕ),N(2m,0)(ϕ′)}PB (4.5) and resulting algebra . In the rest of this section, we sketch the general procedure of extracting . To illustrate the procedure concretely, in the next section, we will work out the case corresponding to the truncation of . It is evident from the above analysis that the expression obtained for is closed, in the sense that it is expressed entirely in terms of the ’s. Terms that are generated from the Poisson bracket are in fact nonlinear polynomials in the ’s. Therefore, the resulting gauge algebra is not a Lie-type but a nonlinear realization thereof. Furthermore, by construction, the Jacobi identity holds for because it always holds for the Poisson brackets or the corresponding Dirac brackets after the bulk terms are gauge-fixed. We claim that the resulting algebra is a classical, nonlinearly realized with classical central charges. It is a classical algebra because we are using the Poisson-Dirac bracket of classical quantities and not the commutator of corresponding operators. It also has nontrivial classical central charges. To support this claim, it suffices to prove that 1. The algebra contains the Virasoro algebra at lowest degree , viz. the generators form a Virasoro algebra with central charge : {L(ϕ),L(ϕ′)}PB=−k4π∂3ϕδ(ϕ−ϕ′)+(L(ϕ)+L(ϕ′))∂ϕδ(ϕ−ϕ′) (4.6) 2. 
The generators have the conformal weight : {L(ϕ),Mj+1(ϕ′)}PB=(Mj+1(ϕ)+jMj+1(ϕ′))∂ϕδ(ϕ−ϕ′) . (4.7) To establish these statements, we pick up the terms proportional to in and . This is done by first determining the form of in the particular case when the only non-vanishing free parameter is . In that case, the solution is easily determined to be Λ=εX22+12ε′X12+(12ε′′−2πkεL)X11+ε∑j≥2N(2j,0)X(2j,0) , (4.8) since with this , the expression contains only generators . The coefficients of the generators in give furthermore the variations of and (). These are easily derived from (4.8) using [ X(2j,0),X12 ]=(2j)X(2j,0) . (4.9) δL = −k4πε′′′+(εL)′+ε′L (4.10) δN(2j,0) = (εN(2j,0))′+jε′N(2j,0)(j>1). (4.11) The relations (4.6) and (4.7) follow immediately from these. Explicit form of the Poisson-Dirac brackets of the resulting algebra and classical central charges therein are obtainable by straightforward though tedious computations. ## 5 Truncation to W3 Algebra To illustrate the above procedure explicitly, we truncate the theory by assuming that all the generators with are zero. i.e., we keep only and . This amounts to truncating the HS algebra by keeping only , and setting all the other generators to zero. As shown in appendix B, this is a unique, consistent truncation as the Jacobi identity remains to hold. The resulting algebra is , albeit not in a Chevalley-Serre basis 222The relation between the Chern-Simons formulation and the symmetric tensor formulation is implicit in [16] (using the vielbein/spin connection-like Vasiliev formulation of higher spins), and underlies the fact that [16] is a theory of higher spins coupled to gravity. Truncating this general relation to , one gets the ’metric + 3-index symmetric tensor’ formulation of the coupled ‘spin-2 + spin-3’ system. This was briefly recalled at the end of section 2.. We now show that and fulfill the classical nonlinear algebra with classical central charges. 
The condition that , with given by Δ∼X22−2πkLX11+122πkMX1111 (5.1) and given by (3.6), should take the same form as leads to conditions on the coefficients of the gauge parameter in the expansion Λ=aX11+bX12+εX22+mX1111+nX1112+pX1122+qX1222+ηX2222 (5.2) These conditions are explicitly that are determined as b=12ε′,a=12ε′′−2πkεL−22πkηM . (5.3) and that are determined as m = 124η′′′′−16⋅2πk(ηL)′′−14⋅2πk(η′L)′ −2πk(14η′′−2πkηL)L+122πkεM n = 124η′′′−16⋅2πk(ηL)′−14⋅2πkη′L p = 112η′′−13⋅2πkηL q = 14η′ . (5.4) One also obtains the gauge variations of and as δL=−k4πε′′′+(Lε)′+ε′L+2(ηM)′+η′M (5.5) and δM = 1288⋅k2πη′′′′′−172(ηL)′′′−148(η′L)′′ (5.6) −112((14η′′−2πkηL)L)′ −112(16η′′′−23⋅2πk(ηL)′−2πkη′L)L +(εM)′+2ε′M . Now, as already recalled above in the general case, the variation of any phase space function under the gauge transformation with parameter is equal to where is the classical Poisson bracket. One can use this to find the Poisson brackets of and from their variations, taking (3.9) into account. One finds explicitly that {L(ϕ),L(ϕ′)}PB=−k4π∂3ϕδ(ϕ−ϕ′)+(L(ϕ)+L(ϕ′))∂ϕδ(ϕ−ϕ′) {L(ϕ),M(ϕ′)}PB=(M(ϕ)+2M(ϕ′))∂ϕδ(ϕ−ϕ′) {M(ϕ),M(ϕ′)}PB=1288⋅k2π∂5ϕδ(ϕ−ϕ′)−5144(L(ϕ)+L(ϕ′))∂3ϕδ(ϕ−ϕ′) +148(L′′(ϕ)+L′′(ϕ′))∂ϕδ(ϕ−ϕ′) +19⋅2πk(L2(ϕ)+L2(ϕ′))∂ϕδ(ϕ−ϕ′) . This is the classical algebra studied previously in various different contexts [19], [27], [28]. Upon Fourier mode decomposition, the nonlinear algebra is given by [27] i[ Lm,Ln ] = (n−m)Lm+n+c12m(m2−1)δm+n,0 (5.7) i[ Lm,Vn ] = (2m−n)Vm+n i[ Vm,Vn ] = c360m(m2−1)(m2−4)δm+n,0+165c(m−n)Λm+n + (m−n)(115(m+n+2)(m+n+3)−16(m+2)(n+2))Lm+n where Λm=+∞∑n=−∞Lm−n Ln . (5.8) sums quadratic nonlinear terms. The classical central charges are given by . The quantum counterpart of this algebra was studied by Zamolodchikov [19] in a different context. 
Comparing it with the above classical algebra, one sees that the quantum effects enter to regularization of the quadratic nonlinear terms ’s and to the shift of the overall coefficient of the quadratic terms. This fits with the fact that classical limit takes . ## 6 Discussions In this paper, we have established that the asymptotic symmetries of the HS AdS gravity form a nonlinear algebra. A salient feature of the emerging classical algebra is that it is determined in a unique manner from the gauge algebra and the Chern-Simons parameter , without an extra free parameter. Moreover, this classical algebra has from the outset definite nontrivial central charges expressed solely in terms of the AdS radius and the Newton’s constant (and nothing else). In particular, the central charge appearing in the Virasoro subalgebra is just the AdS central charge (1.1). Truncation of the higher spin gauge algebra up to a finite spin is inconsistent when , since the corresponding generators do not form a subalgebra (except when ). The Poisson-Dirac commutators of with involve indeed generators of degree , which is strictly higher than and when , . One may try to ignore these higher degree terms but this brutal truncation yields commutators that do not fulfill the Jacobi identity (except for as we pointed out, see appendix A) and so this cannot be done (except for )333That the case works is somewhat unanticipated and should be considered exceptional from the point of view.. On the other hand, from the purely algebraic viewpoint, one might opt to start from the algebra with finite obtained from algebra of gauge invariance and take the limit to obtain a universal -algebra. However, these two approaches are completely different in spirit since in general the truncation of up to a finite spin does not yield the algebra for any finite . 
Nonlinearity of the Poisson-Dirac brackets or commutation relations (compared to the Lie-type algebra) is an important and distinguishing characteristic of the operator algebra for spin . However, in the usual large- limit, this nonlinearity is typically lost [29], [30], [31], [32]: the resulting algebra is usually linear (and also in some cases the classical central charge is absent). Our approach obtains the gauge algebra in a completely different way, and in particular does not rely on such a limiting procedure. We note that the nonlinearity of the algebra puts strong constraints through the Jacobi identities. Thus, the nonlinear algebra derived in this work, which is inherent to the -based HS extension of the (2+1)-dimensional AdS Einstein-Hilbert gravity – in the sense that it is uniquely determined by it – has a rich and interesting structure. More detailed analysis of this algebraic structure, extensions to supersymmetry and inclusion of spin-1 currents will be reported elsewhere [23]. Related to this, it has been known previously that the linear version of the algebra is related to the first Hamiltonian structure of the KP hierarchy [33]. In our nonlinearly realized version of the algebra, we speculate that the relation extends to the second Hamiltonian structure of the KP hierarchy, the structure proposed by Dickey [34] from generalizing the Gelfand-Dickey brackets [35] to pseudo-differential operators. It is an interesting question what these relations tell us about the spectrum of classical solutions of the HS AdS gravity. It is tempting to interpret that the presence of the algebra at infinity implies that the classical solutions of the (2+1)-dimensional HS AdS gravity are labeled by infinitely many conserved charges, among which mass and angular momentum are just the first two. 
If the interpretation is correct, we expect that these charges play a central role in understanding microstates responsible for the black hole entropy in the regime where the spacetime curvature is large or, in the string theory context, the string scale is very low444One should also mention here the intriguing appearance of the linear algebra found in [37, 38] in the context of black holes and Hawking radiation. A possible holographic dual in this regime was explored recently by Witten [36] for pure AdS gravity. There, an indication was found that two-dimensional CFT duals are the monster theory of Frenkel, Lepowsky and Meurman or discrete series extensions thereof. Once embedded into string theory, one expects this regime must include (nearly) massless HS gauge fields in addition to gravity. This brings in a host of intriguing questions: Are there HS extensions of the monster theory and, if so, what are they? Can the extension be related to, or physically interpreted as, a condensation of long strings? In the context of string theory, the massless HS gauge fields were interpreted to arise via a sort of inverse Higgs mechanism in the limit of vanishing string mass scale (viz. string tension) [39]. If so, the HS gauge fields would become massive at large but finite string mass scale [40]. In the CFT dual, this would be reflected in an anomalous violation of conservation laws of the HS currents. Nevertheless, the symmetry algebra discovered in this work would be an approximate symmetry of the CFT duals and should still be useful for understanding these theories. In addition to the weak ’t Hooft coupling regime alluded to in the Introduction, there is another situation in string theory where the result of this paper may be applicable. The near-horizon geometry of the small black strings carrying one or two charges is singular in Einstein-Hilbert gravity. 
One expects that, by the stretched horizon mechanism [41], string corrections resolve it to the (2+1)-dimensional AdS spacetime times a compact 7-dimensional manifold characterizing the black string horizon with residual chiral supersymmetries [42]. A concrete suggestion like this was put forward for the ’stretched horizon’, near-horizon geometry of the macroscopic Type II and heterotic strings [43], [44], [45]. In both cases, the near-horizon geometry has curvature radius of order the string scale. So, not just the massless but also all HS string states are equally important for finite energy excitations. This suggests that (2+1)-dimensional HS AdS supergravity theories are appropriate frameworks. It would then be very interesting to identify the origin of the symmetry algebra as well as the classical central charges associated with HS currents from the macroscopic superstring viewpoint. On a more speculative side, our result may also find a potentially novel connection of the (2+1)-dimensional HS AdS gravity to higher-dimensional gravity. It has been known [46], [47], [48] that 4-dimensional self-dual gravity is equivalent to a large limit of a 2-dimensional nonlinear sigma model with Wess-Zumino terms only. The self-dual sector has an infinite-dimensional symmetry algebra which includes the algebra. This hints that (2+1)-dimensional HS AdS gravity might be ’holographically dual’ to 4-dimensional self-dual gravity, providing a concrete example of a heretofore unexplored gravity-gravity correspondence. Central to all these issues, the most outstanding question posed by our work is: What are the black holes carrying hair in HS AdS gravity? We are currently exploring these issues and intend to report progress elsewhere. ## Acknowledgement We thank Nima Arkani-Hamed, Juan Maldacena and Edward Witten for useful discussions. 
MH is grateful to the Institute for Advanced Study (Princeton) for hospitality during this work and to the Max-Planck-Institut für Gravitationsphysik (Potsdam) where it was completed. SJR is grateful to the Max-Planck-Institut für Gravitationsphysik (Potsdam) for hospitality during this work and to the Institute for Advanced Study (Princeton) where it was completed. We both acknowledge support from the Alexander von Humboldt Foundation through a Humboldt Research Award (MH) and a Bessel Research Award (SJR). The work of MH is partially supported by IISN - Belgium (conventions 4.4511.06 and 4.4514.08), by the Belgian Federal Science Policy Office through the Interuniversity Attraction Pole P6/11 and by an ARC research grant 2010-2015. The work of SJR is supported in part by the National Research Foundation of Korea grants KRF-2005-084-C00003, KRF-2010-220-C00003, KOSEF-2009-008-0372, EU-FP Marie Curie Training Program (KICOS-2009-06318), and the U.S. Department of Energy grant DE-FG02-90ER40542. ## Appendix A Higher Spin Algebra ### a.1 Definition The higher spin algebra in (2+1)-dimensional spacetime is the direct sum of two chiral copies of : $A = hs(1,1)_{L} \oplus hs(1,1)_{R}$ (A.1) The infinite-dimensional algebra itself is defined as follows. Consider an auxiliary space of polynomials of even degree in two commuting spinors , . One defines the trace of a polynomial as $\mathrm{Tr}\,f = 2f(0) \equiv 2f(\xi)\big|_{\xi=0}$. (A.2) Here, the factor 2 is included to match the traces of matrices considered below. Then, the elements of the algebra are the elements of with no constant term, viz. traceless polynomials. To define the Lie bracket, one first considers the star-product defined by $(f\star g)(\xi)=\exp\!\left[i\left(\frac{\partial}{\partial\eta^{1}}\frac{\partial}{\partial\zeta^{2}}-\frac{\partial}{\partial\eta^{2}}\frac{\partial}{\partial\zeta^{1}}\right)\right]f(\eta)\,g(\zeta)\Big|_{\eta=\zeta=\xi}$ (A.3) The star-product is associative. Although non-commutative, the star-product is trace-commutative: $(f\star g)(0)=(g\star f)(0)$, (A.4) viz. $\mathrm{Tr}(f\star g)=\mathrm{Tr}(g\star f)$ (A.5) because and are polynomials of even degree. 
The Lie bracket in the algebra is just the -commutator (modulo a numerical factor chosen for convenience to be ): [f,g]≡12i(f⋆g−g⋆f)=sin(∂∂η1∂∂ζ2−∂∂η2∂∂ζ1)f(η)g(ζ)∣∣η=ζ=ξ. (A.6) It fulfills the Jacobi identity because the star-product is associative. The Lie algebra possesses a symmetric and invariant bilinear form denoted , defined by (f,g)≡Tr(f⋆g). (A.7) This bilinear form is symmetric because of the trace-commutativity (A.5) and invariant (f,[g,h])=([f,g],h) (A.8) because of the associativity of the star-product and (A.5) again. The invariant symmetric bilinear form is non-degenerate. ### a.2 sl(2,R) subalgebra The polynomials of degree 2 form a subalgebra isomorphic to . Taking as a basis of this subspace as X11=12(ξ1)2,X12=ξ1ξ2,X22=12(ξ2)2 , (A.9) one finds [X11,X12]=2X11,[X11,X22]=X12,[X12,X22]=2X22 . (A.10) One can thus identify the with the standard Chevalley-Serre generators as follows: , and . Moreover, traces of the products of ’s match with traces of the products of the corresponding matrices. The non-zero scalar products are (X12,X12)=2,(X11,X22)=−1,(X22,X11)=−1. (A.11) The subalgebra splits into a direct sum of representations of : hs(1,1)=⊕k≥1Dk , (A.12) where the spin representation corresponds to the homogeneous polynomials of degree . The trivial representation does not appear because we consider traceless polynomials. It is straightforward to verify that the subspaces and are orthogonal for and that the scalar product is non-degenerate on each . We emphasize that, as showed in the text, the representation yields asymptotically the generators of conformal spin . Notice the shift of the spin label by one unit. ### a.3 more commutation relations We list here the commutation relations involving and . A basis of the representation of , polynomials of order 4, may be taken to be X1111=14!(ξ1)4,X1112=13!(ξ1)3ξ2,X1122=14(ξ1)2(ξ2)2, X1222=13!ξ1(ξ2)3,X2222=14!(ξ2)4 . 
More generally, we define $X(p,q)\equiv X_{\underbrace{1\cdots 1}_{p}\underbrace{2\cdots 2}_{q}}=\frac{1}{p!}\frac{1}{q!}(\xi^{1})^{p}(\xi^{2})^{q}$ with $p+q$ even (A.13) The vectors with form a basis of . We use the collective notation for the ’s with . The brackets of the ’s with the ’s are given by [X11,
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9580032229423523, "perplexity": 596.0583610433706}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-10/segments/1614178350706.6/warc/CC-MAIN-20210225012257-20210225042257-00156.warc.gz"}
https://www.deepdyve.com/lp/springer_journal/gold-nanoparticles-fabricated-by-pulsed-laser-ablation-in-NngqTJ0vlH
# Gold nanoparticles fabricated by pulsed laser ablation in supercritical CO2 Gold nanoparticles fabricated by pulsed laser ablation in supercritical CO2 Nanosecond pulsed laser ablation (PLA) of gold plate with an excitation wavelength of 532 nm was carried out in supercritical CO2 (scCO2) to fabricate gold nanoparticles. Surface morphology of the gold plate after irradiation and the crater depth after PLA were observed by scanning electron microscopy and laser scanning microscopy, while extinction spectra of gold nanoparticles collected in the glass slide was measured by UV–Vis spectrophotometer. The gold plate was ablated at various scCO2 densities and irradiation times at constant temperature of 40°C. The ablation was also conducted at atmospheric condition with air to evaluate the environmental dependence of ablation. Both surface morphology of the irradiated gold plate and crater depth formation were significantly affected by the changes in scCO2 density, the surrounding environment, and irradiation time. As expected, the increasing scCO2 density resulted in a deeper ablation crater, however, the deepest crater was obtained at a density of 0.63 g/cm3 or pressure of 10 MPa. Gold nanoparticles generated by PLA in scCO2 have been confirmed at the spectra band near 530 nm. http://www.deepdyve.com/assets/images/DeepDyve-Logo-lg.png Research on Chemical Intermediates Springer Journals # Gold nanoparticles fabricated by pulsed laser ablation in supercritical CO2 Research on Chemical Intermediates, Volume 37 (5) – Jan 29, 2011 8 pages /lp/springer_journal/gold-nanoparticles-fabricated-by-pulsed-laser-ablation-in-NngqTJ0vlH Publisher Springer Journals Subject Chemistry; Inorganic Chemistry ; Catalysis; Physical Chemistry ISSN 0922-6168 eISSN 1568-5675 D.O.I. 
10.1007/s11164-011-0279-x Publisher site See Article on Publisher Site ### Abstract Nanosecond pulsed laser ablation (PLA) of gold plate with an excitation wavelength of 532 nm was carried out in supercritical CO2 (scCO2) to fabricate gold nanoparticles. Surface morphology of the gold plate after irradiation and the crater depth after PLA were observed by scanning electron microscopy and laser scanning microscopy, while extinction spectra of gold nanoparticles collected in the glass slide was measured by UV–Vis spectrophotometer. The gold plate was ablated at various scCO2 densities and irradiation times at constant temperature of 40°C. The ablation was also conducted at atmospheric condition with air to evaluate the environmental dependence of ablation. Both surface morphology of the irradiated gold plate and crater depth formation were significantly affected by the changes in scCO2 density, the surrounding environment, and irradiation time. As expected, the increasing scCO2 density resulted in a deeper ablation crater, however, the deepest crater was obtained at a density of 0.63 g/cm3 or pressure of 10 MPa. Gold nanoparticles generated by PLA in scCO2 have been confirmed at the spectra band near 530 nm. ### Journal Research on Chemical IntermediatesSpringer Journals Published: Jan 29, 2011 ## You’re reading a free preview. Subscribe to read the entire article. ### DeepDyve is your personal research library It’s your single place to instantly discover and read the research that matters to you. Enjoy access to over 18 million articles from more than 15,000 peer-reviewed journals. All for just \$49/month ### Search Query the DeepDyve database, plus search all of PubMed and Google Scholar seamlessly ### Organize Save any article or search result from DeepDyve, PubMed, and Google Scholar... all in one place. ### Access Get unlimited, online access to over 18 million full-text articles from more than 15,000 scientific journals. 
### Your journals are on DeepDyve Read from thousands of the leading scholarly journals from SpringerNature, Elsevier, Wiley-Blackwell, Oxford University Press and more. All the latest content is available, no embargo periods. DeepDyve DeepDyve ### Pro Price FREE \$49/month \$360/year Save searches from PubMed Create folders to Export folders, citations
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8245230317115784, "perplexity": 8350.026840640126}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-18/segments/1555578759182.92/warc/CC-MAIN-20190426033614-20190426055614-00177.warc.gz"}
https://portlandpress.com/biochemj/article/245/3/723/23552/Effects-of-Mg2-anions-and-cations-on-the-Ca2-Mg2
In a previous paper [Gould, East, Froud, McWhirter, Stefanova & Lee (1986) Biochem. J. 237, 217-227] we presented a kinetic model for the activity of the Ca2+ + Mg2+-activated ATPase of sarcoplasmic reticulum. Here we extend the model to account for the effects on ATPase activity of Mg2+, cations and anions. We find that Mg2+ concentrations in the millimolar range inhibit ATPase activity, which we attribute to competition between Mg2+ and MgATP for binding to the nucleotide-binding site on the E1 and E2 conformations of the ATPase and on the phosphorylated forms of the ATPase. Competition is also suggested between Mg2+ and MgADP for binding to the phosphorylated form of the ATPase. ATPase activity is increased by low concentrations of K+, Na+ and NH4+, but inhibited by higher concentrations. It is proposed that these effects follow from an increase in the rate of dephosphorylation but a decrease in the rate of the conformational transition E1′PCa2-E2′PCa2 with increasing cation concentration. Li+ and choline+ decrease ATPase activity. Anions also decrease ATPase activity, the effects of I- and SCN- being more marked than that of Cl-. These effects are attributed to binding at the nucleotide-binding site, with a decrease in binding affinity and an increase in ‘off’ rate constant for the nucleotide. This content is only available as a PDF.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8465520739555359, "perplexity": 4877.050563354128}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-43/segments/1570986660231.30/warc/CC-MAIN-20191015182235-20191015205735-00263.warc.gz"}
https://www.physicsforums.com/threads/find-all-roots-of-x-3-3x-2-10x-6.240450/
# Homework Help: Find all roots of x^3 + 3x^2 - 10x + 6 1. Jun 15, 2008 ### stat643 find all roots of x^3 + 3x^2 - 10x + 6 the solution: identify the easy root of x=1, find the remaining roots from (x-1)(x^2+4x) using quadratic formula. The only thing i dont understand here is how to factorize to (x-1)(x^2+4x)... namely the (x^2+4x) part. 2. Jun 15, 2008 ### rocomath factor out a common term in x^2+4x and you're pretty much done 3. Jun 15, 2008 ### stat643 but how did i get to x^2+4x in the first place?... the original equation was x^3 + 3x^2 - 10x + 6.. i merely copied the solution... so i find the easy root of 1.. then what? Last edited: Jun 15, 2008 4. Jun 15, 2008 ### rocomath use synthetic division 5. Jun 15, 2008 ### matt grime No, don't use synthetic division (just yet). Pause for a moment and think: is it plausible that x^2+4x is a factor? It isn't. Copying out the answer is never a good idea. 6. Jun 15, 2008 ### stat643 i just looked up synthetic devision on wikipedia and tried it but it didnt work 7. Jun 15, 2008 ### stat643 should i take out the common term x first? 8. Jun 15, 2008 ### arildno Then you should practice synthetic division once more! Further, ponder over matt grime's words: WHY should you be suspicious of that particular factorization? Hint: How could you ascertain whether the factorization is correct or false? 9. Jun 15, 2008 ### stat643 oh sorry i copied it wrong, it should be (x-1)(x^2+4x-6).. now expanding that get: x^3 + 4x^2 -6x -x^2 -4x + 6 = x^3 + 3x^2 - 10x + 6.. so yeh its right now.. though i still cant get the synthetic devision right (its new to me) i tried to learn it now from http://en.wikipedia.org/wiki/Synthetic_division though i keep getting 1,2,-12,18 can someone help show how i would use synthetic devision for the original polynomial ? Last edited: Jun 15, 2008 10. Jun 15, 2008 ### arildno Okay, we wish to find a second-order polynomial so that: $$(x-1)(ax^{2}+bx+c)=x^{3}+3x^{2}-10x+6$$ holds for all x. 
I.e, we must determine a,b and c!! Multiplying out the left-hand side, and organizing in powers of x, the lefthandside can be rewritten as: $$ax^{3}+(b-a)x^{2}+(c-b)x-c= x^{3}+3x^{2}-10x+6$$ NOw, the coefficients of each power must be equal on the right and left sides, yielding the system of equations: a=1 b-a=3 c-b=-10 -c=6 This yields: a=1 b=4 c=-6 11. Jun 15, 2008
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8364384174346924, "perplexity": 3414.3847004038453}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-39/segments/1537267156252.31/warc/CC-MAIN-20180919141825-20180919161825-00328.warc.gz"}
http://www.cfd-online.com/Forums/main/115443-viscosity-term-discetrization-momentum-equation.html
# viscosity term in discetrization momentum equation Register Blogs Members List Search Today's Posts Mark Forums Read March 31, 2013, 06:38 viscosity term in discetrization momentum equation #1 New Member   hans Join Date: Sep 2012 Posts: 7 Rep Power: 4 Hi all, I have a question regarding the discretization of the momentum equation for use in a simple solver. I'm having trouble in understanding where the viscosity term goes. I'm currently reading Versteeg 2007 , An introduction to CFD, and can't figure it out. At some point the momentum equation, including the body surface forces caused by viscosity is discretisized to: As I understand it, the viscosity term is not present here, coefficients only have a velocity and density dependence. I can't seem to locate a viscosity in the source term,S ,either. If find that on the web it is often pointed out that the discretization is similar to that of the general transport equation where the viscosity term can be handled equal to the diffusion term and the velocity term to the property term. In this case the diffusion term is preserved in the discretization, why than not (or at least lost to me) in the momentum equation? Can anyone explain what i'm missing or point me in a direction where i can find some answers?I would be very happy to find the complete derivation of the discretization of the momentum equation so i can see what's happening step by step. Kind regards, March 31, 2013, 06:51 #2 Senior Member Filippo Maria Denaro Join Date: Jul 2010 Posts: 1,588 Rep Power: 20 Quote: Originally Posted by hans-186 Hi all, I have a question regarding the discretization of the momentum equation for use in a simple solver. I'm having trouble in understanding where the viscosity term goes. I'm currently reading Versteeg 2007 , An introduction to CFD, and can't figure it out. 
At some point the momentum equation, including the body surface forces caused by viscosity is discretized to: As I understand it, the viscosity term is not present here, coefficients only have a velocity and density dependence. I can't seem to locate a viscosity in the source term, S, either. I find that on the web it is often pointed out that the discretization is similar to that of the general transport equation where the viscosity term can be handled equal to the diffusion term and the velocity term to the property term. In this case the diffusion term is preserved in the discretization, why then not (or at least lost to me) in the momentum equation? Can anyone explain what i'm missing or point me in a direction where i can find some answers? I would be very happy to find the complete derivation of the discretization of the momentum equation so i can see what's happening step by step. Kind regards, in § 6.3 it is explicitly stated that the coefficients contain a combination of convective and diffusive terms.. March 31, 2013, 06:56 #3 New Member   hans Join Date: Sep 2012 Posts: 7 Rep Power: 4 FMDenaro, Thanks for your reply! I've seen this in chapter 6.3. But I'm a bit confused in how to interpret this, the viscosity and diffusive terms are the same? I presume this is the diffusion of momentum from one cell to the other? How to couple this momentum diffusion term to viscosity then? Last edited by hans-186; March 31, 2013 at 07:25. April 1, 2013, 12:17 #4 New Member   Aniket Sachdeva Join Date: Mar 2012 Posts: 22 Rep Power: 5 Viscosity by definition is the momentum diffusivity.. The rate at which momentum of one layer of the fluid is diffused to other layers is decided by the viscosity.. April 1, 2013, 13:34 #5 New Member   hans Join Date: Sep 2012 Posts: 7 Rep Power: 4 Yeah, I'm back on track. Got somewhat lost in the maze I guess. Thanks for your replies. 
Thread Tools Display Modes Linear Mode Posting Rules You may not post new threads You may not post replies You may not post attachments You may not edit your posts BB code is On Smilies are On [IMG] code is On HTML code is OffTrackbacks are On Pingbacks are On Refbacks are On Forum Rules Similar Threads Thread Thread Starter Forum Replies Last Post zwdi FLUENT 13 December 5, 2013 18:58 Cyp OpenFOAM Programming & Development 8 June 12, 2012 09:36 MPJ OpenFOAM 3 October 4, 2011 09:44 jannnesss CFX 0 January 8, 2010 20:53 Michael Main CFD Forum 1 June 25, 1999 10:20 All times are GMT -4. The time now is 18:15.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8177857995033264, "perplexity": 1032.2256672127464}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-27/segments/1435375102712.76/warc/CC-MAIN-20150627031822-00073-ip-10-179-60-89.ec2.internal.warc.gz"}
http://mathoverflow.net/questions/39002/optimizing-over-matrices-with-spectral-radius-1/39013
Optimizing over matrices with spectral radius <1? Suppose $F(x)$ is a convex objective function on $n\times n$ matrices, and I need to numerically optimize $F$ with the condition that $x$ has spectral radius less than $1$. This might be too hard, so an approximation would be needed. Has this problem been studied before? Motivation: Boltzmann machines are hard to evaluate when spectral radius of the weight matrix is large, especially if it's above $1$ so best fit to data subject to this constraint would give a useful model. Example: Let $X=\{1,-1\}^d$ and $\hat{X}$ some list of $\{1,-1\}$ $d$-tuples. Find $$\max_A \sum_{x\in \hat{X}} \mathbf{x}'A\mathbf{x} - |\hat{X}|\log \sum_{x\in X} \exp(\mathbf{x}'A\mathbf{x})$$ Where $A$ is symmetric real-valued $d\times d$ matrix with spectral radius < 1. This needs to be done in time polynomial in $d$ and linear in $|\hat{X}|$. When spectral radius is <1, belief propagation gives a reasonably accurate way to approximate gradient of this objective in $O(|\hat{X}|d^2)$ time - If the function is convex, can't you restrict trivially to spectral radius=1? –  Federico Poloni Sep 16 '10 at 19:21 Well, I want <1, but restricting to, say, 1/2 would be useful, how would I do that? –  Yaroslav Bulatov Sep 16 '10 at 20:45 By convexity, $f(\frac{a+b}2) \leq \frac{f(a)+f(b)}2$, therefore one among $f(a)$ and $f(b)$ is larger (or equal) than $f(\frac{a+b}2)$. So the supremum cannot occur on an inner point (i.e., one that you can write as the midpoint of two other points in the set). So you are can restrict wlog to the boundary of your (convex) set. –  Federico Poloni Sep 17 '10 at 2:04 If your matrices are symmetric, the set of matrices with spectral radius $\le 1$ is convex, and can be modelled using a linear matrix inequality (LMI), see e.g. page 147 in Lectures on Modern Convex Optimization by Ben-Tal and Nemirovski. 
If you wanted to minimize a convex objective that is also semidefinite-representable, you could in principle formulate and solve your problem as a semidefinite programming problem. However, maximizing a convex objective over a convex set is a much more difficult problem. - Good observation, I re-examined the underlying problem, and looks like it comes down to minimizing a convex objective (or maximizing concave negation, as it's more frequently presented) –  Yaroslav Bulatov Sep 17 '10 at 18:03 Thanks, that formula seems pretty useful. But I still don't know how to turn it into Semidefinite Programming since my objective is not linear...do you have a good reference for some examples of how that's done? –  Yaroslav Bulatov Sep 21 '10 at 20:59 Ben-Tan and Nemirovski is the bible for semidefinite modelling, so you should start looking there. –  F_G Sep 22 '10 at 10:03 This started as a comment, but it's too long. Is the objective function invariant under conjugation? Spectral radius is far from a convex function of matrices. If you take two non-negative matrices with 1's on the diagonal, one zero below the diagonal and positive above, the other vice versa, any convex combination has spectral radius bigger than 1. It's easy to make the spectral radius as large as you like. The set of characteristic polynomials for matrices of spectral radius 1 isn't convex either. For example, the average of $(x - .99)^2$ and $(x - .99i)^2$ has roots outside the unit circle. However, every conjugacy class in $GL(n,\mathbb C)$ has a representative that is upper triangular, and the upper triangular matrices of spectral radius 1 form a convex set. This may make it easier to find the optimum (depending what it is, which you didn't say). There are convex sets containing just conjugacy classes of spectral radius < 1 for various other kinds of matrices. 
Even if the function is not invariant under conjugation, it may help to break it up by conjugacy class: for each conjugacy class, find the optimum, then find the optimum among all conjugacy classes. - Does invariance under conjugation matter if my objective and constraints only involve real numbers? Also, I added the complete problem description –  Yaroslav Bulatov Sep 17 '10 at 4:42 No, with the added information, you're using $A$ as a quadratic form, and as FG noted, spectral radius is a convex function among those, and the potential function is not invariant by a significant group. But: as you've phrased it, $\hat{X}$ might contain exponentially many $d$-tuples, so to evaluate the function just once already could take exponential time. Are you asking about probabilistic algorithms? –  Bill Thurston Sep 17 '10 at 5:23 Actually $O(|\hat{X}|)$ time is acceptable because $\hat{X}$ represents the input to the optimizer which is small compared to $2^d$. A typical value would be $|\hat{X}|<10,000$, $d<1000$ –  Yaroslav Bulatov Sep 17 '10 at 8:03
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9196537137031555, "perplexity": 265.00962032291847}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-06/segments/1422122238694.20/warc/CC-MAIN-20150124175718-00222-ip-10-180-212-252.ec2.internal.warc.gz"}
https://en.wikipedia.org/wiki/Air_shower_(physics)
# Air shower (physics) Cosmic ray air shower created by a 1TeV proton hitting the atmosphere 20 km above the Earth. The shower was simulated using the AIRES package. Animated 3d models of this and other showers can be found on COSMUS. An air shower is an extensive (many kilometres wide) cascade of ionized particles and electromagnetic radiation produced in the atmosphere when a primary cosmic ray (i.e. one of extraterrestrial origin) enters the atmosphere. The term cascade means that the incident particle, which could be a proton, a nucleus, an electron, a photon, or (rarely) a positron, strikes an atom's nucleus in the air so as to produce many energetic hadrons. The unstable hadrons decay in the air speedily into other particles and electromagnetic radiation, which are part of the shower components. The secondary radiation rains down, including x-rays, muons, protons, antiprotons, alpha particles, pions, electrons, positrons, and neutrons. The dose from cosmic radiation is largely from muons, neutrons, and electrons, with a dose rate that varies in different parts of the world and based largely on the geomagnetic field, altitude, and solar cycle. The cosmic-radiation dose rate on airplanes is so high that, according to the United Nations UNSCEAR 2000 Report (see links at bottom), airline flight crew workers receive more dose on average than any other worker, including those in nuclear power plants. Airline crews receive more cosmic rays if they routinely work flight routes that take them close to the North or South pole at high altitudes, where this type of radiation is maximal. The air shower was discovered by Bruno Rossi in 1934. By observing the cosmic ray with the detectors placed apart from each other, Rossi recognized that many particles arrive simultaneously at the detectors.[1] This phenomenon is now called an air shower. ## Air shower formation Air shower formation in the atmosphere. 
First proton collides with a particle in the air creating pions, protons and neutrons. After the primary cosmic particle has collided with the air molecule, the main part of the first interactions are pions. Also kaons and baryons may be created. Pions and kaons are not stable, thus they may decay into other particles. The neutral pions ${\displaystyle \scriptstyle \pi ^{0}}$ decay into photons ${\displaystyle \scriptstyle \gamma }$ in a process ${\displaystyle \scriptstyle \pi ^{0}\rightarrow \gamma +\gamma }$. The photons produced form an electromagnetic cascade by creating more photons, protons, antiprotons, electrons and positrons.[2] The charged pions ${\displaystyle \scriptstyle \pi ^{\pm }}$ preferentially decay into muons and neutrinos in the processes ${\displaystyle \scriptstyle \pi ^{+}\rightarrow \mu ^{+}+\nu }$ and ${\displaystyle \scriptstyle \pi ^{-}\rightarrow \mu ^{-}+\nu }$. This is how the muons and neutrinos are produced in the air shower.[2] Also, kaons may be a source of muons, which means the decay process is ${\displaystyle \scriptstyle K^{+/-}\rightarrow \mu ^{+/-}+\nu }$. On the other hand, kaons can also produce pions via the decay mode ${\displaystyle \scriptstyle K^{+/-}\rightarrow \pi ^{+/-}+\pi ^{0}}$.[2] ## Detection The original particle arrives with high energy and hence a velocity near the speed of light, so the products of the collisions tend also to move generally in the same direction as the primary, while to some extent spreading sidewise. In addition, the secondary particles produce a widespread flash of light in forward direction due to the Cherenkov effect, as well as fluorescence light that is emitted isotropically from the excitation of nitrogen molecules. The particle cascade and the light produced in the atmosphere can be detected with surface detector arrays and optical telescopes. Surface detectors typically use Cherenkov detectors or Scintillation counters to detect the charged secondary particles at ground level. 
The telescopes used to measure the fluorescence and Cherenkov light use large mirrors to focus the light on PMT clusters. The longitudinal profile of the number of charged particles can be parameterized by the Gaisser–Hillas function.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 8, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6489344239234924, "perplexity": 1068.2963171691426}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-30/segments/1469257824757.8/warc/CC-MAIN-20160723071024-00320-ip-10-185-27-174.ec2.internal.warc.gz"}
https://www.mathsnetalevel.com/188
Go to content ## Summary/Background A geometric progression, also known as a geometric sequence, is a sequence of numbers where each term after the first is found by multiplying the previous one by a fixed non-zero number called the common ratio. For example, the sequence 2, 6, 18, 54, ... is a geometric progression with common ratio 3 and 10, 5, 2.5, 1.25, ... is a geometric sequence with common ratio 1/2. The sum of the terms of a geometric progression is known as a geometric series. \displaystyle \sum_{k=1}^n ar^{k-1} = ar^0 + ar^1 + ar^2 + ... + ar^{n-1} You can get a better display of the maths by downloading special TeX fonts from jsMath. In the meantime, we will do the best we can with the fonts you have, but it may not be pretty and some equations may not be rendered correctly. ## Glossary ### geometric A sequence where each term is obtained by multiplying the previous one by a constant. ### sequence a number pattern in a definite order following a certain rule ### series the sum of terms in a sequence ### union The union of two sets A and B is the set containing all the elements of A and B. Full Glossary List ## This question appears in the following syllabi: SyllabusModuleSectionTopic AP Calculus BC (USA)5Sequences and seriesGeometric series AQA A-Level (UK - Pre-2017)C2Sequences and seriesGeometric series AQA A2 Maths 2017Pure MathsSequences and SeriesGeometric Sequences AQA AS/A2 Maths 2017Pure MathsSequences and SeriesGeometric Sequences CBSE XI (India)AlgebraSequence and SeriesGeometric progression CCEA A-Level (NI)C2Sequences and seriesGeometric series CIE A-Level (UK)P1Sequences and seriesGeometric series D Sequences and SeriesD5 Geometric SequencesGeometric Sequences Edexcel A-Level (UK - Pre-2017)C2Sequences and seriesGeometric series Edexcel A2 Maths 2017Pure MathsSequences and SeriesGeometric Sequences Edexcel AS/A2 Maths 2017Pure MathsSequences and SeriesGeometric Sequences I.B. Higher Level1Sequences and seriesGeometric series I.B. 
Standard Level1Sequences and seriesGeometric series Methods (UK)M7Sequences and seriesGeometric series I.B. (MSSL)2Sequences and seriesGeometric series OCR A-Level (UK - Pre-2017)C2Sequences and seriesGeometric series OCR A2 Maths 2017Pure MathsArithmetic and Geometric SequencesGeometric Sequences OCR MEI A2 Maths 2017Pure MathsArithmetic and Geometric SequencesGeometric Sequences OCR-MEI A-Level (UK - Pre-2017)C2Sequences and seriesGeometric series Pre-Calculus (US)E3Sequences and seriesGeometric series Pre-U A-Level (UK)3Sequences and seriesGeometric series Scottish Advanced HighersM2Sequences and seriesGeometric series Scottish (Highers + Advanced)AM2Sequences and seriesGeometric series Universal (all site questions)SSequences and seriesGeometric series WJEC A-Level (Wales)C2Sequences and seriesGeometric series
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9598944783210754, "perplexity": 9136.765450392524}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-26/segments/1560627999000.76/warc/CC-MAIN-20190619143832-20190619165832-00145.warc.gz"}
http://mathhelpforum.com/differential-equations/222159-first-order-autonomous-equations.html
# Math Help - First order autonomous equations 1. ## First order autonomous equations Hello, I've been working more out of Christian Constanda's "Differential Equations: A Primer for Scientists and Engineers" ISBN 978-1-4614-7296-4. This is from section 3.3, which is on autonomous equations and their models. In my particular section of Diff-EQ, all homework is 'optional'. I'm having what is likely a stupid problem. I've been working later sections all day - second order homogeneous differential equations - and have been fine. It's only now that I'm back to first order autonomous equations that I'm hitting a brick wall. This chapter is killing me every time I look at any of its sections! The exercise numbers are 1 and 15. The instructions are: "Find the critical points and the equilibrium solutions of the given equation and solve the equation with each of the prescribed initial conditions." Additionally we're supposed to sketch the graphs of the solutions and comment on the stability/instability of the equilibrium solutions as well as identify what model is governed by the problem (I.E. population with logistic growth, population with a critical threshold, chemical reactions, etc). The part in quotes is what I'm looking for help with. #1: y'=300y - 2y^2; y(0) = 50; y(0) = 100; and y(0) = 200 Correct answer: Critical points are at 0, 150. y(t)=0 (unstable), y(t) = 150 (asymptotically stable); y=(150y_0)/(y_0-(y_0-150)e^(-300t)). This models a population with logistic growth, tau=300 and Beta = 150. The given equation in the section for population with logistic growth is y'=((tau)-(alpha)*y)*y OR y'=(tau)*(1-y/(Beta))*y. Alpha is a constant > 0. My work is: dy/dt = 300y-2y^2 dy/(y^2-150y) = -2*dt 1/150 * LN((y-150)/y) = -2T + C (y-150)/y = e^(-300T+150C) y=(-150*e^(300t))/(e^300t(e^c-1)+1) At this point I realized I was swinging my shovel in the air trying to dig myself up out of a hole. Where'd I take a wrong turn/what rule did I break? 
Is anyone willing to work this problem through to solution? The second question I have is for good measure to make certain I get the rest of the section. It's exercise 15, I think I need to see it solved for y. y'=y^2+y-6; y(0) = -4; y(0) = -2; y(0) = 1; and y(0) = 3. -3, 2; y(t) = -3 (asymptotically stable), y(t) = 2 (unstable), and y=[-3(y_0 - 2) - 2(y_0+3)e^(-5t)] / [y_0 - 2 - (y_0 + 3) e^(-5t)]. It does not, by the book's answer, model any specific system. Thank you very much for your time! 2. ## Re: First order autonomous equations Hey AnotherGeek. This line: dy/(y^2-150y) = -2*dt looks ok but the next one doesn't. You should try completing the square and then solving the integral. It will be in terms of Integral 1/[(y-75)^2 - b] for some non-zero b (Complete the square to get b). (Also look up arctanh function).
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8755000829696655, "perplexity": 1103.2625272690025}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-42/segments/1413507445886.27/warc/CC-MAIN-20141017005725-00288-ip-10-16-133-185.ec2.internal.warc.gz"}
http://math.stackexchange.com/questions/117441/the-compactness-of-the-unit-sphere-in-finite-dimensional-normed-vector-space
The compactness of the unit sphere in finite dimensional normed vector space We define $(\mathbb{R}^m, \|.\|)$ to be a finite dimensional normed vector space with $\|.\|$ is defined to be any norm in $\mathbb{R^m}$. Let $S = \lbrace x \in \mathbb{R}^m: \| x\| = 1 \rbrace.$ Prove that $S$ is compact in $(\mathbb{R}^m, \|.\|).$ - have you heard of Heine-Borel? –  Holdsworth88 Mar 7 '12 at 8:16 @Sulaiman: Since you are happy with the answers for this questions, I suggest you accept one of them by ticking it. It makes this site much more organised. While I'm at it, I also suggest to you to accept the (good) answers to your other questions. –  Michalis Mar 7 '12 at 19:09 @Michalis Thank you but I already did. In fact I ticked up both answers if this is not a problem. –  Zizo Mar 7 '12 at 19:32 @Sulaiman: What you did is upvote. For the OP there is also another option, that marks a question as "answered". It is the tick right below the up/downvote buttons. It is important that you mark answered questions, as the users of this site will be able to concentrate on the unanswered ones. –  Michalis Mar 7 '12 at 19:38 Thank you @Michalis! sure! done!! –  Zizo Mar 7 '12 at 19:51 You can use induction on $m$ and properties of $\mathbb R$ to show compacity using sequential compactness, which means the same thing for metric spaces. Now consider the norm induced on the space $\mathbb R \cong \mathbb R \times \{ 0 \} \times \dots \times \{ 0 \}$ viewed as a sub-metric-space of $\mathbb R^m$, and also consider the subsequence $^1 x_n$ induced by putting all the other components but the first equal to $0$. Therefore the first component is a sequence of real numbers. Since in the reals, every metric is equivalent to the absolute value metric in the following sense $$\forall (\mathbb R,d), \quad \exists c_1, c_2 > 0 \quad s.t. 
\quad \forall x,y \in \mathbb R, \quad c_1 d(x,y) \le |x-y| \le c_2 d(x,y).$$ One can deduce that the Bolzano-Weierstrass theorem also holds if we replace $| \cdot |$ by the induced metric from the norm in $\mathbb R^m$. Since the sequence $x_n$ is bounded, the sequence $^1x_n$ is also bounded in $\mathbb R$. Therefore there exists a subsequence of the sequence $x_n$ such that the first component converges. Repeat this procedure with the other components $^kx_n$ with $1 \le k \le n$, and you will get a subsequence that converges component by component, hence converges. This gives you for every sequence an element $x$ and a subsequence for which $x_n \to x$. Since $\| x_n \| = 1$ for every $n$, clearly $\|x \| = 1$, so that your subsequence converges in $S$ and we are done. That is one way to do it ; if you have seen theorems in class that might help, perhaps they might make this less complicated. Hope that helps, - Thanks, this answered my question. Nice work. –  Zizo Mar 7 '12 at 12:50 Is there any reference for this proof? –  Zizo Mar 12 '12 at 3:30 I just wrote it ; so I guess you could call me the reference. –  Patrick Da Silva Mar 12 '12 at 15:10 Use the Bolzano-Weierstrass theorem: Since all the norms on $\mathbb{R}^m$ are equivalent, your subset will be closed and bounded in the euclidian norm $||\cdot||_2$, and hence compact. Here is an exercise I found, that shows that all norms on $\mathbb{R}^m$ are equivalent: http://math.bu.edu/people/paul/771/equivalent_norms.pdf - The compacity depends on the metric space, thus on the metric ; your way is one way to go, but it's not complete yet ; you need to show that compacity is a property invariant by equivalent metrics induced by norms. –  Patrick Da Silva Mar 7 '12 at 8:56 The equivalence essentially relies on the fact that equivalent metrics induce open balls that can be included in one another. –  Patrick Da Silva Mar 7 '12 at 9:17 @RagibZaman: This is not generally true, it depends on your base field. 
It is true if you have a complete valued field like $\mathbb{R},\mathbb{C}$ or $\mathbb{Q}_p$, but fails for example if you look at vector spaces over $\mathbb{Q}$ (e.g. number fields). –  Michalis Mar 9 '12 at 11:21 @Patrick Da Silva: You are right, proving that the balls are included in each other you can show that two equivalent norms (with the "inequality"-definition) induce the same topology, now I understand your remark. –  Michalis Mar 9 '12 at 11:25 @Michalis Sorry, I should have remembered that! Every normed vector space I've been studying lately has been over $\mathbb{R}$ or $\mathbb{C}$. –  Ragib Zaman Mar 9 '12 at 13:31
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9623887538909912, "perplexity": 270.017729502257}, "config": {"markdown_headings": false, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2013-48/segments/1387345759442/warc/CC-MAIN-20131218054919-00083-ip-10-33-133-15.ec2.internal.warc.gz"}
http://mathhelpforum.com/trigonometry/469-finding-length-arc-print.html
# Finding the length of an arc • June 19th 2005, 06:18 AM Justardms Finding the length of an arc :) Hi, just wanted to make sure that I am doing this correctly? Can someone please help me with Finding the length of an arc if the radius is 10" and the central angle is 45 deg. I need to find the answer in terms of pie. Thanks! Also, I need to find a positive co-terminal angle for 120 deg. and one for pie/5 and a negative co-terminal angle for 420 deg. and one for 3 pie/5 can someone verify what the complement of angle of 20 deg is. and the supplement of an angle of 3pie / 5 How do I change a the following into decimal form? 23 deg. 30' 15" THANK YOU VERY MUCH • June 19th 2005, 10:10 AM MathGuru arc length give radius and angle Welcom Justardms, Just a note of advice, try to keep questions to a minimum of 1 or 2 per post as you will get better results. In keeping with my advice I will answer your first question only;) To find the arc length you can use the equation $s=r\theta$ where s = arc length, r = radius, and $\theta$ = angle in radians so $s = 10"(45*\frac{\pi}{180})$ • June 19th 2005, 11:49 AM Justardms Is this correct? So is the answer 10pie/4 ? First I have to convert deg. in to rads? correct? which is pie / 4 and then mutiple that by 10? correct? So the Arc lenght is 10 pie / 4? Or would you write 10 times pie / 4? Does it matter!?!?!?! THANKS and thanks for the tip about how to submit questions! • June 19th 2005, 11:51 AM Justardms Since it is 10 inches * pie / 4, I am just not sure how I would write that? Thanks again • June 19th 2005, 03:28 PM Math Help pi or 3.14 you can either answer with $\frac{10\pi}{4}inches = \frac{5\pi}{2}inches$ or substitue 3.14 for pi and multiply it out to get the answer in inches.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 4, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8272402882575989, "perplexity": 941.5829893750247}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-11/segments/1424936464809.62/warc/CC-MAIN-20150226074104-00269-ip-10-28-5-156.ec2.internal.warc.gz"}
https://coal.askdefine.com/
# Dictionary Definition coal ### Noun 1 fossil fuel consisting of carbonized vegetable matter deposited in the Carboniferous period 2 a hot glowing or smouldering fragment of wood or coal left from a fire [syn: ember] ### Verb 1 burn to charcoal; "Without a drenching rain, the forest fire will char everything" [syn: char] 2 supply with coal 3 take in coal; "The big ship coaled" # User Contributed Dictionary ## English col. ### Noun 1. A black rock formed from prehistoric plant remains, composed largely of carbon and burned as a fuel. 2. A piece of coal used for burning. Note that in British English the first of the following examples would usually be used, whereas in American English the latter would. Put some coals on the fire. Put some coal on the fire. 3. A type of coal, such as bituminous, anthracite, or lignite, and grades and varieties thereof. 4. A smouldering piece of material. Just as the camp-fire died down to just coals, with no flames to burn the marshmallows, someone dumped a whole load of wood on, so I gave up and went to bed. #### Translations uncountable: carbon rock • Czech: uhlí • Kurdish: Sorani: خه‌ڵوزی به‌رد • Spanish: carbón countable: carbon rock • Spanish: carbón smouldering material • Spanish: brasa ### Verb 1. To take in coal; as, the steamer coaled at Southampton. 2. To be converted to charcoal. Quotations • 1957: As a result, particles of wood and twigs insufficiently coaled are frequently found at the bottom of such pits. — H.R. Schubert, History of the British Iron and Steel Industry, p. 18. # Extensive Definition Coal is a fossil fuel formed in ecosystems where plant remains were preserved by water and mud from oxidization and biodegradation, thus sequestering atmospheric carbon. Coal is a readily combustible black or brownish-black rock. It is a sedimentary rock, but the harder forms, such as anthracite coal, can be regarded as metamorphic rocks because of later exposure to elevated temperature and pressure. 
It is composed primarily of carbon along with assorted other elements, including sulfur. It is the largest single source of fuel for the generation of electricity world-wide, as well as the largest world-wide source of carbon dioxide emissions, which, according to the IPCC are responsible for causing climate change and global warming. In terms of carbon dioxide emissions, coal is slightly ahead of petroleum and about double that of natural gas. Coal is extracted from the ground by coal mining, either underground mining or open pit mining (surface mining). ## Types of coal As geological processes apply pressure to dead matter over time, under suitable conditions, it is transformed successively into • Peat, considered to be a precursor of coal. It has industrial importance as a fuel in some countries, for example, Ireland and Finland. • Lignite, also referred to as brown coal, is the lowest rank of coal and used almost exclusively as fuel for steam-electric power generation. Jet is a compact form of lignite that is sometimes polished and has been used as an ornamental stone since the Iron Age. • Sub-bituminous coal, whose properties range from those of lignite to those of bituminous coal and are used primarily as fuel for steam-electric power generation. • Bituminous coal, a dense coal, usually black, sometimes dark brown, often with well-defined bands of bright and dull material, used primarily as fuel in steam-electric power generation, with substantial quantities also used for heat and power applications in manufacturing and to make coke. • Anthracite, the highest rank; a harder, glossy, black coal used primarily for residential and commercial space heating. • Graphite, technically the highest rank, but difficult to ignite and is not so commonly used as fuel: it is mostly used in pencils and, when powdered, as a lubricant. The classification of coal is generally based on the content of volatiles. However, the exact classification varies between countries. 
According to the German classification, coal is classified as follows: The middle six grades in the table represent a progressive transition from the English-language sub-bituminous to bituminous coal, while the last class is an approximate equivalent to anthracite, but more inclusive (the U.S. anthracite has < 8% volatiles). ## Early use China Coal Information Institute reports the Chinese mined coalstone for fuel 10,000 years ago at the time of the New Stone Age, or Neolithic Era. "People in Shanxi, now the largest coal production base, have been burning coal as fuel since then." Outcrop coal was used in Britain during the Bronze Age (2000-3000 years BC), where it has been detected as forming part of the composition of funeral pyres. It was also commonly used in the early period of the Roman occupation: Evidence of trade in coal (dated to about AD 200) has been found at the inland port of Heronbridge, near Chester, and in the Fenlands of East Anglia, where coal from the Midlands was transported via the Car Dyke for use in drying grain. Coal cinders have been found in the hearths of villas and military forts, particularly in Northumberland, dated to around AD 400. In the west of England contemporary writers described the wonder of a permanent brazier of coal on the altar of Minerva at Aquae Sulis (modern day Bath) although in fact easily-accessible surface coal from what is now the Somerset coalfield was in common use in quite lowly dwellings locally. However, there is no evidence that the product was of great importance in Britain before the High Middle Ages, after about AD 1000. Mineral coal came to be referred to as "seacoal," probably because it came to many places in eastern England, including London, by sea. This is accepted as the more likely explanation for the name than that it was found on beaches, having fallen from the exposed coal seams above or washed out of underwater coal seam outcrops. 
These easily accessible sources had largely become exhausted (or could not meet the growing demand) by the 13th century, when underground mining from shafts or adits was developed. Approximately 40% of the world electricity production uses coal. The total known deposits recoverable by current technologies, including highly polluting, low energy content types of coal (i.e., lignite, bituminous), might be sufficient for 300 years' use at current consumption levels, although maximal production could be reached within decades (see World Coal Reserves, below). A more energy-efficient way of using coal for electricity production would be via solid-oxide fuel cells or molten-carbonate fuel cells (or any oxygen ion transport based fuel cells that do not discriminate between fuels, as long as they consume oxygen), which would be able to get 60%–85% combined efficiency (direct electricity + waste heat steam turbine). Currently these fuel cell technologies can only process gaseous fuels, and they are also sensitive to sulfur poisoning, issues which would first have to be worked out before large scale commercial success is possible with coal. As far as gaseous fuels go, one idea is pulverized coal in a gas carrier, such as nitrogen. Another option is coal gasification with water, which may lower fuel cell voltage by introducing oxygen to the fuel side of the electrolyte, but may also greatly simplify carbon sequestration. ### Coking and use of coke Coke is a solid carbonaceous residue derived from low-ash, low-sulfur bituminous coal from which the volatile constituents are driven off by baking in an oven without oxygen at temperatures as high as 1,000 °C (1,832 °F) so that the fixed carbon and residual ash are fused together. Metallurgic coke is used as a fuel and as a reducing agent in smelting iron ore in a blast furnace. Coke from coal is grey, hard, and porous and has a heating value of 24.8 million Btu/ton (29.6 MJ/kg). 
Some cokemaking processes produce valuable byproducts that include coal tar, ammonia, light oils, and "coal gas". Petroleum coke is the solid residue obtained in oil refining, which resembles coke but contains too many impurities to be useful in metallurgical applications. #### Gasification seealso Underground Coal Gasification High prices of oil and natural gas are leading to increased interest in "BTU Conversion" technologies such as gasification, methanation and liquefaction. Coal gasification breaks down the coal into smaller molecular weight molecules, usually by subjecting it to high temperature and pressure, using steam and measured amounts of oxygen. This leads to the production of syngas, a mixture mainly consisting of carbon monoxide (CO) and hydrogen (H2). In the past, coal was converted to make coal gas, which was piped to customers to burn for illumination, heating, and cooking. At present, the safer natural gas is used instead. South Africa still uses gasification of coal for much of its petrochemical needs. The Synthetic Fuels Corporation was a U.S. government-funded corporation established in 1980 to create a market for alternatives to imported fossil fuels (such as coal gasification). The corporation was discontinued in 1985. Gasification is also a possibility for future energy use, as the produced syngas can be cleaned-up relatively easily leading to cleaner burning than burning coal directly (the conventional way). The cleanliness of the cleaned-up syngas is comparable to natural gas enabling to burn it in a more efficient gas turbine rather than in a boiler used to drive a steam turbine. Syngas produced by gasification can be CO-shifted meaning that the combustible CO in the syngas is transferred into carbon dioxide (CO2) using water as a reactant. The CO-shift reaction also produces an amount of combustible hydrogen (H2) equal to the amount of CO converted into CO2. 
The CO2 concentrations (or rather CO2 partial pressures) obtained by using coal gasification followed by a CO-shift reaction are much higher than in case of direct combustion of coal in air (which is mostly nitrogen). These higher concentrations of carbon dioxide make carbon capture and storage much more economical than it otherwise would be. #### Liquefaction - Coal-To-Liquids (CTL) Coals can also be converted into liquid fuels like gasoline or diesel by several different processes. The Fischer-Tropsch process of indirect synthesis of liquid hydrocarbons was used in Nazi Germany for many years and is today used by Sasol in South Africa. Coal would be gasified to make syngas (a balanced purified mixture of CO and H2 gas) and the syngas condensed using Fischer-Tropsch catalysts to make light hydrocarbons which are further processed into gasoline and diesel. Syngas can also be converted to methanol, which can be used as a fuel, fuel additive, or further processed into gasoline via the Mobil M-gas process. A direct liquefaction process Bergius process (liquefaction by hydrogenation) is also available but has not been used outside Germany, where such processes were operated both during World War I and World War II. SASOL in South Africa has experimented with direct hydrogenation. Several other direct liquefaction processes have been developed, among these being the SRC-I and SRC-II (Solvent Refined Coal) processes developed by Gulf Oil and implemented as pilot plants in the United States in the 1960s and 1970s. Another direct hydrogenation process was explored by the NUS Corporation in 1976 and patented by Wilburn C. Schroeder. The process involved dried, pulverized coal mixed with roughly 1wt% molybdenum catalysts. Hydrogenation occurred by use of high temperature and pressure synthesis gas produced in a separate gasifier. 
The process ultimately yielded a synthetic crude product, Naphtha, a limited amount of C3/C4 gas, light-medium weight liquids (C5-C10) suitable for use as fuels, small amounts of NH3 and significant amounts of CO2. Yet another process to manufacture liquid hydrocarbons from coal is low temperature carbonization (LTC). Coal is coked at temperatures between 450 and 700°C compared to 800 to 1000°C for metallurgical coke. These temperatures optimize the production of coal tars richer in lighter hydrocarbons than normal coal tar. The coal tar is then further processed into fuels. The Karrick process was developed by Lewis C. Karrick, an oil shale technologist at the U.S. Bureau of Mines in the 1920s. All of these liquid fuel production methods release carbon dioxide (CO2) in the conversion process, far more than is released in the extraction and refinement of liquid fuel production from petroleum. If these methods were adopted to replace declining petroleum supplies, carbon dioxide emissions would be greatly increased on a global scale. For future liquefaction projects, Carbon dioxide sequestration is proposed to avoid releasing it into the atmosphere, though no pilot projects have confirmed the feasibility of this approach on a wide scale. As CO2 is one of the process streams, sequestration is easier than from flue gases produced in combustion of coal with air, where CO2 is diluted by nitrogen and other gases. Sequestration will, however, add to the cost. The reaction of coal and water using high temperature heat from a nuclear reactor offers promise of liquid transport fuels that could prove carbon-neutral compared to petroleum use. The development of a reliable nuclear reactor that could provide 900 to 1000 deg C process heat, such as the pebble bed reactor, would be necessary. 
Coal liquefaction is one of the backstop technologies that could potentially limit escalation of oil prices and mitigate the effects of transportation energy shortage that some authors have suggested could occur under peak oil. This is contingent on liquefaction production capacity becoming large enough to satiate the very large and growing demand for petroleum. Estimates of the cost of producing liquid fuels from coal suggest that domestic U.S. production of fuel from coal becomes cost-competitive with oil priced at around 35 USD per barrel (break-even cost). This price, while above historical averages, is well below current oil prices. This makes coal a viable financial alternative to oil for the time being, although current production is small. Among commercially mature technologies, advantages of indirect coal liquefaction over direct coal liquefaction are reported by Williams and Larson (2003). Estimates are reported for sites in China where the break-even cost for coal liquefaction may be in the range of 25 to 35 USD per barrel of oil. Intensive research and project developments have been implemented from 2001. The World CTL Award is granted to individuals who have made eminent contributions to the understanding and development of coal liquefaction. The 2008 presentation ceremony took place at the World CTL 2008 Conference (3 & 4 April, 2008). ### Coal as a traded commodity The price of coal has gone up from around $30 per short ton in 2000 to around $130 per short ton in 2008. In North America, a Central Appalachian coal futures contract is currently traded on the New York Mercantile Exchange (trading symbol QL). The trading unit is 1,550 short tons per contract, and is quoted in U.S. dollars and cents per ton. Since coal is the principal fuel for generating electricity in the United States, the futures contract provides coal producers and the electric power industry an important tool for hedging and risk management. 
In addition to the NYMEX contract, the IntercontinentalExchange (ICE) has European (Rotterdam) and South African (Richards Bay) coal futures available for trading. The trading unit for these contracts is 5,000 tonnes, and are also quoted in U.S. dollars and cents per tonne. ### Cultural usage Coal is the official state mineral of Kentucky and the official state rock of Utah. Both U.S. states have a historic link to coal mining. Some cultures uphold that children who misbehave will receive coal from Santa Claus for Christmas in their stockings instead of presents. It is also customary and lucky in Scotland to give coal as a gift on New Year's Day. It happens as part of First-Footing and represents warmth for the year to come. ## Environmental effects There are a number of adverse environmental effects of coal mining and burning. These effects include: • release of carbon dioxide and methane, both of which are greenhouse gases, which are causing climate change and global warming according to the IPCC. Coal is the largest contributor to the human-made increase of CO2 in the air. • waste products including uranium, thorium, and other heavy metals • acid rain • interference with groundwater and water table levels • impact of water use on flows of rivers and consequential impact on other land-uses • dust nuisance • subsidence above tunnels, sometimes damaging infrastructure • rendering land unfit for other uses. • coal-fired power plants without effective fly ash capture are one of the largest sources of human-caused background radiation exposure. ## Energy density The energy density of coal, i.e. its heating value, is roughly 24 megajoules per kilogram. The energy density of coal can also be expressed in kilowatt-hours for some unit of mass, the units that electricity is most commonly sold in, to estimate how much coal is required to power electrical appliances. 
The energy density of coal is 6.67 kW·h/kg and the typical thermodynamic efficiency of coal power plants is about 30%. Of the 6.67 kW·h of energy per kilogram of coal, about 30% of that can successfully be turned into electricity—the rest is waste heat. Coal power plants obtain approximately 2.0 kW·h per kg of burned coal. As an example, running one 100 watt computer for one year requires 876 kW·h (100 W × 24 h/day × 365 days = 876,000 W·h = 876 kW·h). Converting this power usage into physical coal consumption: $\frac{876\ \mathrm{kW{\cdot}h}}{2.0\ \mathrm{kW{\cdot}h/kg}} = 438\ \mathrm{kg} = 966\ \mathrm{lb}$ It takes 438 kg (966 pounds) of coal to power a computer for one full year. One should also take into account transmission and distribution losses caused by resistance and heating in the power lines, which is in the order of 5–10%, depending on distance from the power station and other factors. ## Relative carbon cost Because coal is at least 50% carbon (by mass), 1 kg of coal contains at least 0.5 kg of carbon, which is $\frac{0.5\ \mathrm{kg}}{12\ \mathrm{kg/kmol}} = \frac{1}{24}\ \mathrm{kmol}$ where 1 mol is equal to NA (Avogadro Number) particles. This combines with oxygen in the atmosphere during combustion, producing carbon dioxide, with a molecular weight of 12 + 16 × 2 = 44 kg/kmol, so $\frac{1}{24}$ kmol of CO2 is produced from the $\frac{1}{24}$ kmol of carbon present in every kilogram of coal, which once trapped in CO2 weighs approximately $\frac{1}{24}\ \mathrm{kmol} \cdot \frac{44\ \mathrm{kg}}{1\ \mathrm{kmol}} = \frac{44}{24}\ \mathrm{kg} \approx 1.83\ \mathrm{kg}$. This can be used to put a carbon-cost of energy on the use of coal power. Since the useful energy output of coal is about 30% of the 6.67 kW·h/kg(coal), we can say about 2 kW·h/kg(coal) of energy is produced. Since 1 kg coal roughly translates as 1.83 kg of CO2, we can say that using electricity from coal produces CO2 at a rate of about 0.915 kg/(kW·h), or about 0.254 kg/MJ. This estimate compares favourably with the U.S. Energy Information Agency's 1999 report on CO2 emissions for energy generation, which quotes a specific emission rate of 950 g CO2/(kW·h). 
By comparison, generation from oil in the U.S. was 890 g CO2/(kW·h), while natural gas was 600 g CO2/(kW·h). Estimates for specific emission from nuclear power, hydro, and wind energy vary, but are about 100 times lower. See environmental effects of nuclear power for estimates. ## Coal fires There are hundreds of coal fires burning around the world. Those burning underground can be difficult to locate and many cannot be extinguished. Fires can cause the ground above to subside, their combustion gases are dangerous to life, and breaking out to the surface can initiate surface wildfires. Coal seams can be set on fire by spontaneous combustion or contact with a mine fire or surface fire. A grass fire in a coal area can set dozens of coal seams on fire. Coal fires in China burn 109 million tonnes of coal a year, emitting 360 million metric tons of CO2. This contradicts the ratio of 1:1.83 given earlier, but it amounts to 2-3% of the annual worldwide production of CO2 from fossil fuels, or as much as emitted from all of the cars and light trucks in the United States. In Centralia, Pennsylvania (a borough located in the Coal Region of the United States) an exposed vein of coal ignited in 1962 due to a trash fire in the borough landfill, located in an abandoned anthracite strip mine pit. Attempts to extinguish the fire were unsuccessful, and it continues to burn underground to this day. The Australian Burning Mountain was originally believed to be a volcano, but the smoke and ash comes from a coal fire which may have been burning for over 5,500 years. At Kuh i Malik in Yagnob Valley, Tajikistan, coal deposits have been burning for thousands of years, creating vast underground labyrinths full of unique minerals, some of them very beautiful. Local people once used this method to mine ammoniac. 
This place has been well-known since the time of Herodotus, but European geographers mis-interpreted the Ancient Greek descriptions as the evidence of active volcanism in Turkestan (up to the 19th century, when Russian army invaded the area). The reddish siltstone rock that caps many ridges and buttes in the Powder River Basin (Wyoming), and in western North Dakota is called porcelanite, which also may resemble the coal burning waste "clinker" or volcanic "scoria". Clinker is rock that has been fused by the natural burning of coal. In the Powder River Basin approximately 27 to 54 billion tonnes of coal burned within the past three million years. Wild coal fires in the area were reported by the Lewis and Clark Expedition as well as explorers and settlers in the area. ## Production trends In 2006, China was the top producer of coal with 38% share followed by the USA and India, reports the British Geological Survey. ### World coal reserves At the end of 2006 the recoverable coal reserves amounted around 800 or 900 gigatonnes. The United States Energy Information Administration gives world reserves as 998 billion short tons (equal to 905 gigatonnes), approximately half of it being hard coal. At the current production rate, this would last 164 years. At the current global total energy consumption of 15 terawatt, there is enough coal to provide the entire planet with all of its energy for 57 years. The 998 billion tons of recoverable coal reserves estimated by the Energy Information Administration are equal to about 4,417 BBOE (billion barrels of oil equivalent). The amount of coal burned during 2001 was calculated as 2.337 GTOE (gigatonnes of oil equivalent), which is about 46 million barrels of oil equivalent per day. Were consumption to continue at that rate those reserves would last about 263 years. As a comparison, natural gas provided 51 million barrels (oil equivalent), and oil 76 million barrels, per day during 2001. 
British Petroleum, in its annual report 2007, estimated at 2006 end, there were 909,064 million tons of proven coal reserves worldwide, or 147 years reserves to production ratio. This figure only includes reserves classified as "proven"; exploration drilling programs by mining companies, particularly in under-explored areas, are continually providing new reserves. In many cases, companies are aware of coal deposits that have not been sufficiently drilled to qualify as "proven". However, some nations haven't updated their information and assume reserves remain at the same levels even with withdrawals. Of the three fossil fuels coal has the most widely distributed reserves; coal is mined in over 100 countries, and on all continents except Antarctica. The largest reserves are found in the USA, Russia, Australia, China, India and South Africa. Note the table below. ## References • The Face of Decline: The Pennsylvania Anthracite Region in the Twentieth Century • Where the Sun Never Shines: A History of America's Bloody Coal Industry • In the Kingdom of Coal; An American Family and the Rock That Changed the World • Water: A Natural History • Mining America: The Industry and the Environment, 1800-1980 • Coal: A Human History coal in Afrikaans: Steenkool coal in Arabic: فحم حجري coal in Aymara: K'illima coal in Min Nan: Thô͘-thoàⁿ coal in Belarusian: Вугаль coal in Belarusian (Tarashkevitsa): Вугаль coal in Bosnian: Ugalj coal in Bulgarian: Каменни въглища coal in Catalan: Carbó coal in Czech: Uhlí coal in Welsh: Glo coal in Danish: Kul (bjergart) coal in Pennsylvania German: Kohl coal in German: Kohle coal in Estonian: Kivisüsi coal in Modern Greek (1453-): Γαιάνθρακας (καύσιμο) coal in Spanish: Carbón coal in Esperanto: Terkarbo coal in Basque: Ikatz coal in Persian: زغال‌سنگ coal in French: Houille (roche) coal in Galician: Carbón coal in Armenian: Ածուխ coal in Croatian: Ugljen coal in Ido: Karbono coal in Icelandic: Kol coal in Italian: Carbone (minerale) coal in 
Hebrew: פחם coal in Latvian: Akmeņogles coal in Lithuanian: Akmens anglis coal in Macedonian: Јаглен coal in Mongolian: Нүүрс coal in Dutch: Steenkool coal in Japanese: 石炭 coal in Norwegian: Kull coal in Norwegian Nynorsk: Kol coal in Polish: Węgle kopalne coal in Portuguese: Carvão mineral coal in Romanian: Cărbune coal in Quechua: K'illimsa coal in Russian: Уголь coal in Albanian: Qymyri coal in Slovak: Uhlie coal in Slovenian: Premog coal in Finnish: Kivihiili coal in Swedish: Kol (bränsle) coal in Tamil: நிலக்கரி coal in Thai: ถ่านหิน coal in Vietnamese: Than đá coal in Turkish: Kömür coal in Ukrainian: Кам'яне вугілля coal in Walloon: Hoye coal in Contenese: 煤 coal in Chinese: 煤炭 # Synonyms, Antonyms and Related Words alcohol, anthracite, ash, ashes, benzine, blaze, blister, brand, briquette, brown coal, bunker, burn, burn in, burn off, burnable, burning ember, butane, calx, carbon, cast, cater, cauterize, char, charcoal, cinder, clinker, coke, combustible, coom, crack, crow, cupel, detonate, dope, dross, ebon, ebony, ember, ethane, ethanol, explode, feed, fill up, fireball, firebrand, firing, flame, flammable, flammable material, forage, found, fuel, fuel additive, fuel dope, fuel up, fulminate, fume, gas, gas carbon, gas up, gasoline, heptane, hexane, inflammable, inflammable material, ink, isooctane, jet, jet fuel, kerosene, lava, lignite, live coal, methane, methanol, natural gas, night, octane, oil, oxidate, oxidize, paraffin, parch, peat, pentane, pitch, propane, propellant, provender, provision, purvey, pyrolyze, raven, reek, refuel, rocket fuel, scorch, scoria, sea coal, sear, sell, singe, slag, sloe, smoke, smudge, smut, solder, soot, stoke, sullage, swinge, tar, top off, torrefy, turf, vesicate, victual, vulcanize, weld
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.532004177570343, "perplexity": 8228.506148180297}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-13/segments/1490218189462.47/warc/CC-MAIN-20170322212949-00006-ip-10-233-31-227.ec2.internal.warc.gz"}
https://www.science.gov/topicpages/0-9/3d+ladar+imagery.html
#### Sample records for 3d ladar imagery 1. Workbench for 3D target detection and recognition from airborne motion stereo and ladar imagery Roy, Simon; Se, Stephen; Kotamraju, Vinay; Maheux, Jean; Nadeau, Christian; Larochelle, Vincent; Fournier, Jonathan 2010-04-01 3D imagery has a well-known potential for improving situational awareness and battlespace visualization by providing enhanced knowledge of uncooperative targets. This potential arises from the numerous advantages that 3D imagery has to offer over traditional 2D imagery, thereby increasing the accuracy of automatic target detection (ATD) and recognition (ATR). Despite advancements in both 3D sensing and 3D data exploitation, 3D imagery has yet to demonstrate a true operational gain, partly due to the processing burden of the massive dataloads generated by modern sensors. In this context, this paper describes the current status of a workbench designed for the study of 3D ATD/ATR. Among the project goals is the comparative assessment of algorithms and 3D sensing technologies given various scenarios. The workbench is comprised of three components: a database, a toolbox, and a simulation environment. The database stores, manages, and edits input data of various types such as point clouds, video, still imagery frames, CAD models and metadata. The toolbox features data processing modules, including range data manipulation, surface mesh generation, texture mapping, and a shape-from-motion module to extract a 3D target representation from video frames or from a sequence of still imagery. The simulation environment includes synthetic point cloud generation, 3D ATD/ATR algorithm prototyping environment and performance metrics for comparative assessment. In this paper, the workbench components are described and preliminary results are presented. Ladar, video and still imagery datasets collected during airborne trials are also detailed. 2. 
Expedient Gap Definition Using 3D LADAR DTIC Science & Technology 2006-09-01 Research and Development Center (ERDC), ASI has developed an algorithm to reduce the 3D point cloud acquired with the LADAR system into sets of 2D ...developed an algorithm to extract from this 3D point cloud any user-defined number of 2D slices. ASI has incorporated this sensor and algorithm into...direction, ASI has developed an algorithm to condense the 3D point cloud acquired with the LADAR system into sets of 2D profiles that describe the 3. Image Science Research for Speckle-based LADAR (Speckle Research for 3D Imaging LADAR) DTIC Science & Technology 2008-04-03 patterns to establish 3D qualitative features for a remote object, (2) study of performance of this compact ladar at photon counting light levels, and (3...establish 3D qualitative features for a remote object, (2) study of performance of this compact ladar at photon counting light levels, and (3) space and... holography using a microbolometer array," Applied Optics, 47, A7-A12 (2008). 2. Nien-an Chang and Nicholas George, "Speckle in the 4F optical system 4. Speckle Research for 3D Imaging LADAR DTIC Science & Technology 2011-03-24 computing systems. Four major research projects are (1) study of speckle patterns including metrology for small pixels on photodetector arrays. (2) Theory...radars (LADAR) as well as related basic studies of novel integrated imaging and computing systems. Four major research projects are (1) study of...the depth of field through unbalanced OPD, OSA annual meeting, Rochester NY (2008) 3. Nicholas George and Wanli Chi, Emerging integrated computational 5. Chirped amplitude modulation ladar for range and Doppler measurements and 3-D imaging Stann, Barry; Redman, Brian C.; Lawler, William; Giza, Mark; Dammann, John; Krapels, Keith 2007-04-01 6. 
A model and simulation to predict the performance of angle-angle-range 3D flash ladar imaging sensor systems Grasso, Robert J.; Odhner, Jefferson E.; Russo, Leonard E.; McDaniel, Robert V. 2004-11-01 BAE SYSTEMS reports on a program to develop a high-fidelity model and simulation to predict the performance of angle-angle-range 3D flash LADAR Imaging Sensor systems. 3D Flash LADAR is the latest evolution of laser radar systems and provides unique capability in its ability to provide high-resolution LADAR imagery upon a single laser pulse; rather than constructing an image from multiple pulses as with conventional scanning LADAR systems. However, accurate methods to model and simulate performance from these 3D LADAR systems have been lacking, relying upon either single pixel LADAR performance or extrapolating from passive detection FPA performance. The model and simulation developed and reported here is expressly for 3D angle-angle-range imaging LADAR systems. To represent an accurate "real world" type environment, this model and simulation accounts for: 1) laser pulse shape; 2) detector array size; 3) atmospheric transmission; 4) atmospheric backscatter; 5) atmospheric turbulence; 6) obscurants, and; 7) obscurant path length. The angle-angle-range 3D flash LADAR model and simulation accounts for all pixels in the detector array by modeling and accounting for the non-uniformity of each individual pixel in the array. Here, noise sources are modeled based upon their pixel-to-pixel statistical variation. A cumulative probability function is determined by integrating the normal distribution with respect to detector gain, and, for each pixel, a random number is compared with the cumulative probability function resulting in a different gain for each pixel within the array. In this manner very accurate performance is determined pixel-by-pixel. 
Model outputs are in the form of 3D images of the far-field distribution across the array as intercepted by the target, gain distribution, power distribution, average signal-to-noise, and probability of detection across the array. Other outputs include power distribution from a target, signal-to-noise vs. range, probability of 7. A model and simulation to predict the performance of angle-angle-range 3D flash LADAR imaging sensor systems Grasso, Robert J.; Odhner, Jefferson E.; Russo, Leonard E.; McDaniel, Robert V. 2005-10-01 BAE SYSTEMS reports on a program to develop a high-fidelity model and simulation to predict the performance of angle-angle-range 3D flash LADAR Imaging Sensor systems. 3D Flash LADAR is the latest evolution of laser radar systems and provides unique capability in its ability to provide high-resolution LADAR imagery upon a single laser pulse; rather than constructing an image from multiple pulses as with conventional scanning LADAR systems. However, accurate methods to model and simulate performance from these 3D LADAR systems have been lacking, relying upon either single pixel LADAR performance or extrapolating from passive detection FPA performance. The model and simulation developed and reported here is expressly for 3D angle-angle-range imaging LADAR systems. To represent an accurate "real world" type environment, this model and simulation accounts for: 1) laser pulse shape; 2) detector array size; 3) atmospheric transmission; 4) atmospheric backscatter; 5) atmospheric turbulence; 6) obscurants, and; 7) obscurant path length. The angle-angle-range 3D flash LADAR model and simulation accounts for all pixels in the detector array by modeling and accounting for the non-uniformity of each individual pixel in the array. Here, noise sources are modeled based upon their pixel-to-pixel statistical variation. 
A cumulative probability function is determined by integrating the normal distribution with respect to detector gain, and, for each pixel, a random number is compared with the cumulative probability function resulting in a different gain for each pixel within the array. In this manner very accurate performance is determined pixel-by-pixel. Model outputs are in the form of 3D images of the far-field distribution across the array as intercepted by the target, gain distribution, power distribution, average signal-to-noise, and probability of detection across the array. Other outputs include power distribution from a target, signal-to-noise vs. range, probability of 8. 3D-LZ helicopter ladar imaging system Savage, James; Harrington, Walter; McKinley, R. Andrew; Burns, H. N.; Braddom, Steven; Szoboszlay, Zoltan 2010-04-01 A joint-service team led by the Air Force Research Laboratory's Munitions and Sensors Directorates completed a successful flight test demonstration of the 3D-LZ Helicopter LADAR Imaging System. This was a milestone demonstration in the development of technology solutions for a problem known as "helicopter brownout", the loss of situational awareness caused by swirling sand during approach and landing. The 3D-LZ LADAR was developed by H.N. Burns Engineering and integrated with the US Army Aeroflightdynamics Directorate's Brown-Out Symbology System aircraft state symbology aboard a US Army EH-60 Black Hawk helicopter. The combination of these systems provided an integrated degraded visual environment landing solution with landing zone situational awareness as well as aircraft guidance and obstacle avoidance information. Pilots from the U.S. Army, Air Force, Navy, and Marine Corps achieved a 77% landing rate in full brownout conditions at a test range at Yuma Proving Ground, Arizona. This paper will focus on the LADAR technology used in 3D-LZ and the results of this milestone demonstration. 9. 
The application of iterative closest point (ICP) registration to improve 3D terrain mapping estimates using the flash 3D ladar system Woods, Jack; Armstrong, Ernest E.; Armbruster, Walter; Richmond, Richard 2010-04-01 The primary purpose of this research was to develop an effective means of creating a 3D terrain map image (point-cloud) in GPS denied regions from a sequence of co-bore sighted visible and 3D LIDAR images. Both the visible and 3D LADAR cameras were hard mounted to a vehicle. The vehicle was then driven around the streets of an abandoned village used as a training facility by the German Army and imagery was collected. The visible and 3D LADAR images were then fused and 3D registration performed using a variation of the Iterative Closest Point (ICP) algorithm. The ICP algorithm is widely used for various spatial and geometric alignment of 3D imagery producing a set of rotation and translation transformations between two 3D images. ICP rotation and translation information obtain from registering the fused visible and 3D LADAR imagery was then used to calculate the x-y plane, range and intensity (xyzi) coordinates of various structures (building, vehicles, trees etc.) along the driven path. The xyzi coordinates information was then combined to create a 3D terrain map (point-cloud). In this paper, we describe the development and application of 3D imaging techniques (most specifically the ICP algorithm) used to improve spatial, range and intensity estimates of imagery collected during urban terrain mapping using a co-bore sighted, commercially available digital video camera with focal plan of 640×480 pixels and a 3D FLASH LADAR. Various representations of the reconstructed point-clouds for the drive through data will also be presented. 10. A range/depth modulation transfer function (RMTF) framework for characterizing 3D imaging LADAR performance Staple, Bevan; Earhart, R. 
P.; Slaymaker, Philip A.; Drouillard, Thomas F., II; Mahony, Thomas 2005-05-01 Murray, James T.; Moran, Steven E.; Roddier, Nicolas; Vercillo, Richard; Bridges, Robert; Austin, William 2003-08-01 High-resolution three-dimensional flash ladar system technologies are under development that enables remote identification of vehicles and armament hidden by heavy tree canopies. We have developed a sensor architecture and design that employs a 3D flash ladar receiver to address this mission. The receiver captures 128×128×>30 three-dimensional images for each laser pulse fired. The voxel size of the image is 3"×3"×4" at the target location. A novel signal-processing algorithm has been developed that achieves sub-voxel (sub-inch) range precision estimates of target locations within each pixel. Polarization discrimination is implemented to augment the target-to-foliage contrast. When employed, this method improves the range resolution of the system beyond the classical limit (based on pulsewidth and detection bandwidth). Experiments were performed with a 6 ns long transmitter pulsewidth that demonstrate 1-inch range resolution of a tank-like target that is occluded by foliage and a range precision of 0.3" for unoccluded targets. 12. Small SWAP 3D imaging flash ladar for small tactical unmanned air systems Bird, Alan; Anderson, Scott A.; Wojcik, Michael; Budge, Scott E. 2015-05-01 13. A theoretical framework for 3D LADAR ATR problem definition and performance evaluation DelMarco, Stephen; Sobel, Erik; Douglas, Joel 2005-05-01 LADAR imagery provides the capability to represent high resolution detail of 3D surface geometry of complex targets. In previous work we exploited this capability for automatic target recognition (ATR) by developing matching algorithms for performing surface matching of 3D LADAR point clouds with highly-detailed target CAD models. A central challenge in evaluating ATR performance is characterizing the degree of problem difficulty. 
One of the most important factors is the inherent similarity of target signatures. We've developed a flexible approach to target taxonomy based on 3D shape which includes a classification framework for defining the target recognition problem and evaluating ATR algorithm performance. The target model taxonomy consists of a hierarchical, tree-structured target classification scheme in which different levels of the tree correspond to different degrees of target classification difficulty. Each node in the tree corresponds to a collection of target models forming a target category. Target categories near the tree root represent large and very general target classes, exhibiting large interclass distance. Targets in these categories are easily separated. Target categories near the tree bottom represent very specific target classes with small interclass distance. These targets are difficult to separate. In this paper we focus on creation of optimal categories. We develop approaches for optimal aggregation of target model types into categories which provide for improved classification performance. We generate numerical results using match scores derived from matching highly-detailed CAD models of civilian ground vehicles. 14. MBE based HgCdTe APDs and 3D LADAR sensors Jack, Michael; Asbrock, Jim; Bailey, Steven; Baley, Diane; Chapman, George; Crawford, Gina; Drafahl, Betsy; Herrin, Eileen; Kvaas, Robert; McKeag, William; Randall, Valerie; De Lyon, Terry; Hunter, Andy; Jensen, John; Roberts, Tom; Trotta, Patrick; Cook, T. Dean 2007-04-01 Raytheon is developing HgCdTe APD arrays and sensor chip assemblies (SCAs) for scanning and staring LADAR systems. The nonlinear characteristics of APDs operating in moderate gain mode place severe requirements on layer thickness and doping uniformity as well as defect density. MBE based HgCdTe APD arrays, engineered for high performance, meet the stringent requirements of low defects, excellent uniformity and reproducibility. 
In situ controls for alloy composition and substrate temperature have been implemented at HRL, LLC and Raytheon Vision Systems and enable consistent run to run results. The novel epitaxial designed using separate absorption-multiplication (SAM) architectures enables the realization of the unique advantages of HgCdTe including: tunable wavelength, low-noise, high-fill factor, low-crosstalk, and ambient operation. Focal planes built by integrating MBE detectors arrays processed in a 2 x 128 format have been integrated with 2 x 128 scanning ROIC designed. The ROIC reports both range and intensity and can detect multiple laser returns with each pixel autonomously reporting the return. FPAs show exceptionally good bias uniformity <1% at an average gain of 10. Recent breakthrough in device design has resulted in APDs operating at 300K with essentially no excess noise to gains in excess of 100, low NEP <1nW and GHz bandwidth. 3D LADAR sensors utilizing these FPAs have been integrated and demonstrated both at Raytheon Missile Systems and Naval Air Warfare Center Weapons Division at China Lake. Excellent spatial and range resolution has been achieved with 3D imagery demonstrated both at short range and long range. Ongoing development under an Air Force Sponsored MANTECH program of high performance HgCdTe MBE APDs grown on large silicon wafers promise significant FPA cost reduction both by increasing the number of arrays on a given wafer and enabling automated processing. 15. Aerial video and ladar imagery fusion for persistent urban vehicle tracking Cho, Peter; Greisokh, Daniel; Anderson, Hyrum; Sandland, Jessica; Knowlton, Robert 2007-04-01 We assess the impact of supplementing two-dimensional video with three-dimensional geometry for persistent vehicle tracking in complex urban environments. 
Using recent video data collected over a city with minimal terrain content, we first quantify erroneous sources of automated tracking termination and identify those which could be ameliorated by detailed height maps. They include imagery misregistration, roadway occlusion and vehicle deceleration. We next develop mathematical models to analyze the tracking value of spatial geometry knowledge in general and high resolution ladar imagery in particular. Simulation results demonstrate how 3D information could eliminate large numbers of false tracks passing through impenetrable structures. Spurious track rejection would permit Kalman filter coasting times to be significantly increased. Track lifetimes for vehicles occluded by trees and buildings as well as for cars slowing down at corners and intersections could consequently be prolonged. We find high resolution 3D imagery can ideally yield an 83% reduction in the rate of automated tracking failure. 16. Ultra-Compact, High-Resolution LADAR System for 3D Imaging NASA Technical Reports Server (NTRS) Xu, Jing; Gutierrez, Roman 2009-01-01 An eye-safe LADAR system weighs under 500 grams and has range resolution of 1 mm at 10 m. This laser uses an adjustable, tiny microelectromechanical system (MEMS) mirror that was made in SiWave to sweep laser frequency. The size of the laser device is small (70x50x13 mm). The LADAR uses all the mature fiber-optic telecommunication technologies in the system, making this innovation an efficient performer. The tiny size and light weight makes the system useful for commercial and industrial applications including surface damage inspections, range measurements, and 3D imaging. 17. Finding Organized Structures in 3-D LADAR Data DTIC Science & Technology 2004-12-01 work exists also on how to extract planar and linear objects from scattered 3-D point clouds , see for example [5], [6]. 
Methods were even proposed to...of structure detection and segmentation from 3-D point clouds collected from a single sensor location or integrated from multiple locations. In [2...primitives to point clouds are difficult to use practically for large data sets containing multiple complex structures, in opposition to multiple planar 18. 3D organization of 2D urban imagery Cho, Peter 2008-04-01 Working with New York data as a representative and instructive example, we fuse aerial ladar imagery with satellite pictures and Geographic Information System (GIS) layers to form a comprehensive 3D urban map. Digital photographs are then mathematically inserted into this detailed world space. Reconstruction of the photos' view frusta yields their cameras' locations and pointing directions which may have been a priori unknown. It also enables knowledge to be projected from the urban map onto georegistered image planes. For instance, absolute geolocations can be assigned to individual pixels, and GIS annotations can be transferred from 3D to 2D. Moreover, such information propagates among all images whose view frusta intercept the same urban map location. We demonstrate how many imagery exploitation challenges (e.g. identify objects in cluttered scenes, select all photos containing some stationary ground target, etc) become mathematically tractable once a 3D framework for analyzing 2D images is adopted. Finally, we close by briefly discussing future applications of this work to photo-based querying of urban knowledge databases. 19. Segmentation, classification, and pose estimation of maritime targets in flash-ladar imagery Armbruster, Walter; Hammer, Marcus 2012-09-01 The paper presents new techniques for automatic segmentation, classification, and generic pose estimation of ships and boats in laser radar imagery. 
Segmentation, which primarily involves elimination of water reflections, is based on modeling surface waves and comparing the expected water reflection signature to the ladar intensity image. Shape classification matches a parametric shape representation of a generic ship hull with parameters extracted from the range image. The extracted parameter vector defines an instance of a geometric 3D model which can be registered with the range image for precision pose estimation. Results show that reliable automatic acquisition, aim point selection and realtime tracking of maritime targets is feasible even for erratic sensor and target motions, temporary occlusions, and evasive target maneuvers. 20. Use of laser radar imagery in optical pattern recognition: the Optical Processor Enhanced Ladar (OPEL) Program Goldstein, Dennis H.; Mills, Stuart A.; Dydyk, Robert B. 1998-03-01 The Optical Processor Enhanced Ladar (OPEL) program is designed to evaluate the capabilities of a seeker obtained by integrating two state-of-the-art technologies, laser radar, or ladar, and optical correlation. The program is a thirty-two month effort to build, optimize, and test a breadboard seeker system (the OPEL System) that incorporates these two promising technologies. Laser radars produce both range and intensity image information. Use of this information in an optical correlator is described. A correlator with binary phase input and ternary amplitude and phase filter capability is assumed. Laser radar imagery was collected on five targets over 360 degrees of azimuth from 3 elevation angles. This imagery was then processed to provide training sets in preparation for filter construction. This paper reviews the ladar and optical correlator technologies used, outlines the OPEL program, and describes the OPEL system. 1. 
Geiger-mode APD camera system for single-photon 3D LADAR imaging Entwistle, Mark; Itzler, Mark A.; Chen, Jim; Owens, Mark; Patel, Ketan; Jiang, Xudong; Slomkowski, Krystyna; Rangwala, Sabbir 2012-06-01 The unparalleled sensitivity of 3D LADAR imaging sensors based on single photon detection provides substantial benefits for imaging at long stand-off distances and minimizing laser pulse energy requirements. To obtain 3D LADAR images with single photon sensitivity, we have demonstrated focal plane arrays (FPAs) based on InGaAsP Geiger-mode avalanche photodiodes (GmAPDs) optimized for use at either 1.06 μm or 1.55 μm. These state-of-the-art FPAs exhibit excellent pixel-level performance and the capability for 100% pixel yield on a 32 x 32 format. To realize the full potential of these FPAs, we have recently developed an integrated camera system providing turnkey operation based on FPGA control. This system implementation enables the extremely high frame-rate capability of the GmAPD FPA, and frame rates in excess of 250 kHz (for 0.4 μs range gates) can be accommodated using an industry-standard CameraLink interface in full configuration. Real-time data streaming for continuous acquisition of 2 μs range gate point cloud data with 13-bit time-stamp resolution at 186 kHz frame rates has been established using multiple solid-state storage drives. Range gate durations spanning 4 ns to 10 μs provide broad operational flexibility. The camera also provides real-time signal processing in the form of multi-frame gray-scale contrast images and single-frame time-stamp histograms, and automated bias control has been implemented to maintain a constant photon detection efficiency in the presence of ambient temperature changes. A comprehensive graphical user interface has been developed to provide complete camera control using a simple serial command set, and this command set supports highly flexible end-user customization. 2. 
Processing 3D flash LADAR point-clouds in real-time for flight applications Craig, R.; Gravseth, I.; Earhart, R. P.; Bladt, J.; Barnhill, S.; Ruppert, L.; Centamore, C. 2007-04-01 Ball Aerospace & Technologies Corp. has demonstrated real-time processing of 3D imaging LADAR point-cloud data to produce the industry's first time-of-flight (TOF) 3D video capability. This capability is uniquely suited to the rigorous demands of space and airborne flight applications and holds great promise in the area of autonomous navigation. It will provide long-range, three dimensional video information to autonomous flight software or pilots for immediate use in rendezvous and docking, proximity operations, landing, surface vision systems, and automatic target recognition and tracking. This is enabled by our new generation of FPGA based "pixel-tube" processors, coprocessors and their associated algorithms which have led to a number of advancements in high-speed wavefront processing along with additional advances in dynamic camera control, and space laser designs based on Ball's CALIPSO LIDAR. This evolution in LADAR is made possible by moving the mechanical complexity required for a scanning system into the electronics, where production, integration, testing and life-cycle costs can be significantly reduced. This technique requires a state of the art TOF read-out integrated circuit (ROIC) attached to a sensor array to collect high resolution temporal data, which is then processed through FPGAs. The number of calculations required to process the data is greatly reduced thanks to the fact that all points are captured at the same time and thus correlated. This correlation allows extremely efficient FPGA processing. This capability has been demonstrated in prototype form at both Marshall Space Flight Center and Langley Research Center on targets that represent docking and landing scenarios.
This report outlines many aspects of this work as well as aspects of our recent testing at Marshall's Flight Robotics Laboratory. 3. A model and simulation to predict 3D imaging LADAR sensor systems performance in real-world type environments Grasso, Robert J.; Dippel, George F.; Russo, Leonard E. 2006-08-01 BAE SYSTEMS reports on a program to develop a high-fidelity model and simulation to predict the performance of angle-angle-range 3D flash LADAR Imaging Sensor systems. Accurate methods to model and simulate performance from 3D LADAR systems have been lacking, relying upon either single pixel LADAR performance or extrapolating from passive detection FPA performance. The model and simulation here is developed expressly for 3D angle-angle-range imaging LADAR systems. To represent an accurate "real world" type environment this model and simulation accounts for: 1) laser pulse shape; 2) detector array size; 3) detector noise figure; 4) detector gain; 5) target attributes; 6) atmospheric transmission; 7) atmospheric backscatter; 8) atmospheric turbulence; 9) obscurants; 10) obscurant path length, and; 11) platform motion. The angle-angle-range 3D flash LADAR model and simulation accounts for all pixels in the detector array by modeling and accounting for the non-uniformity of each individual pixel. Here, noise sources and gain are modeled based upon their pixel-to-pixel statistical variation. A cumulative probability function is determined by integrating the normal distribution with respect to detector gain, and, for each pixel, a random number is compared with the cumulative probability function resulting in a different gain for each pixel within the array. In this manner very accurate performance is determined pixel-by-pixel for the entire array. Model outputs are 3D images of the far-field distribution across the array as intercepted by the target, gain distribution, power distribution, average signal-to-noise, and probability of detection across the array. 4. 
The Development of a 3D LADAR Simulator Based on a Fast Target Impulse Response Generation Approach 2017-09-01 A new laser detection and ranging (LADAR) simulator has been developed, using MATLAB and its graphical user interface, to simulate direct detection time of flight LADAR systems, and to produce 3D simulated scanning images under a wide variety of conditions. This simulator models each stage from the laser source to data generation and can be considered as an efficient simulation tool to use when developing LADAR systems and their data processing algorithms. The novel approach proposed for this simulator is to generate the actual target impulse response. This approach is fast and able to deal with high scanning requirements without losing the fidelity that accompanies increments in speed. This leads to a more efficient LADAR simulator and opens up the possibility for simulating LADAR beam propagation more accurately by using a large number of laser footprint samples. The approach is to select only the parts of the target that lie in the laser beam angular field by mathematically deriving the required equations and calculating the target angular ranges. The performance of the new simulator has been evaluated under different scanning conditions, the results showing significant increments in processing speeds in comparison to conventional approaches, which are also used in this study as a point of comparison for the results. The results also show the simulator's ability to simulate phenomena related to the scanning process, for example, type of noise, scanning resolution and laser beam width. 5. Fusion of current technologies with real-time 3D MEMS ladar for novel security and defense applications Siepmann, James P. 2006-05-01 Through the utilization of scanning MEMS mirrors in ladar devices, a whole new range of potential military, Homeland Security, law enforcement, and civilian applications is now possible. 
Currently, ladar devices are typically large (>15,000 cc), heavy (>15 kg), and expensive (>100,000) while current MEMS ladar designs are more than a magnitude less, opening up a myriad of potential new applications. One such application with current technology is a GPS integrated MEMS ladar unit, which could be used for real-time border monitoring or the creation of virtual 3D battlefields after being dropped or propelled into hostile territory. Another current technology that can be integrated into a MEMS ladar unit is digital video that can give high resolution and true color to a picture that is then enhanced with range information in a real-time display format that is easier for the user to understand and assimilate than typical gray-scale or false color images. The problem with using 2-axis MEMS mirrors in ladar devices is that in order to have a resonance frequency capable of practical real-time scanning, they must either be quite small and/or have a low maximum tilt angle. Typically, this value has been less than (< or = to 10 mg-mm2-kHz2)-degrees. We have been able to solve this problem by using angle amplification techniques that utilize a series of MEMS mirrors and/or a specialized set of optics to achieve a broad field of view. These techniques and some of their novel applications mentioned will be explained and discussed herein. 6. Maritime target identification in flash-ladar imagery NASA Astrophysics Data System (ADS) Armbruster, Walter; Hammer, Marcus 2012-05-01 The paper presents new techniques and processing results for automatic segmentation, shape classification, generic pose estimation, and model-based identification of naval vessels in laser radar imagery. The special characteristics of focal plane array laser radar systems such as multiple reflections and intensity-dependent range measurements are incorporated into the algorithms. 
The proposed 3D model matching technique is probabilistic, based on the range error distribution, correspondence errors, the detection probability of potentially visible model points and false alarm errors. The match algorithm is robust against incomplete and inaccurate models, each model having been generated semi-automatically from a single range image. A classification accuracy of about 96% was attained, using a maritime database with over 8000 flash laser radar images of 146 ships at various ranges and orientations together with a model library of 46 vessels. Applications include military maritime reconnaissance, coastal surveillance, harbor security and anti-piracy operations. 7. Human and tree classification based on a model using 3D ladar in a GPS-denied environment NASA Astrophysics Data System (ADS) Cho, Kuk; Baeg, Seung-Ho; Park, Sangdeok 2013-05-01 This study explained a method to classify humans and trees by extracting their geometric and statistical features in data obtained from 3D LADAR. In a wooded GPS-denied environment, it is difficult to identify the location of unmanned ground vehicles and it is also difficult to properly recognize the environment in which these vehicles move. In this study, using the point cloud data obtained via 3D LADAR, a method to extract the features of humans, trees, and other objects within an environment was implemented and verified through the processes of segmentation, feature extraction, and classification. First, for the segmentation, the radially bounded nearest neighbor method was applied. Second, for the feature extraction, each segmented object was divided into three parts, and then their geometrical and statistical features were extracted. A human was divided into three parts: the head, trunk and legs. A tree was also divided into three parts: the top, middle, and bottom.
The geometric features were the variance of the x-y data for the center of each part in an object, using the distance between the two central points for each part, using K-mean clustering. The statistical features were the variance of each of the parts. In this study, three, six and six features of data were extracted, respectively, resulting in a total of 15 features. Finally, after training the extracted data via an artificial network, new data were classified. This study showed the results of an experiment that applied an algorithm proposed with a vehicle equipped with 3D LADAR in a thickly forested area, which is a GPS-denied environment. A total of 5,158 segments were obtained and the classification rates for human and trees were 82.9% and 87.4%, respectively. 8. Dynamic visualization of three-dimensional images from multiple texel images created from fused ladar/digital imagery NASA Astrophysics Data System (ADS) Killpack, Cody C.; Budge, Scott E. 2017-03-01 The ability to create three-dimensional (3-D) image models, using registered texel images (fused ladar and digital imagery), is an important topic in remote sensing. These models are automatically generated by matching multiple texel images into a single common reference frame. However, rendering a sequence of independently registered texel images often provides challenges. Although accurately registered, the model textures are often incorrectly overlapped and interwoven when using standard rendering techniques. Consequently, corrections must be done after all the primitives have been rendered by determining the best texture for any viewable fragment in the model. This paper describes a technique to visualize a 3-D model image created from a set of registered texel images. The visualization is determined for each viewpoint. It is, therefore, necessary to determine which textures are overlapping and how to best combine them dynamically during the rendering process. 
The best texture for a particular pixel can be defined using 3-D geometric criteria, in conjunction with a real-time, view-dependent ranking algorithm. As a result, overlapping texture fragments can now be hidden, exposed, or blended according to their computed measure of reliability. The advantages of this technique are illustrated using artificial and real data examples. 9. 3-D Scene Reconstruction from Aerial Imagery DTIC Science & Technology 2012-03-01 or CloudCompare to view results [8, 4]. D.2 PMVS2/CMVS This effort used a version of PMVS2/CMVS specifically modified for the Microsoft Window’s...Calibration Homepage. Available at http : //www.vision.caltech.edu/bouguetj/calib doc/. [4] “ CloudCompare , 3D point cloudl and mesh processing 10. Ultra-High sensitivity APD based 3D LADAR sensors: linear mode photon counting LADAR camera for the Ultra-Sensitive Detector program NASA Astrophysics Data System (ADS) Asbrock, J.; Bailey, S.; Baley, D.; Boisvert, J.; Chapman, G.; Crawford, G.; de Lyon, T.; Drafahl, B.; Edwards, J.; Herrin, E.; Hoyt, C.; Jack, M.; Kvaas, R.; Liu, K.; McKeag, W.; Rajavel, R.; Randall, V.; Rengarajan, S.; Riker, J. 2008-04-01 Advanced LADAR receivers enable high accuracy identification of targets at ranges beyond standard EOIR sensors. Increased sensitivity of these receivers will enable reductions in laser power, hence more affordable, smaller sensors as well as much longer range of detection. Raytheon has made a recent breakthrough in LADAR architecture by combining very low noise ~ 30 electron front end amplifiers with moderate gain >60 Avalanche Photodiodes. The combination of these enables detection of laser pulse returns containing as few as one photon up to 1000s of photons. Because a lower APD gain is utilized the sensor operation differs dramatically from traditional "Geiger mode APD" LADARs. 
Linear mode photon counting LADAR offers advantages including: determination of intensity as well as time of arrival, nanosecond recovery times and discrimination between radiation events and signals. In our talk we will present an update of this development work: the basic amplifier and APD component performance, the front end architecture, the demonstration of single photon detection using a simple 4 × 4 SCA and the design and evaluation of critical components of a fully integrated photon counting camera under development in support of the Ultra-Sensitive Detector (USD) program sponsored by AFRL-Kirtland. 11. Automatic Reconstruction of Spacecraft 3D Shape from Imagery NASA Astrophysics Data System (ADS) Poelman, C.; Radtke, R.; Voorhees, H. We describe a system that computes the three-dimensional (3D) shape of a spacecraft from a sequence of uncalibrated, two-dimensional images. While the mathematics of multi-view geometry is well understood, building a system that accurately recovers 3D shape from real imagery remains an art. A novel aspect of our approach is the combination of algorithms from computer vision, photogrammetry, and computer graphics. We demonstrate our system by computing spacecraft models from imagery taken by the Air Force Research Laboratory's XSS-10 satellite and DARPA's Orbital Express satellite. Using feature tie points (each identified in two or more images), we compute the relative motion of each frame and the 3D location of each feature using iterative linear factorization followed by non-linear bundle adjustment. The "point cloud" that results from this traditional shape-from-motion approach is typically too sparse to generate a detailed 3D model. Therefore, we use the computed motion solution as input to a volumetric silhouette-carving algorithm, which constructs a solid 3D model based on viewpoint consistency with the image frames. 
The resulting voxel model is then converted to a facet-based surface representation and is texture-mapped, yielding realistic images from arbitrary viewpoints. We also illustrate other applications of the algorithm, including 3D mensuration and stereoscopic 3D movie generation. 12. Improvements in the Visualization of Stereoscopic 3D Imagery NASA Astrophysics Data System (ADS) Gurrieri, Luis E. 2015-09-01 A pleasant visualization of stereoscopic imagery must take into account factors that may produce eye strain and fatigue. Fortunately, our binocular vision system has embedded mechanisms to perceive depth for extended periods of time without producing eye fatigue; however, stereoscopic imagery may still induce visual discomfort in certain displaying scenarios. An important source of eye fatigue originates in the conflict between vergence eye movement and focusing mechanisms. Today's eye-tracking technology makes it possible to know the viewers' gaze direction; hence, 3D imagery can be dynamically corrected based on this information. In this paper, I introduce a method to improve the visualization of stereoscopic imagery on planar displays based on emulating vergence and accommodation mechanisms of binocular human vision. Unlike other methods to improve the visual comfort that introduce depth distortions, in the stereoscopic visual media, this technique aims to produce a gentler and more natural binocular viewing experience without distorting the original depth of the scene. 13. 0.18 μm CMOS fully differential CTIA for a 32x16 ROIC for 3D ladar imaging systems NASA Astrophysics Data System (ADS) Helou, Jirar N.; Garcia, Jorge; Sarmiento, Mayra; Kiamilev, Fouad; Lawler, William 2006-08-01 We describe a 2-D fully differential Readout Integrated Circuit (ROIC) designed to convert the photocurrents from an array of differential metal-semiconductor-metal (MSM) detectors into voltage signals suitable for digitization and post processing.
The 2-D MSM array and CMOS ROIC are designed to function as a front-end module for an amplitude modulated/continuous time AM/CW 3-D Ladar imager under development at the Army Research Laboratory. One important aspect of our ROIC design is scalability. Within reasonable power consumption and photodetector size constraints, the ROIC architecture presented here scales up linearly without compromising complexity. The other key feature of our ROIC design is the mitigation of local oscillator coupling. In our ladar imaging application, the signal demodulation process that takes place in the MSM detectors introduces parasitic radio frequency (rf) currents that can be 4 to 5 orders of magnitude greater than the signal of interest. We present a fully-differential photodetector architecture and a circuit level solution to reduce the parasitic effect. As a proof of principle we have fabricated a 0.18 μm CMOS 32x16 fully differential ROIC with an array of 32 correlated double sampling (cds) capacitive transimpedance amplifiers (CTIAs), and a custom printed circuit board equipped to verify the test chip functionality. In this paper we discuss the fully differential IC design architecture and implementation and present the future testing strategy. 14. The effects of different shape-based metrics on the identification of military targets from 3D ladar data NASA Astrophysics Data System (ADS) Meyer, Gregory J.; Weber, James R. 2006-02-01 The choice of shape metrics is important to effectively identify three-dimensional targets. The performance (expressed as a probability of correct classification) of four metrics using point clouds of military targets rendered using Irma, a government tool that simulates the output of an active ladar system, is compared across multiple ranges, sampling densities, target types, and noise levels. 
After understanding the range of operating conditions a classifier would be expected to see in the field, a process for determining the upper-bound of a classifier and the significance of this result is assessed. Finally, the effect of sampling density and variance in the position estimates on classification performance is shown. Classification performance significantly decreases when sampling density exceeds 10 degrees and the voxelized histogram metric outperforms the other three metrics used in this paper because of its performance in high-noise environments. Most importantly, this paper highlights a step-by-step method to test and evaluate shape metrics using accurate target models. 15. Advances in HgCdTe APDs and LADAR Receivers NASA Technical Reports Server (NTRS) Bailey, Steven; McKeag, William; Wang, Jinxue; Jack, Michael; Amzajerdian, Farzin 2010-01-01 Raytheon is developing NIR sensor chip assemblies (SCAs) for scanning and staring 3D LADAR systems. High sensitivity is obtained by integrating high performance detectors with gain i.e. APDs with very low noise Readout Integrated Circuits. Unique aspects of these designs include: independent acquisition (non-gated) of pulse returns, multiple pulse returns with both time and intensity reported to enable full 3D reconstruction of the image. Recent breakthrough in device design has resulted in HgCdTe APDs operating at 300K with essentially no excess noise to gains in excess of 100, low NEP <1nW and GHz bandwidths and have demonstrated linear mode photon counting. SCAs utilizing these high performance APDs have been integrated and demonstrated excellent spatial and range resolution enabling detailed 3D imagery both at short range and long ranges. In this presentation we will review progress in high resolution scanning, staring and ultra-high sensitivity photon counting LADAR sensors. 16. Depth-fused 3D imagery on an immaterial display. 
PubMed Lee, Cha; Diverdi, Stephen; Höllerer, Tobias 2009-01-01 We present an immaterial display that uses a generalized form of depth-fused 3D (DFD) rendering to create unencumbered 3D visuals. To accomplish this result, we demonstrate a DFD display simulator that extends the established depth-fused 3D principle by using screens in arbitrary configurations and from arbitrary viewpoints. The feasibility of the generalized DFD effect is established with a user study using the simulator. Based on these results, we developed a prototype display using one or two immaterial screens to create an unencumbered 3D visual that users can penetrate, examining the potential for direct walk-through and reach-through manipulation of the 3D scene. We evaluate the prototype system in formative and summative user studies and report the tolerance thresholds discovered for both tracking and projector errors. 17. High Accuracy 3D Processing of Satellite Imagery NASA Technical Reports Server (NTRS) Gruen, A.; Zhang, L.; Kocaman, S. 2007-01-01 Automatic DSM/DTM generation reproduces not only general features, but also detailed features of the terrain relief. Height accuracy of around 1 pixel in cooperative terrain. RMSE values of 1.3-1.5 m (1.0-2.0 pixels) for IKONOS and RMSE values of 2.9-4.6 m (0.5-1.0 pixels) for SPOT5 HRS. For 3D city modeling, the manual and semi-automatic feature extraction capability of SAT-PP provides a good basis. The tools of SAT-PP allowed the stereo-measurements of points on the roofs in order to generate a 3D city model with CCM. The results show that building models with main roof structures can be successfully extracted by HRSI. As expected, with Quickbird more details are visible. 18. Imaging through obscurants with a heterodyne detection-based ladar system NASA Astrophysics Data System (ADS) Reibel, Randy R.; Roos, Peter A.; Kaylor, Brant M.; Berg, Trenton J.; Curry, James R.
2014-06-01 Bridger Photonics has been researching and developing a ladar system based on heterodyne detection for imaging through brownout and other DVEs. There are several advantages that an FMCW ladar system provides compared to direct detect pulsed time-of-flight systems including: 1) Higher average powers, 2) Single photon sensitive while remaining tolerant to strong return signals, 3) Doppler sensitivity for clutter removal, and 4) More flexible system for sensing during various stages of flight. In this paper, we provide a review of our sensor, discuss lessons learned during various DVE tests, and show our latest 3D imagery. 19. Impact of Building Heights on 3d Urban Density Estimation from Spaceborne Stereo Imagery NASA Astrophysics Data System (ADS) Peng, Feifei; Gong, Jianya; Wang, Le; Wu, Huayi; Yang, Jiansi 2016-06-01 In urban planning and design applications, visualization of built up areas in three dimensions (3D) is critical for understanding building density, but the accurate building heights required for 3D density calculation are not always available. To solve this problem, spaceborne stereo imagery is often used to estimate building heights; however, estimated building heights might include errors. These errors vary between local areas within a study area and are related to the heights of the buildings themselves, distorting 3D density estimation. The impact of building height accuracy on 3D density estimation must be determined across and within a study area. In our research, accurate planar information from city authorities is used during 3D density estimation as reference data, to avoid the errors inherent to planar information extracted from remotely sensed imagery. Our experimental results show that underestimation of building heights is correlated to underestimation of the Floor Area Ratio (FAR).
In local areas, experimental results show that land use blocks with low FAR values often have small errors due to small building height errors for low buildings in the blocks; and blocks with high FAR values often have large errors due to large building height errors for high buildings in the blocks. Our study reveals that the accuracy of 3D density estimated from spaceborne stereo imagery is correlated to heights of buildings in a scene; therefore building heights must be considered when spaceborne stereo imagery is used to estimate 3D density to improve precision. 20. Coronary vessel trees from 3D imagery: A topological approach PubMed Central Szymczak, Andrzej; Stillman, Arthur; Tannenbaum, Allen; Mischaikow, Konstantin 2013-01-01 We propose a simple method for reconstructing vascular trees from 3D images. Our algorithm extracts persistent maxima of the intensity on all axis-aligned 2D slices of the input image. The maxima concentrate along 1D intensity ridges, in particular along blood vessels. We build a forest connecting the persistent maxima with short edges. The forest tends to approximate the blood vessels present in the image, but also contains numerous spurious features and often fails to connect segments belonging to one vessel in low contrast areas. We improve the forest by applying simple geometric filters that trim short branches, fill gaps in blood vessels and remove spurious branches from the vascular tree to be extracted. Experiments show that our technique can be applied to extract coronary trees from heart CT scans. PMID:16798058 1. Automatic building detection and 3D shape recovery from single monocular electro-optic imagery NASA Astrophysics Data System (ADS) Lavigne, Daniel A.; Saeedi, Parvaneh; Dlugan, Andrew; Goldstein, Norman; Zwick, Harold 2007-04-01 The extraction of 3D building geometric information from high-resolution electro-optical imagery is becoming a key element in numerous geospatial applications. 
Indeed, producing 3D urban models is a requirement for a variety of applications such as spatial analysis of urban design, military simulation, and site monitoring of a particular geographic location. However, almost all operational approaches developed over the years for 3D building reconstruction are semiautomated ones, where a skilled human operator is involved in the 3D geometry modeling of building instances, which results in a time-consuming process. Furthermore, such approaches usually require stereo image pairs, image sequences, or laser scanning of a specific geographic location to extract the 3D models from the imagery. Finally, with current techniques, the 3D geometric modeling phase may be characterized by the extraction of 3D building models with a low accuracy level. This paper describes the Automatic Building Detection (ABD) system and embedded algorithms currently under development. The ABD system provides a framework for the automatic detection of buildings and the recovery of 3D geometric models from single monocular electro-optic imagery. The system is designed in order to cope with multi-sensor imaging of arbitrary viewpoint variations, clutter, and occlusion. Preliminary results on monocular airborne and spaceborne images are provided. Accuracy assessment of detected buildings and extracted 3D building models from single airborne and spaceborne monocular imagery of real scenes are also addressed. Embedded algorithms are evaluated for their robustness to deal with relatively dense and complicated urban environments. 2. The Maintenance Of 3-D Scene Databases Using The Analytical Imagery Matching System (Aims) NASA Astrophysics Data System (ADS) Hovey, Stanford T. 1987-06-01 The increased demand for multi-resolution displays of simulated scene data for aircraft training or mission planning has led to a need for digital databases of 3-dimensional topography and geographically positioned objects. 
This data needs to be at varying resolutions or levels of detail as well as be positionally accurate to satisfy close-up and long distance scene views. The generation and maintenance processes for this type of digital database requires that relative and absolute spatial positions of geographic and cultural features be carefully controlled in order for the scenes to be representative and useful for simulation applications. Autometric, Incorporated has designed a modular Analytical Image Matching System (AIMS) which allows digital 3-D terrain feature data to be derived from cartographic and imagery sources by a combination of automatic and man-machine techniques. This system provides a means for superimposing the scenes of feature information in 3-D over imagery for updating. It also allows for real-time operator interaction between a monoscopic digital imagery display, a digital map display, a stereoscopic digital imagery display and automatically detected feature changes for transferring 3-D data from one coordinate system's frame of reference to another for updating the scene simulation database. It is an advanced, state-of-the-art means for implementing a modular, 3-D scene database maintenance capability, where original digital or converted-to-digital analog source imagery is used as a basic input to perform accurate updating. 3. Improved TDEM formation using fused ladar/digital imagery from a low-cost small UAV NASA Astrophysics Data System (ADS) Khatiwada, Bikalpa; Budge, Scott E. 2017-05-01 Formation of a Textured Digital Elevation Model (TDEM) has been useful in many applications in the fields of agriculture, disaster response, terrain analysis and more. Use of a low-cost small UAV system with a texel camera (fused lidar/digital imagery) can significantly reduce the cost compared to conventional aircraft-based methods. This paper reports continued work on this problem reported in a previous paper by Bybee and Budge, and reports improvements in performance. 
A UAV fitted with a texel camera is flown at a fixed height above the terrain and swaths of texel image data of the terrain below are taken continuously. Each texel swath has one or more lines of lidar data surrounded by a narrow strip of EO data. Texel swaths are taken such that there is some overlap from one swath to its adjacent swath. The GPS/IMU fitted on the camera also gives coarse knowledge of attitude and position. Using this coarse knowledge and the information from the texel image, the error in the camera position and attitude is reduced, which helps in producing an accurate TDEM. This paper reports improvements in the original work by using multiple lines of lidar data per swath. The final results are shown and analyzed for numerical accuracy. 4. The Effect of Underwater Imagery Radiometry on 3d Reconstruction and Orthoimagery NASA Astrophysics Data System (ADS) Agrafiotis, P.; Drakonakis, G. I.; Georgopoulos, A.; Skarlatos, D. 2017-02-01 The work presented in this paper investigates the effect of the radiometry of the underwater imagery on automating the 3D reconstruction and the produced orthoimagery. The main aim is to investigate whether pre-processing of the underwater imagery improves the 3D reconstruction using automated SfM - MVS software or not. Since the processing of images either separately or in batch is a time-consuming procedure, it is critical to determine the necessity of implementing colour correction and enhancement before the SfM - MVS procedure or directly to the final orthoimage when the orthoimagery is the deliverable. Two different test sites were used to capture imagery ensuring different environmental conditions, depth and complexity. Three different image correction methods are applied: A very simple automated method using Adobe Photoshop, a developed colour correction algorithm using the CLAHE (Zuiderveld, 1994) method and an implementation of the algorithm described in Bianco et al. (2015).
The produced point clouds using the initial and the corrected imagery are then being compared and evaluated. 5. On Fundamental Evaluation Using Uav Imagery and 3d Modeling Software NASA Astrophysics Data System (ADS) Nakano, K.; Suzuki, H.; Tamino, T.; Chikatsu, H. 2016-06-01 Unmanned aerial vehicles (UAVs), which have been widely used in recent years, can acquire high-resolution images with resolutions in millimeters; such images cannot be acquired with manned aircrafts. Moreover, it has become possible to obtain a surface reconstruction of a realistic 3D model using high-overlap images and 3D modeling software such as Context capture, Pix4Dmapper, Photoscan based on computer vision technology such as structure from motion and multi-view stereo. 3D modeling software has many applications. However, most of them seem to not have obtained appropriate accuracy control in accordance with the knowledge of photogrammetry and/or computer vision. Therefore, we performed flight tests in a test field using an UAV equipped with a gimbal stabilizer and consumer grade digital camera. Our UAV is a hexacopter and can fly according to the waypoints for autonomous flight and can record flight logs. We acquired images from different altitudes such as 10 m, 20 m, and 30 m. We obtained 3D reconstruction results of orthoimages, point clouds, and textured TIN models for accuracy evaluation in some cases with different image scale conditions using 3D modeling software. Moreover, the accuracy aspect was evaluated for different units of input image—course unit and flight unit. This paper describes the fundamental accuracy evaluation for 3D modeling using UAV imagery and 3D modeling software from the viewpoint of close-range photogrammetry. 6. True 3D High Resolution imagery of a Buried Shipwreck: the Invincible (1758) NASA Astrophysics Data System (ADS) Dix, J. K.; Bull, J. M.; Henstock, T.; Gutowski, M.; Hogarth, P.; Leighton, T. G.; White, P. R. 
2005-12-01 This paper will present the first true 3D high resolution acoustic imagery of a wreck site buried in the marine environment. Using a 3D Chirp system developed at the University of Southampton, a marine seismic survey of the mid-eighteenth century wreck site has been undertaken. The Invincible was a 74 gun warship built by the French in 1744, captured by the British in 1747 and subsequently lost off Portsmouth, UK in February 1758. The wreck was re-discovered by divers in 1979, partially buried on the margins of a mobile sandbank in approximately 8 metres of water. In 2004 the site was surveyed using a 60 channel, rigid framed 3D Chirp (1.5-13 kHz source sweep) system with integral RTK GPS and attitude systems. An area of 160 m x 160 m, centered over the wreck site, was surveyed with a total of 150 Gb of data being acquired. The data was processed, using 3D Promax, to produce 25 cm bins with typical 3-6 fold coverage. The stacked traces have been visualized and interpreted using Kingdom Suite software. The final imagery shows at unprecedented resolution the full three-dimensional buried form of the wreck and its relationship to the surrounding sedimentary sequences, enabling the full evolution of the site to be discussed. Further, the data is compared to previously acquired swath bathymetry and 2D seismic data in order to illustrate the impact of such a device for underwater cultural heritage management. 7. Influence of Gsd for 3d City Modeling and Visualization from Aerial Imagery NASA Astrophysics Data System (ADS) Alrajhi, Muhamad; Alam, Zafare; Afroz Khan, Mohammad; Alobeid, Abdalla 2016-06-01 Ministry of Municipal and Rural Affairs (MOMRA) aims to establish solid infrastructure required for 3D city modelling, for decision making to set a mark in urban development. MOMRA is responsible for the large scale mapping 1:1,000; 1:2,500; 1:10,000 and 1:20,000 scales for 10cm, 20cm and 40cm GSD with Aerial Triangulation data.
As 3D city models are increasingly used for the presentation, exploration, and evaluation of urban and architectural designs. Visualization capabilities and animations support of upcoming 3D geo-information technologies empower architects, urban planners, and authorities to visualize and analyze urban and architectural designs in the context of the existing situation. To make use of this possibility, first of all a 3D city model has to be created, for which MOMRA uses the Aerial Triangulation data and aerial imagery. The main concern for 3D city modelling in the Kingdom of Saudi Arabia exists due to uneven surface and undulations. Thus real time 3D visualization and interactive exploration support planning processes by providing multiple stakeholders such as decision makers, architects, urban planners, authorities, citizens or investors with a three - dimensional model. Apart from advanced visualization, these 3D city models can be helpful for dealing with natural hazards and provide various possibilities to deal with exotic conditions by better and advanced viewing technological infrastructure. Riyadh on one side is 5700m above sea level and on the other hand Abha city is 2300m; this uneven terrain represents a drastic change of surface in the Kingdom, for which 3D city models provide valuable solutions with all possible opportunities. In this research paper: influence of different GSD (Ground Sample Distance) aerial imagery with Aerial Triangulation is used for 3D visualization in different regions of the Kingdom, to check which scale is more sophisticated for obtaining better results and is cost manageable, with GSD (7.5cm, 10cm, 20cm and 40cm). 8. Automatic orientation and 3D modelling from markerless rock art imagery NASA Astrophysics Data System (ADS) Lerma, J. L.; Navarro, S.; Cabrelles, M.; Seguí, A. E.; Hernández, D.
2013-02-01 This paper investigates the use of two detectors and descriptors on image pyramids for automatic image orientation and generation of 3D models. The detectors and descriptors replace manual measurements and are used to detect, extract and match features across multiple imagery. The Scale-Invariant Feature Transform (SIFT) and the Speeded Up Robust Features (SURF) will be assessed based on speed, number of features, matched features, and precision in image and object space depending on the adopted hierarchical matching scheme. The influence of applying in addition Area Based Matching (ABM) with normalised cross-correlation (NCC) and least squares matching (LSM) is also investigated. The pipeline makes use of photogrammetric and computer vision algorithms aiming minimum interaction and maximum accuracy from a calibrated camera. Both the exterior orientation parameters and the 3D coordinates in object space are sequentially estimated combining relative orientation, single space resection and bundle adjustment. The fully automatic image-based pipeline presented herein to automate the image orientation step of a sequence of terrestrial markerless imagery is compared with manual bundle block adjustment and terrestrial laser scanning (TLS) which serves as ground truth. The benefits of applying ABM after FBM will be assessed both in image and object space for the 3D modelling of a complex rock art shelter. 9. A study of the effects of degraded imagery on tactical 3D model generation using structure-from-motion NASA Astrophysics Data System (ADS) Bolick, Leslie; Harguess, Josh 2016-05-01 An emerging technology in the realm of airborne intelligence, surveillance, and reconnaissance (ISR) systems is structure-from-motion (SfM), which enables the creation of three-dimensional (3D) point clouds and 3D models from two-dimensional (2D) imagery. 
There are several existing tools, such as VisualSFM and open source project OpenSfM, to assist in this process, however, it is well-known that pristine imagery is usually required to create meaningful 3D data from the imagery. In military applications, such as the use of unmanned aerial vehicles (UAV) for surveillance operations, imagery is rarely pristine. Therefore, we present an analysis of structure-from-motion packages on imagery that has been degraded in a controlled manner. 10. Advances in LADAR Components and Subsystems at Raytheon NASA Technical Reports Server (NTRS) Jack, Michael; Chapman, George; Edwards, John; McKeag, William; Veeder, Tricia; Wehner, Justin; Roberts, Tom; Robinson, Tom; Neisz, James; Andressen, Cliff; Rinker, Robert; Hall, Donald N. B.; Jacobson, Shane M.; Amzajerdian, Farzin; Cook, T. Dean 2012-01-01 Raytheon is developing NIR sensor chip assemblies (SCAs) for scanning and staring 3D LADAR systems. High sensitivity is obtained by integrating high performance detectors with gain, i.e., APDs with very low noise Readout Integrated Circuits (ROICs). Unique aspects of these designs include: independent acquisition (non-gated) of pulse returns, multiple pulse returns with both time and intensity reported to enable full 3D reconstruction of the image. Recent breakthrough in device design has resulted in HgCdTe APDs operating at 300K with essentially no excess noise to gains in excess of 100, low NEP <1nW and GHz bandwidths and have demonstrated linear mode photon counting. SCAs utilizing these high performance APDs have been integrated and demonstrated excellent spatial and range resolution enabling detailed 3D imagery both at short range and long ranges. 
In the following we will review progress in real-time 3D LADAR imaging receiver products in three areas: (1) scanning 256 x 4 configuration for the Multi-Mode Sensor Seeker (MMSS) program and (2) staring 256 x 256 configuration for the Autonomous Landing and Hazard Avoidance Technology (ALHAT) lunar landing mission and (3) Photon-Counting SCAs which have demonstrated a dramatic reduction in dark count rate due to improved design, operation and processing. 11. Extracting Semantically Annotated 3d Building Models with Textures from Oblique Aerial Imagery NASA Astrophysics Data System (ADS) Frommholz, D.; Linkiewicz, M.; Meissner, H.; Dahlke, D.; Poznanska, A. 2015-03-01 This paper proposes a method for the reconstruction of city buildings with automatically derived textures that can be directly used for façade element classification. Oblique and nadir aerial imagery recorded by a multi-head camera system is transformed into dense 3D point clouds and evaluated statistically in order to extract the hull of the structures. For the resulting wall, roof and ground surfaces high-resolution polygonal texture patches are calculated and compactly arranged in a texture atlas without resampling. The façade textures subsequently get analyzed by a commercial software package to detect possible windows whose contours are projected into the original oriented source images and sparsely ray-casted to obtain their 3D world coordinates. With the windows being reintegrated into the previously extracted hull the final building models are stored as semantically annotated CityGML "LOD-2.5" objects. 12. 
Towards 3D Matching of Point Clouds Derived from Oblique and Nadir Airborne Imagery NASA Astrophysics Data System (ADS) Zhang, Ming Because of the low-expense high-efficient image collection process and the rich 3D and texture information presented in the images, a combined use of 2D airborne nadir and oblique images to reconstruct 3D geometric scene has a promising market for future commercial usage like urban planning or first responders. The methodology introduced in this thesis provides a feasible way towards fully automated 3D city modeling from oblique and nadir airborne imagery. In this thesis, the difficulty of matching 2D images with large disparity is avoided by grouping the images first and applying the 3D registration afterward. The procedure starts with the extraction of point clouds using a modified version of the RIT 3D Extraction Workflow. Then the point clouds are refined by noise removal and surface smoothing processes. Since the point clouds extracted from different image groups use independent coordinate systems, there are translation, rotation and scale differences existing. To figure out these differences, 3D keypoints and their features are extracted. For each pair of point clouds, an initial alignment and a more accurate registration are applied in succession. The final transform matrix presents the parameters describing the translation, rotation and scale requirements. The methodology presented in the thesis has been shown to behave well for test data. The robustness of this method is discussed by adding artificial noise to the test data. For Pictometry oblique aerial imagery, the initial alignment provides a rough alignment result, which contains a larger offset compared to that of test data because of the low quality of the point clouds themselves, but it can be further refined through the final optimization. 
The accuracy of the final registration result is evaluated by comparing it to the result obtained from manual selection of matched points. Using the method introduced, point clouds extracted from different image groups could be combined with each other to build a 13. 3D reconstruction optimization using imagery captured by unmanned aerial vehicles NASA Astrophysics Data System (ADS) Bassie, Abby L.; Meacham, Sean; Young, David; Turnage, Gray; Moorhead, Robert J. 2017-05-01 Because unmanned air vehicles (UAVs) are emerging as an indispensable image acquisition platform in precision agriculture, it is vitally important that researchers understand how to optimize UAV camera payloads for analysis of surveyed areas. In this study, imagery captured by a Nikon RGB camera attached to a Precision Hawk Lancaster was used to survey an agricultural field from six different altitudes ranging from 45.72 m (150 ft.) to 121.92 m (400 ft.). After collecting imagery, two different software packages (MeshLab and AgiSoft) were used to measure predetermined reference objects within six three-dimensional (3-D) point clouds (one per altitude scenario). In-silico measurements were then compared to actual reference object measurements, as recorded with a tape measure. Deviations of in-silico measurements from actual measurements were recorded as Δx, Δy, and Δz. The average measurement deviation in each coordinate direction was then calculated for each of the six flight scenarios. Results from MeshLab vs. AgiSoft offered insight into the effectiveness of GPS-defined point cloud scaling in comparison to user-defined point cloud scaling. In three of the six flight scenarios flown, MeshLab's 3D imaging software (user-defined scale) was able to measure object dimensions from 50.8 to 76.2 cm (20-30 inches) with greater than 93% accuracy. The largest average deviation in any flight scenario from actual measurements was 14.77 cm (5.82 in.). 
Analysis of the point clouds in AgiSoft (GPS-defined scale) yielded even smaller Δx, Δy, and Δz than the MeshLab measurements in over 75% of the flight scenarios. The precisions of these results are satisfactory in a wide variety of precision agriculture applications focused on differentiating and identifying objects using remote imagery. 14. Public engagement in 3D flood modelling through integrating crowd sourced imagery with UAV photogrammetry to create a 3D flood hydrograph. NASA Astrophysics Data System (ADS) Bond, C. E.; Howell, J.; Butler, R. 2016-12-01 With an increase in flood and storm events affecting infrastructure, the role of weather systems, in a changing climate, and their impact is of increasing interest. Here we present a new workflow integrating crowd sourced imagery from the public with UAV photogrammetry to create the first 3D hydrograph of a major flooding event. On December 30th 2015, Storm Frank resulted in high magnitude rainfall within the Dee catchment in Aberdeenshire, resulting in the highest ever-recorded river level for the Dee, with significant impact on infrastructure and river morphology. The worst of the flooding occurred during daylight hours and was digitally captured by the public on smart phones and cameras. After the flood event a UAV was used to shoot photogrammetry to create a textured elevation model of the area around Aboyne Bridge on the River Dee. A media campaign aided the collection of crowd sourced digital imagery from the public, resulting in over 1,000 images submitted by the public. EXIF data captured with the imagery, recording the time and date, were used to sort the images into a time series. Markers such as signs, walls, fences and roads within the images were used to determine river level height through the flood, and matched onto the elevation model to contour the change in river level.
The resulting 3D hydrograph shows the build-up of water on the upstream side of the Bridge that resulted in significant scouring and undermining in the flood. We have created the first known data-based 3D hydrograph for a river section, from a UAV photogrammetric model and crowd sourced imagery. For future flood warning and infrastructure management, a solution that allows a real-time hydrograph to be created, utilising augmented reality to integrate the river level information in crowd sourced imagery directly onto a 3D model, would significantly improve management planning and infrastructure resilience assessment. 15. Quality Analysis on 3d Building Models Reconstructed from Uav Imagery NASA Astrophysics Data System (ADS) Jarzabek-Rychard, M.; Karpina, M. 2016-06-01 Recent developments in UAV technology and structure from motion techniques have meant that UAVs are becoming standard platforms for 3D data collection. Because of their flexibility and ability to reach inaccessible urban parts, drones appear as an optimal solution for urban applications. Building reconstruction from the data collected with UAV has the important potential to reduce labour cost for fast update of already reconstructed 3D cities. However, especially for updating of existing scenes derived from different sensors (e.g. airborne laser scanning), a proper quality assessment is necessary. The objective of this paper is thus to evaluate the potential of UAV imagery as an information source for automatic 3D building modeling at LOD2. The investigation process is conducted threefold: (1) comparing generated SfM point cloud to ALS data; (2) computing internal consistency measures of the reconstruction process; (3) analysing the deviation of Check Points identified on building roofs and measured with a tacheometer. In order to gain deep insight in the modeling performance, various quality indicators are computed and analysed.
The assessment performed according to the ground truth shows that the building models acquired with UAV-photogrammetry have an accuracy of less than 18 cm for the planimetric position and about 15 cm for the height component. 16. Experiments with Uas Imagery for Automatic Modeling of Power Line 3d Geometry NASA Astrophysics Data System (ADS) Jóźków, G.; Vander Jagt, B.; Toth, C. 2015-08-01 The ideal mapping technology for transmission line inspection is the airborne LiDAR executed from helicopter platforms. It allows for full 3D geometry extraction in a highly automated manner. Large scale aerial images can also be used for this purpose, however, automation is possible only for finding transmission line positions (2D geometry), and the sag needs to be estimated manually. For longer lines, these techniques are less expensive than ground surveys, yet they are still expensive. UAS technology has the potential to reduce these costs, especially if using inexpensive platforms with consumer grade cameras. This study investigates the potential of using high resolution UAS imagery for automatic modeling of transmission line 3D geometry. The key point of this experiment was to employ dense matching algorithms to appropriately acquired UAS images to have points created also on wires. This allowed modeling the 3D geometry of transmission lines similarly to LiDAR acquired point clouds. Results showed that the transmission line modeling is possible with a high internal accuracy for both horizontal and vertical directions, even when wires were represented by a partial (sparse) point cloud.
Such applications especially benefit from the very reasonable price of a small light UAS including control system and standard consumer grade digital camera, which is some orders of magnitude lower compared to digital photogrammetric systems. Within the paper the capability of UAV-based data collection will be evaluated for two different consumer camera systems and compared to an aerial survey with a state-of-the-art digital airborne camera system. During this evaluation, the quality of 3D point clouds generated by dense multiple image matching will be used as a benchmark. Also due to recent software developments such point clouds can be generated at a resolution similar to the ground sampling distance of the available imagery and are used for an increasing number of applications. Usually, image matching benefits from the good image quality as provided by digital airborne camera systems, which is frequently not available from the low-cost sensor components used for UAV image collection. Within the paper an investigation on UAV-based 3D data capture will be presented. For this purpose dense 3D point clouds are generated for a test area from three different platforms: first a UAV with a light weight compact camera, second a system using a system camera and finally a medium-format airborne digital camera system. Despite the considerable differences in system costs, suitable results can be derived from all data, especially if large redundancy is available. Such highly overlapping image blocks are not only beneficial during georeferencing, but are especially advantageous while aiming at a dense and accurate image based 3D surface reconstruction.
3D Modeling of Building Indoor Spaces and Closed Doors from Imagery and Point Clouds PubMed Central Díaz-Vilariño, Lucía; Khoshelham, Kourosh; Martínez-Sánchez, Joaquín; Arias, Pedro 2015-01-01 3D models of indoor environments are increasingly gaining importance due to the wide range of applications to which they can be subjected: from redesign and visualization to monitoring and simulation. These models usually exist only for newly constructed buildings; therefore, the development of automatic approaches for reconstructing 3D indoors from imagery and/or point clouds can make the process easier, faster and cheaper. Among the constructive elements defining a building interior, doors are very common elements and their detection can be very useful either for knowing the environment structure, to perform an efficient navigation or to plan appropriate evacuation routes. The fact that doors are topologically connected to walls by being coplanar, together with the unavoidable presence of clutter and occlusions indoors, increases the inherent complexity of the automation of the recognition process. In this work, we present a pipeline of techniques used for the reconstruction and interpretation of building interiors based on point clouds and images. The methodology analyses the visibility problem of indoor environments and goes in depth with door candidate detection. The presented approach is tested in real data sets showing its potential with a high door detection rate and applicability for robust and efficient envelope reconstruction. PMID:25654723 19. 3D modeling of building indoor spaces and closed doors from imagery and point clouds. PubMed Díaz-Vilariño, Lucía; Khoshelham, Kourosh; Martínez-Sánchez, Joaquín; Arias, Pedro 2015-02-03 3D models of indoor environments are increasingly gaining importance due to the wide range of applications to which they can be subjected: from redesign and visualization to monitoring and simulation. 
These models usually exist only for newly constructed buildings; therefore, the development of automatic approaches for reconstructing 3D indoors from imagery and/or point clouds can make the process easier, faster and cheaper. Among the constructive elements defining a building interior, doors are very common elements and their detection can be very useful either for knowing the environment structure, to perform an efficient navigation or to plan appropriate evacuation routes. The fact that doors are topologically connected to walls by being coplanar, together with the unavoidable presence of clutter and occlusions indoors, increases the inherent complexity of the automation of the recognition process. In this work, we present a pipeline of techniques used for the reconstruction and interpretation of building interiors based on point clouds and images. The methodology analyses the visibility problem of indoor environments and goes in depth with door candidate detection. The presented approach is tested in real data sets showing its potential with a high door detection rate and applicability for robust and efficient envelope reconstruction. 20. 3D visualization of movements can amplify motor cortex activation during subsequent motor imagery PubMed Central Sollfrank, Teresa; Hart, Daniel; Goodsell, Rachel; Foster, Jonathan; Tan, Tele 2015-01-01 A repetitive movement practice by motor imagery (MI) can influence motor cortical excitability in the electroencephalogram (EEG). This study investigated if a realistic visualization in 3D of upper and lower limb movements can amplify motor related potentials during subsequent MI. We hypothesized that a richer sensory visualization might be more effective during instrumental conditioning, resulting in a more pronounced event related desynchronization (ERD) of the upper alpha band (10–12 Hz) over the sensorimotor cortices thereby potentially improving MI based brain-computer interface (BCI) protocols for motor rehabilitation. 
The results show a strong increase of the characteristic patterns of ERD of the upper alpha band components for left and right limb MI present over the sensorimotor areas in both visualization conditions. Overall, significant differences were observed as a function of visualization modality (VM; 2D vs. 3D). The largest upper alpha band power decrease was obtained during MI after a 3-dimensional visualization. In total in 12 out of 20 tasks the end-user of the 3D visualization group showed an enhanced upper alpha ERD relative to 2D VM group, with statistical significance in nine tasks.With a realistic visualization of the limb movements, we tried to increase motor cortex activation during subsequent MI. The feedback and the feedback environment should be inherently motivating and relevant for the learner and should have an appeal of novelty, real-world relevance or aesthetic value (Ryan and Deci, 2000; Merrill, 2007). Realistic visual feedback, consistent with the participant’s MI, might be helpful for accomplishing successful MI and the use of such feedback may assist in making BCI a more natural interface for MI based BCI rehabilitation. PMID:26347642 1. 3D visualization of movements can amplify motor cortex activation during subsequent motor imagery. PubMed Sollfrank, Teresa; Hart, Daniel; Goodsell, Rachel; Foster, Jonathan; Tan, Tele 2015-01-01 A repetitive movement practice by motor imagery (MI) can influence motor cortical excitability in the electroencephalogram (EEG). This study investigated if a realistic visualization in 3D of upper and lower limb movements can amplify motor related potentials during subsequent MI. 
We hypothesized that a richer sensory visualization might be more effective during instrumental conditioning, resulting in a more pronounced event related desynchronization (ERD) of the upper alpha band (10-12 Hz) over the sensorimotor cortices thereby potentially improving MI based brain-computer interface (BCI) protocols for motor rehabilitation. The results show a strong increase of the characteristic patterns of ERD of the upper alpha band components for left and right limb MI present over the sensorimotor areas in both visualization conditions. Overall, significant differences were observed as a function of visualization modality (VM; 2D vs. 3D). The largest upper alpha band power decrease was obtained during MI after a 3-dimensional visualization. In total in 12 out of 20 tasks the end-user of the 3D visualization group showed an enhanced upper alpha ERD relative to 2D VM group, with statistical significance in nine tasks.With a realistic visualization of the limb movements, we tried to increase motor cortex activation during subsequent MI. The feedback and the feedback environment should be inherently motivating and relevant for the learner and should have an appeal of novelty, real-world relevance or aesthetic value (Ryan and Deci, 2000; Merrill, 2007). Realistic visual feedback, consistent with the participant's MI, might be helpful for accomplishing successful MI and the use of such feedback may assist in making BCI a more natural interface for MI based BCI rehabilitation. 2. A photogrammetric approach for real-time 3D localization and tracking of pedestrians in monocular infrared imagery NASA Astrophysics Data System (ADS) Kundegorski, Mikolaj E.; Breckon, Toby P. 2014-10-01 Target tracking within conventional video imagery poses a significant challenge that is increasingly being addressed via complex algorithmic solutions. 
The complexity of this problem can be fundamentally attributed to the ambiguity associated with actual 3D scene position of a given tracked object in relation to its observed position in 2D image space. We propose an approach that challenges the current trend in complex tracking solutions by addressing this fundamental ambiguity head-on. In contrast to prior work in the field, we leverage the key advantages of thermal-band infrared (IR) imagery for the pedestrian localization to show that robust localization and foreground target separation, afforded via such imagery, facilitates accurate 3D position estimation to within the error bounds of conventional Global Positioning System (GPS) positioning. This work investigates the accuracy of classical photogrammetry, within the context of current target detection and classification techniques, as a means of recovering the true 3D position of pedestrian targets within the scene. Based on photogrammetric estimation of target position, we then illustrate the efficiency of regular Kalman filter based tracking operating on actual 3D pedestrian scene trajectories. We present both a statistical and experimental analysis of the associated errors of this approach in addition to real-time 3D pedestrian tracking using monocular infrared (IR) imagery from a thermal-band camera. 3. Three-dimensional landing zone ladar NASA Astrophysics Data System (ADS) Savage, James; Goodrich, Shawn; Burns, H. N. 2016-05-01 Three-Dimensional Landing Zone (3D-LZ) refers to a series of Air Force Research Laboratory (AFRL) programs to develop high-resolution, imaging ladar to address helicopter approach and landing in degraded visual environments with emphasis on brownout; cable warning and obstacle avoidance; and controlled flight into terrain. 
Initial efforts adapted ladar systems built for munition seekers, and success led to the 3D-LZ Joint Capability Technology Demonstration (JCTD), a 27-month program to develop and demonstrate a ladar subsystem that could be housed with the AN/AAQ-29 FLIR turret flown on US Air Force Combat Search and Rescue (CSAR) HH-60G Pave Hawk helicopters. Following the JCTD flight demonstration, further development focused on reducing size, weight, and power while continuing to refine the real-time geo-referencing, dust rejection, obstacle and cable avoidance, and Helicopter Terrain Awareness and Warning (HTAWS) capability demonstrated under the JCTD. This paper summarizes significant ladar technology development milestones to date, individual LADAR technologies within 3D-LZ, and results of the flight testing. 4. Automated 3D modelling of buildings from aerial and space imagery using image understanding techniques NASA Astrophysics Data System (ADS) Kim, Taejung The development of a fully automated mapping system is one of the fundamental goals in photogrammetry and remote sensing. As an approach towards this goal, this thesis describes the work carried out in the automated 3D modelling of buildings in urban scenes. The whole work is divided into three parts: the development of an automated height extraction system, the development of an automated building detection system, and the combination of these two systems. After an analysis of the key problems of urban-area imagery for stereo matching, buildings were found to create isolated regions and blunders. From these findings, an automated building height extraction system was developed. This stereoscopic system is based on a pyramidal (area-based) matching algorithm with automatic seed points and a tile-based control strategy. To remove possible blunders and extract buildings from other background objects, a series of "smart" operations using linear elements from buildings were also applied. 
A new monoscopic building detection system was developed based on a graph constructed from extracted lines and their relations. After extracting lines from a single image using low-level image processing techniques, line relations are searched for and a graph constructed. By finding closed loops in the graph, building hypotheses are generated. These are then merged and verified using shadow analysis and perspective geometry. After verification, each building hypothesis indicates either a building or a part of a building. By combining results from these two systems, 3D building roofs can be modelled automatically. The modelling is performed using height information obtained from the height extraction system and interpolation boundaries obtained from the building detection system. Other fusion techniques and the potential improvements due to these are also discussed. Quantitative analysis was performed for each algorithm presented in this thesis and the results support the newly 5. Indoor imagery with a 3D through-wall synthetic aperture radar NASA Astrophysics Data System (ADS) Sévigny, Pascale; DiFilippo, David J.; Laneve, Tony; Fournier, Jonathan 2012-06-01 Through-wall radar imaging is an emerging technology with great interest to military and police forces operating in an urban environment. A through-wall imaging radar can potentially provide interior room layouts as well as detection and localization of targets of interest within a building. In this paper, we present our through-wall radar system mounted on the side of a vehicle and driven along a path in front of a building of interest. The vehicle is equipped with a LIDAR (Light Detection and Ranging) and motion sensors that provide auxiliary information. The radar uses an ultra wideband frequency-modulated continuous wave (FMCW) waveform to obtain high range resolution. 
Our system is composed of a vertical linear receive array to discriminate targets in elevation, and two transmit elements operated in a slow multiple-input multiple output (MIMO) configuration to increase the achievable elevation resolution. High resolution in the along-track direction is obtained through synthetic aperture radar (SAR) techniques. We present experimental results that demonstrate the 3-D capability of the radar. We further demonstrate target detection behind challenging walls, and imagery of internal wall features. Finally, we discuss future work. 6. 3D Visualization of an Invariant Display Strategy for Hyperspectral Imagery DTIC Science & Technology 2002-12-01 Region of Interest (ROI) in HSV color space model in 3D, and viewing the 2D resultant image. A demonstration application uses Java language...Visualization, X3D, Java Xj3d API 15. NUMBER OF PAGES 106 16. PRICE CODE 17. SECURITY CLASSIFICATION OF REPORT Unclassified 18. SECURITY...application uses Java language including Java2D, Xj3D Player, Document Object Model (DOM) Application Program Interfaces (API), and Extensible 3D Language 7. Comparison of 32 x 128 and 32 x 32 Geiger-mode APD FPAs for single photon 3D LADAR imaging NASA Astrophysics Data System (ADS) Itzler, Mark A.; Entwistle, Mark; Owens, Mark; Patel, Ketan; Jiang, Xudong; Slomkowski, Krystyna; Rangwala, Sabbir; Zalud, Peter F.; Senko, Tom; Tower, John; Ferraro, Joseph 2011-05-01 We present results obtained from 3D imaging focal plane arrays (FPAs) employing planar-geometry InGaAsP/InP Geiger-mode avalanche photodiodes (GmAPDs) with high-efficiency single photon sensitivity at 1.06 μm. We report results obtained for new 32 x 128 format FPAs with 50 μm pitch and compare these results to those obtained for 32 x 32 format FPAs with 100 μm pitch. We show excellent pixel-level yield-including 100% pixel operability-for both formats. 
The dark count rate (DCR) and photon detection efficiency (PDE) performance is found to be similar for both types of arrays, including the fundamental DCR vs. PDE tradeoff. The optical crosstalk due to photon emission induced by pixel-level avalanche detection events is found to be qualitatively similar for both formats, with some crosstalk metrics for the 32 x 128 format found to be moderately elevated relative to the 32 x 32 FPA results. Timing jitter measurements are also reported for the 32 x 128 FPAs. 8. How much camera separation should be used for the capture and presentation of 3D stereoscopic imagery on binocular HMDs? NASA Astrophysics Data System (ADS) McIntire, John; Geiselman, Eric; Heft, Eric; Havig, Paul 2011-06-01 Designers, researchers, and users of binocular stereoscopic head- or helmet-mounted displays (HMDs) face the tricky issue of what imagery to present in their particular displays, and how to do so effectively. Stereoscopic imagery must often be created in-house with a 3D graphics program or from within a 3D virtual environment, or stereoscopic photos/videos must be carefully captured, perhaps for relaying to an operator in a teleoperative system. In such situations, the question arises as to what camera separation (real or virtual) is appropriate or desirable for end-users and operators. We review some of the relevant literature regarding the question of stereo pair camera separation using desk-mounted or larger scale stereoscopic displays, and employ our findings to potential HMD applications, including command & control, teleoperation, information and scientific visualization, and entertainment. 9. A novel window based method for approximating the Hausdorff in 3D range imagery. SciTech Connect Koch, Mark William 2004-10-01 Matching a set of 3D points to another set of 3D points is an important part of any 3D object recognition system. The Hausdorff distance is known for its robustness in the face of obscuration, clutter, and noise. 
We show how to approximate the 3D Hausdorff fraction with linear time complexity and quadratic space complexity. We empirically demonstrate that the approximation is very good when compared to actual Hausdorff distances. 10. Meteoroid and debris special investigation group; status of 3-D crater analysis from binocular imagery NASA Technical Reports Server (NTRS) Sapp, Clyde A.; See, Thomas H.; Zolensky, Michael E. 1992-01-01 During the 3 month deintegration of the LDEF, the M&D SIG generated approximately 5000 digital color stereo image pairs of impact related features from all space exposed surfaces. Currently, these images are being processed at JSC to yield more accurate feature information. Work is currently underway to determine the minimum number of data points necessary to parametrically define impact crater morphologies in order to minimize the man-hour intensive task of tie point selection. Initial attempts at deriving accurate crater depth and diameter measurements from binocular imagery were based on the assumption that the crater geometries were best defined by paraboloid. We made no assumptions regarding the crater depth/diameter ratios but instead allowed each crater to define its own coefficients by performing a least-squares fit based on user-selected tiepoints. Initial test cases resulted in larger errors than desired, so it was decided to test our basic assumptions that the crater geometries could be parametrically defined as paraboloids. The method for testing this assumption was to carefully slice test craters (experimentally produced in an appropriate aluminum alloy) vertically through the center resulting in a readily visible cross-section of the crater geometry. Initially, five separate craters were cross-sectioned in this fashion. A digital image of each cross-section was then created, and the 2-D crater geometry was then hand-digitized to create a table of XY position for each crater. 
A 2nd order polynomial (parabolic) was fitted to the data using a least-squares approach. The differences between the fit equation and the actual data were fairly significant, and easily large enough to account for the errors found in the 3-D fits. The differences between the curve fit and the actual data were consistent between the craters. This consistency suggested that the differences were due to the fact that a parabola did not sufficiently define the generic crater geometry 11. Meteoroid and debris special investigation group; status of 3-D crater analysis from binocular imagery NASA Technical Reports Server (NTRS) Sapp, Clyde A.; See, Thomas H.; Zolensky, Michael E. 1992-01-01 During the 3 month deintegration of the LDEF, the M&D SIG generated approximately 5000 digital color stereo image pairs of impact related features from all space exposed surfaces. Currently, these images are being processed at JSC to yield more accurate feature information. Work is currently underway to determine the minimum number of data points necessary to parametrically define impact crater morphologies in order to minimize the man-hour intensive task of tie point selection. Initial attempts at deriving accurate crater depth and diameter measurements from binocular imagery were based on the assumption that the crater geometries were best defined by paraboloid. We made no assumptions regarding the crater depth/diameter ratios but instead allowed each crater to define its own coefficients by performing a least-squares fit based on user-selected tiepoints. Initial test cases resulted in larger errors than desired, so it was decided to test our basic assumptions that the crater geometries could be parametrically defined as paraboloids. The method for testing this assumption was to carefully slice test craters (experimentally produced in an appropriate aluminum alloy) vertically through the center resulting in a readily visible cross-section of the crater geometry. 
Initially, five separate craters were cross-sectioned in this fashion. A digital image of each cross-section was then created, and the 2-D crater geometry was then hand-digitized to create a table of XY position for each crater. A 2nd order polynomial (parabolic) was fitted to the data using a least-squares approach. The differences between the fit equation and the actual data were fairly significant, and easily large enough to account for the errors found in the 3-D fits. The differences between the curve fit and the actual data were consistent between the caters. This consistency suggested that the differences were due to the fact that a parabola did not sufficiently define the generic crater geometry 12. Dual Mode (MWIR AND LADAR) Seeker for Missile Defense DTIC Science & Technology 2002-07-29 Dimensional Imaging Laser Radar Using Microchip Lasers and Geiger - Mode Avalanche July 2000. The image in figure 6 is actually 3-D since each point has...micro-dynamic information used for discrimination. The paper details why a Geiger mode flash ladar was selected for this application and how its...is illustrated in Figure 3 for a direct detection ladar operating in the Geiger mode where a single returned photon is detected. Curves represent 4 13. 3D Building Modeling and Reconstruction using Photometric Satellite and Aerial Imageries NASA Astrophysics Data System (ADS) Izadi, Mohammad In this thesis, the problem of three dimensional (3D) reconstruction of building models using photometric satellite and aerial images is investigated. Here, two systems are pre-sented: 1) 3D building reconstruction using a nadir single-view image, and 2) 3D building reconstruction using slant multiple-view aerial images. The first system detects building rooftops in orthogonal aerial/satellite images using a hierarchical segmentation algorithm and a shadow verification approach. 
The heights of detected buildings are then estimated using a fuzzy rule-based method, which measures the height of a building by comparing its predicted shadow region with the actual shadow evidence in the image. This system finally generated a KML (Keyhole Markup Language) file as the output, that contains 3D models of detected buildings. The second system uses the geolocation information of a scene containing a building of interest and uploads all slant-view images that contain this scene from an input image dataset. These images are then searched automatically to choose image pairs with different views of the scene (north, east, south and west) based on the geolocation and auxiliary data accompanying the input data (metadata that describes the acquisition parameters at the capture time). The camera parameters corresponding to these images are refined using a novel point matching algorithm. Next, the system independently reconstructs 3D flat surfaces that are visible in each view using an iterative algorithm. 3D surfaces generated for all views are combined, and redundant surfaces are removed to create a complete set of 3D surfaces. Finally, the combined 3D surfaces are connected together to generate a more complete 3D model. For the experimental results, both presented systems are evaluated quantitatively and qualitatively and different aspects of the two systems including accuracy, stability, and execution time are discussed. 14. 3D exploitation of large urban photo archives NASA Astrophysics Data System (ADS) Cho, Peter; Snavely, Noah; Anderson, Ross 2010-04-01 Recent work in computer vision has demonstrated the potential to automatically recover camera and scene geometry from large collections of uncooperatively-collected photos. At the same time, aerial ladar and Geographic Information System (GIS) data are becoming more readily accessible. 
In this paper, we present a system for fusing these data sources in order to transfer 3D and GIS information into outdoor urban imagery. Applying this system to 1000+ pictures shot of the lower Manhattan skyline and the Statue of Liberty, we present two proof-of-concept examples of geometry-based photo enhancement which are difficult to perform via conventional image processing: feature annotation and image-based querying. In these examples, high-level knowledge projects from 3D world-space into georegistered 2D image planes and/or propagates between different photos. Such automatic capabilities lay the groundwork for future real-time labeling of imagery shot in complex city environments by mobile smart phones. 15. Uncertainty preserving patch-based online modeling for 3D model acquisition and integration from passive motion imagery NASA Astrophysics Data System (ADS) Tang, Hao; Chang, Peng; Molina, Edgardo; Zhu, Zhigang 2012-06-01 In both military and civilian applications, abundant data from diverse sources captured on airborne platforms are often available for a region attracting interest. Since the data often includes motion imagery streams collected from multiple platforms flying at different altitudes, with sensors of different field of views (FOVs), resolutions, frame rates and spectral bands, it is imperative that a cohesive site model encompassing all the information can be quickly built and presented to the analysts. In this paper, we propose to develop an Uncertainty Preserving Patch-based Online Modeling System (UPPOMS) leading towards the automatic creation and updating of a cohesive, geo-registered, uncertainty-preserving, efficient 3D site terrain model from passive imagery with varying field-of-views and phenomenologies. 
The proposed UPPOMS has the following technical thrusts that differentiate our approach from others: (1) An uncertaintypreserved, patch-based 3D model is generated, which enables the integration of images captured with a mixture of NFOV and WFOV and/or visible and infrared motion imagery sensors. (2) Patch-based stereo matching and multi-view 3D integration are utilized, which are suitable for scenes with many low texture regions, particularly in mid-wave infrared images. (3) In contrast to the conventional volumetric algorithms, whose computational and storage costs grow exponentially with the amount of input data and the scale of the scene, the proposed UPPOMS system employs an online algorithmic pipeline, and scales well to large amount of input data. Experimental results and discussions of future work will be provided. 16. Lift-Off: Using Reference Imagery and Freehand Sketching to Create 3D Models in VR. PubMed Jackson, Bret; Keefe, Daniel F 2016-04-01 Three-dimensional modeling has long been regarded as an ideal application for virtual reality (VR), but current VR-based 3D modeling tools suffer from two problems that limit creativity and applicability: (1) the lack of control for freehand modeling, and (2) the difficulty of starting from scratch. To address these challenges, we present Lift-Off, an immersive 3D interface for creating complex models with a controlled, handcrafted style. Artists start outside of VR with 2D sketches, which are then imported and positioned in VR. Then, using a VR interface built on top of image processing algorithms, 2D curves within the sketches are selected interactively and "lifted" into space to create a 3D scaffolding for the model. Finally, artists sweep surfaces along these curves to create 3D models. Evaluations are presented for both long-term users and for novices who each created a 3D sailboat model from the same starting sketch. 
Qualitative results are positive, with the visual style of the resulting models of animals and other organic subjects as well as architectural models matching what is possible with traditional fine art media. In addition, quantitative data from logging features built into the software are used to characterize typical tool use and suggest areas for further refinement of the interface. 17. Dubai 3d Textured Mesh Using High Quality Resolution Vertical/oblique Aerial Imagery NASA Astrophysics Data System (ADS) Tayeb Madani, Adib; Ziad Ahmad, Abdullateef; Christoph, Lueken; Hammadi, Zamzam; Manal Abdullah Sabeal, Manal Abdullah x. 2016-06-01 Providing high quality 3D data with reasonable quality and cost were always essential, affording the core data and foundation for developing an information-based decision-making tool of urban environments with the capability of providing decision makers, stakeholders, professionals, and public users with 3D views and 3D analysis tools of spatial information that enables real-world views. Helps and assist in improving users' orientation and also increase their efficiency in performing their tasks related to city planning, Inspection, infrastructures, roads, and cadastre management. In this paper, the capability of multi-view Vexcel UltraCam Osprey camera images is examined to provide a 3D model of building façades using an efficient image-based modeling workflow adopted by commercial software. The main steps of this work include: Specification, point cloud generation, and 3D modeling. After improving the initial values of interior and exterior parameters at first step, an efficient image matching technique such as Semi Global Matching (SGM) is applied on the images to generate point cloud. Then, a mesh model of points is calculated and refined to obtain an accurate model of buildings. Finally, a texture is assigned to mesh in order to create a realistic 3D model. 
The resulting model has provided enough LoD2 details of the building based on visual assessment. The objective of this paper is neither comparing nor promoting a specific technique over the other and does not mean to promote a sensor-based system over another systems or mechanism presented in existing or previous paper. The idea is to share experience. 18. Detection of flood effects in montane streams based on fusion of 2D and 3D information from UAV imagery NASA Astrophysics Data System (ADS) Langhammer, Jakub; Vacková, Tereza 2017-04-01 In the contribution, we are presenting a novel method, enabling objective detection and classification of the alluvial features resulting from flooding, based on the imagery, acquired by the unmanned aerial vehicles (UAVs, drones). We have proposed and tested a workflow, using two key data products of the UAV photogrammetry - the 2D orthoimage and 3D digital elevation model, together with derived information on surface texture for the consequent classification of erosional and depositional features resulting from the flood. The workflow combines the photogrammetric analysis of the UAV imagery, texture analysis of the DEM, and the supervised image classification. Application of the texture analysis and use of DEM data is aimed to enhance 2D information, resulting from the high-resolution orthoimage by adding the newly derived bands, which enhance potential for detection and classification of key types of fluvial features in the stream and the floodplain. The method was tested on the example of a snowmelt-driven flood in a montane stream in Sumava Mts., Czech Republic, Central Europe, that occurred in December 2015. Using the UAV platform DJI Inspire 1 equipped with the RGB camera there was acquired imagery covering a 1 km long stretch of a meandering creek with elevated fluvial dynamics. 
Agisoft Photoscan Pro was used to derive a point cloud and further the high-resolution seamless orthoimage and DEM, Orfeo toolkit and SAGA GIS tools were used for DEM analysis. From the UAV-based data inputs, a multi-band dataset was derived as a source for the consequent classification of fluvial landforms. The RGB channels of the derived orthoimage were completed by the selected texture feature layers and the information on 3D properties of the riverscape - the normalized DEM and terrain ruggedness. Haralick features, derived from the RGB channels, are used for extracting information on the surface texture, the terrain ruggedness index is used as a measure of local topographical 19. LADAR for structural damage detection NASA Astrophysics Data System (ADS) Moosa, Adil G.; Fu, Gongkang 1999-12-01 LADAR here stands for laser radar, using laser reflectivity for measurement. This paper presents a new technique using LADAR for structure evaluation. It is experimentally investigated in the laboratory. With cooperation of the US Federal Highway Administration, a recently developed LADAR system was used to measure structural deformation. The data were then treated for reducing noise and used to derive multiple features for diagnosis. The results indicate a promising direction of nondestructive evaluation using LADAR. 20. Assimilation of high resolution satellite imagery into the 3D-CMCC forest ecosystem model NASA Astrophysics Data System (ADS) Natali, S.; Collalti, A.; Candini, A.; Della Vecchia, A.; Valentini, R. 2012-04-01 The use of satellite observations for the accurate monitoring of the terrestrial biosphere has been carried out since the very early stage of remote sensing applications. The possibility to observe the ground surface with different wavelengths and different observation modes (namely active and passive observations) has given to the scientific community an invaluable tool for the observation of wide areas with a resolution down to the single tree. 
On the other hand, the continuous development of forest ecosystem models has permitted to perform simulations of complex ("natural") forest scenarios to evaluate forest status, forest growth and future dynamics. Both remote sensing and modelling forest assessment methods have advantages and disadvantages that could be overcome by the adoption of an integrated approach. In the framework of the European Space Agency Project KLAUS, high resolution optical satellite data has been integrated/assimilated into a forest ecosystem model (named 3D-CMCC) specifically developed for multi-species, multi-age forests. 3D-CMCC permits to simulate forest areas with different forest layers, with different trees at different age on the same point. Moreover, the model permits to simulate management activities on the forest, thus evaluating the carbon stock evolution following a specific management scheme. The model has been modified including satellite data at 10m resolution, permitting the use of directly measured information, adding to the model the real phenological cycle of each simulated point. Satellite images have been collected by the JAXA ALOS-AVNIR-2 sensor. The integration schema has permitted to identify a spatial domain in which each pixel is characterised by a forest structure (species, ages, soil parameters), meteo-climatological parameters and estimated Leaf Area Index from satellite. The resulting software package (3D-CMCC-SAT) is built around 3D-CMCC: 2D / 3D input datasets are processed iterating on each point of the 1. Extracting and analyzing micro-Doppler from ladar signatures NASA Astrophysics Data System (ADS) Tahmoush, Dave 2015-05-01 Ladar and other 3D imaging modalities have the capability of creating 3D micro-Doppler to analyze the micro-motions of human subjects. An additional capability to the recognition of micro-motion is the recognition of the moving part, such as the hand or arm. 
Combined with measured RCS values of the body, ladar imaging can be used to ground-truth the more sensitive radar micro-Doppler measurements and associate the moving part of the subject with the measured Doppler and RCS from the radar system. The 3D ladar signatures can also be used to classify activities and actions on their own, achieving an 86% accuracy using a micro-Doppler based classification strategy. 2. 3-D Raman Imagery and Atomic Force Microscopy of Ancient Microscopic Fossils NASA Astrophysics Data System (ADS) Schopf, J. 2003-12-01 Investigations of the Precambrian (~540- to ~3,500-Ma-old) fossil record depend critically on identification of authentic microbial fossils. Combined with standard paleontologic studies (e.g., of paleoecologic setting, population structure, cellular morphology, preservational variants), two techniques recently introduced to such studies -- Raman imagery and atomic force microscopy -- can help meet this need. Laser-Raman imagery is a non-intrusive, non-destructive technique that can be used to demonstrate a micron-scale one-to-one correlation between optically discernable morphology and the organic (kerogenous) composition of individual microbial fossils(1,2), a prime indicator of biogenicity. Such analyses can be used to characterize the molecular-structural makeup of organic-walled microscopic fossils both in acid-resistant residues and in petrographic thin sections, and whether the fossils analyzed are exposed at the upper surface of, or are embedded within (to depths >65 microns), the section studied. By providing means to map chemically, in three dimensions, whole fossils or parts of such fossils(3), Raman imagery can also show the presence of cell lumina, interior cellular cavities, another prime indicator of biogenicity. Atomic force microscopy (AFM) has been used to visualize the nanometer-scale structure of the kerogenous components of single Precambrian microscopic fossils(4). 
Capable of analyzing minute fragments of ancient organic matter exposed at the upper surface of thin sections (or of kerogen particles deposited on flat surfaces), such analyses hold promise not only for discriminating between biotic and abiotic micro-objects but for elucidation of the domain size -- and, thus, the degree of graphitization -- of the graphene subunits of the carbonaceous matter analyzed. These techniques -- both new to paleobiology -- can provide useful insight into the biogenicity and geochemical maturity of ancient organic matter. References: (1) Kudryavtsev, A.B. et 3. The Maradi fault zone: 3-D imagery of a classic wrench fault in Oman SciTech Connect Neuhaus, D. 1993-09-01 The Maradi fault zone extends for almost 350 km in a north-northwest-south-southeast direction from the Oman Mountain foothills into the Arabian Sea, thereby dissecting two prolific hydrocarbon provinces, the Ghaba and Fahud salt basins. During its major Late Cretaceous period of movement, the Maradi fault zone acted as a left-lateral wrench fault. An early exploration campaign based on two-dimensional seismic targeted at fractured Cretaceous carbonates had mixed success and resulted in the discovery of one producing oil field. The structural complexity, rapidly varying carbonate facies, and uncertain fracture distribution prevented further drilling activity. In 1990 a three-dimensional (3-D) seismic survey covering some 500 km² was acquired over the transpressional northern part of the Maradi fault zone. The good data quality and the focusing power of 3-D has enabled stunning insight into the complex structural style of a "textbook" wrench fault, even at deeper levels and below reverse faults hitherto unexplored. Subtle thickness changes within the carbonate reservoir and the unconformably overlying shale seal provided the tool for the identification of possible shoals and depocenters. 
Horizon attribute maps revealed in detail the various structural components of the wrench assemblage and highlighted areas of increased small-scale faulting/fracturing. The results of four recent exploration wells will be demonstrated and their impact on the interpretation discussed. 4. Textured digital elevation model formation from low-cost UAV LADAR/digital image data NASA Astrophysics Data System (ADS) Bybee, Taylor C.; Budge, Scott E. 2015-05-01 Textured digital elevation models (TDEMs) have valuable use in precision agriculture, situational awareness, and disaster response. However, scientific-quality models are expensive to obtain using conventional aircraft-based methods. The cost of creating an accurate textured terrain model can be reduced by using a low-cost (<20k) UAV system fitted with ladar and electro-optical (EO) sensors. A texel camera fuses calibrated ladar and EO data upon simultaneous capture, creating a texel image. This eliminates the problem of fusing the data in a post-processing step and enables both 2D- and 3D-image registration techniques to be used. This paper describes formation of TDEMs using simulated data from a small UAV gathering swaths of texel images of the terrain below. Being a low-cost UAV, only a coarse knowledge of position and attitude is known, and thus both 2D- and 3D-image registration techniques must be used to register adjacent swaths of texel imagery to create a TDEM. The process of creating an aggregate texel image (a TDEM) from many smaller texel image swaths is described. The algorithm is seeded with the rough estimate of position and attitude of each capture. Details such as the required amount of texel image overlap, registration models, simulated flight patterns (level and turbulent), and texture image formation are presented. In addition, examples of such TDEMs are shown and analyzed for accuracy. 5. 
Inlining 3d Reconstruction, Multi-Source Texture Mapping and Semantic Analysis Using Oblique Aerial Imagery Frommholz, D.; Linkiewicz, M.; Poznanska, A. M. 2016-06-01 This paper proposes an in-line method for the simplified reconstruction of city buildings from nadir and oblique aerial images that at the same time are being used for multi-source texture mapping with minimal resampling. Further, the resulting unrectified texture atlases are analyzed for façade elements like windows to be reintegrated into the original 3D models. Tests on real-world data of Heligoland/ Germany comprising more than 800 buildings exposed a median positional deviation of 0.31 m at the façades compared to the cadastral map, a correctness of 67% for the detected windows and good visual quality when being rendered with GPU-based perspective correction. As part of the process building reconstruction takes the oriented input images and transforms them into dense point clouds by semi-global matching (SGM). The point sets undergo local RANSAC-based regression and topology analysis to detect adjacent planar surfaces and determine their semantics. Based on this information the roof, wall and ground surfaces found get intersected and limited in their extension to form a closed 3D building hull. For texture mapping the hull polygons are projected into each possible input bitmap to find suitable color sources regarding the coverage and resolution. Occlusions are detected by ray-casting a full-scale digital surface model (DSM) of the scene and stored in pixel-precise visibility maps. These maps are used to derive overlap statistics and radiometric adjustment coefficients to be applied when the visible image parts for each building polygon are being copied into a compact texture atlas without resampling whenever possible. The atlas bitmap is passed to a commercial object-based image analysis (OBIA) tool running a custom rule set to identify windows on the contained façade patches. 
Following multi-resolution segmentation and classification based on brightness and contrast differences potential window objects are evaluated against geometric constraints and 6. Very fast road database verification using textured 3D city models obtained from airborne imagery Bulatov, Dimitri; Ziems, Marcel; Rottensteiner, Franz; Pohl, Melanie 2014-10-01 7. 3D Reconstruction of a Shallow Archaeological Site From High Resolution Acoustic Imagery: A Case Study Plets, R. M.; Dix, J. K.; Adams, J. R.; Best, A. I. 2005-12-01 High resolution acoustic surveying for buried objects in the shallow waters of the inter-tidal to sub-tidal zone is a major challenge to many sectors of the marine surveying community. This is a consequence of a number of issues such as the relationship between water depth and acoustic acquisition geometry; problems of vessel induced bubble clouds reducing the signal-to-noise (SNR) ratio; and the necessity of high spatial survey accuracy in three-dimensions. These challenges are particularly acute for the marine archaeological community, who are frequently required to non-destructively investigate shallow-water (< 5 m) sites. This paper addresses these challenges and demonstrates the potential of imaging buried objects in extremely shallow environments by describing a seamless marine archaeological and geophysical investigation of a buried shipwreck: Henry V's 'great flagship', the Grace Dieu (1418). The site, located in the Hamble River (UK), is typically covered by 2-5 m of water, and is partially buried within muddy inter-tidal sediments. At exceptionally low tides, during the spring equinox, a few of the marginal timbers are exposed. The marine survey utilised three different deployment methods of a Chirp system: two 2D Chirp systems, each emitting different frequencies and accompanied by different navigational systems (DGPS versus RTK), and a 3D Chirp system with RTK positioning capability. 
In all cases, the source was towed over the site using diver power. Close survey line spacing, accurate navigation and decimetre scale vertical and horizontal resolution acoustic data enabled the construction of a pseudo and full 3D image of this buried wreck site. This has been calibrated against known archaeological site investigation data and an RTK-GPS terrestrial survey. This data has identified the true plan form and dimensions of the remaining segments of the vessel, supporting the assertion that it was the most significant naval design for over two centuries. It has 8. Optimal-tradeoff circular harmonic function filters for 3D target recognition Vijaya Kumar, Bhagavatula V. K.; Xie, Chunyan; Mahalanobis, Abhijit 2003-09-01 3D target recognition is of significant interest because representing the object in 3D space could essentially provide a solution to pose variation and self-occlusion problems that are big challenges in 2D pattern recognition. Correlation filters have been used in a variety of 2D pattern matching applications and many correlation filter designs have been developed to handle problems such as rotations. Correlation filters also offer other benefits such as shift-invariance, graceful degradation and closed-form solutions. The 3D extension of correlation filter is a natural extension to handle the 3D pattern recognition problem. In this paper, we propose a 3D correlation filter design method based on cylindrical circular harmonic function (CCHF) and use LADAR imagery to illustrate the good performance of CCHF filters. Johnson, Kenneth; Vaidyanathan, Mohan; Xue, Song; Tennant, William E.; Kozlowski, Lester J.; Hughes, Gary W.; Smith, Duane D. 2001-09-01 We are developing a novel 2D focal plane array (FPA) with read-out integrated circuit (ROIC) on a single chip for 3D laser radar imaging. The ladar will provide high-resolution range and range-resolved intensity images for detection and identification of difficult targets. 
The initial full imaging-camera-on-a-chip system will be a 64 by 64 element, 100-micrometers pixel-size detector array that is directly bump bonded to a low-noise 64 by 64 array silicon CMOS-based ROIC. The architecture is scalable to 256 by 256 or higher arrays depending on the system application. The system will provide all the required electronic processing at pixel level and the smart FPA enables directly producing the 3D or 4D format data to be captured with a single laser pulse. The detector arrays are made of uncooled InGaAs PIN device for SWIR imaging at 1.5 micrometers wavelength and cooled HgCdTe PIN device for MWIR imaging at 3.8 micrometers wavelength. We are also investigating concepts using multi-color detector arrays for simultaneous imaging at multiple wavelengths that would provide additional spectral dimension capability for enhanced detection and identification of deep-hide targets. The system is suited for flash ladar imaging, for combat identification of ground targets from airborne platforms, flash-ladar imaging seekers, and autonomous robotic/automotive vehicle navigation and collision avoidance applications. 10. Automatic Detection, Segmentation and Classification of Retinal Horizontal Neurons in Large-scale 3D Confocal Imagery SciTech Connect Karakaya, Mahmut; Kerekes, Ryan A; Gleason, Shaun Scott; Martins, Rodrigo; Dyer, Michael 2011-01-01 Automatic analysis of neuronal structure from wide-field-of-view 3D image stacks of retinal neurons is essential for statistically characterizing neuronal abnormalities that may be causally related to neural malfunctions or may be early indicators for a variety of neuropathies. In this paper, we study classification of neuron fields in large-scale 3D confocal image stacks, a challenging neurobiological problem because of the low spatial resolution imagery and presence of intertwined dendrites from different neurons. 
We present a fully automated, four-step processing approach for neuron classification with respect to the morphological structure of their dendrites. In our approach, we first localize each individual soma in the image by using morphological operators and active contours. By using each soma position as a seed point, we automatically determine an appropriate threshold to segment dendrites of each neuron. We then use skeletonization and network analysis to generate the morphological structures of segmented dendrites, and shape-based features are extracted from network representations of each neuron to characterize the neuron. Based on qualitative results and quantitative comparisons, we show that we are able to automatically compute relevant features that clearly distinguish between normal and abnormal cases for postnatal day 6 (P6) horizontal neurons. 11. Automatic detection, segmentation and characterization of retinal horizontal neurons in large-scale 3D confocal imagery Karakaya, Mahmut; Kerekes, Ryan A.; Gleason, Shaun S.; Martins, Rodrigo A. P.; Dyer, Michael A. 2011-03-01 Automatic analysis of neuronal structure from wide-field-of-view 3D image stacks of retinal neurons is essential for statistically characterizing neuronal abnormalities that may be causally related to neural malfunctions or may be early indicators for a variety of neuropathies. In this paper, we study classification of neuron fields in large-scale 3D confocal image stacks, a challenging neurobiological problem because of the low spatial resolution imagery and presence of intertwined dendrites from different neurons. We present a fully automated, four-step processing approach for neuron classification with respect to the morphological structure of their dendrites. In our approach, we first localize each individual soma in the image by using morphological operators and active contours. 
By using each soma position as a seed point, we automatically determine an appropriate threshold to segment dendrites of each neuron. We then use skeletonization and network analysis to generate the morphological structures of segmented dendrites, and shape-based features are extracted from network representations of each neuron to characterize the neuron. Based on qualitative results and quantitative comparisons, we show that we are able to automatically compute relevant features that clearly distinguish between normal and abnormal cases for postnatal day 6 (P6) horizontal neurons. 12. Knowledge Based 3d Building Model Recognition Using Convolutional Neural Networks from LIDAR and Aerial Imageries Alidoost, F.; Arefi, H. 2016-06-01 In recent years, with the development of the high resolution data acquisition technologies, many different approaches and algorithms have been presented to extract the accurate and timely updated 3D models of buildings as a key element of city structures for numerous applications in urban mapping. In this paper, a novel and model-based approach is proposed for automatic recognition of buildings' roof models such as flat, gable, hip, and pyramid hip roof models based on deep structures for hierarchical learning of features that are extracted from both LiDAR and aerial ortho-photos. The main steps of this approach include building segmentation, feature extraction and learning, and finally building roof labeling in a supervised pre-trained Convolutional Neural Network (CNN) framework to have an automatic recognition system for various types of buildings over an urban area. In this framework, the height information provides invariant geometric features for convolutional neural network to localize the boundary of each individual roofs. 
CNN is a kind of feed-forward neural network with the multilayer perceptron concept which consists of a number of convolutional and subsampling layers in an adaptable structure and it is widely used in pattern recognition and object detection application. Since the training dataset is a small library of labeled models for different shapes of roofs, the computation time of learning can be decreased significantly using the pre-trained models. The experimental results highlight the effectiveness of the deep learning approach to detect and extract the pattern of buildings' roofs automatically considering the complementary nature of height and RGB information. 13. 3D Case Studies of Monitoring Dynamic Structural Tests using Long Exposure Imagery McCarthy, D. M. J.; Chandler, J. H.; Palmeri, A. 2014-06-01 Structural health monitoring uses non-destructive testing programmes to detect long-term degradation phenomena in civil engineering structures. Structural testing may also be carried out to assess a structure's integrity following a potentially damaging event. Such investigations are increasingly carried out with vibration techniques, in which the structural response to artificial or natural excitations is recorded and analysed from a number of monitoring locations. Photogrammetry is of particular interest here since a very high number of monitoring locations can be measured using just a few images. To achieve the necessary imaging frequency to capture the vibration, it has been necessary to reduce the image resolution at the cost of spatial measurement accuracy. Even specialist sensors are limited by a compromise between sensor resolution and imaging frequency. To alleviate this compromise, a different approach has been developed and is described in this paper. Instead of using high-speed imaging to capture the instantaneous position at each epoch, long-exposure images are instead used, in which the localised image of the object becomes blurred. 
The approach has been extended to create 3D displacement vectors for each target point via multiple camera locations, which allows the simultaneous detection of transverse and torsional mode shapes. The proposed approach is frequency invariant allowing monitoring of higher modal frequencies irrespective of a sampling frequency. Since there is no requirement for imaging frequency, a higher image resolution is possible for the most accurate spatial measurement. The results of a small scale laboratory test using off-the-shelf consumer cameras are demonstrated. A larger experiment also demonstrates the scalability of the approach. 14. Quantification of gully volume using very high resolution DSM generated through 3D reconstruction from airborne and field digital imagery Castillo, Carlos; Zarco-Tejada, Pablo; Laredo, Mario; Gómez, Jose Alfonso 2013-04-01 Major advances have been made recently in automatic 3D photo-reconstruction techniques using uncalibrated and non-metric cameras (James and Robson, 2012). However, its application on soil conservation studies and landscape feature identification is currently at the outset. The aim of this work is to compare the performance of a remote sensing technique using a digital camera mounted on an airborne platform, with 3D photo-reconstruction, a method already validated for gully erosion assessment purposes (Castillo et al., 2012). A field survey was conducted in November 2012 in a 250 m-long gully located in field crops on a Vertisol in Cordoba (Spain). The airborne campaign was conducted with a 4000x3000 digital camera installed onboard an aircraft flying at 300 m above ground level to acquire 6 cm resolution imagery. A total of 990 images were acquired over the area ensuring a large overlap in the across- and along-track direction of the aircraft. An ortho-mosaic and the digital surface model (DSM) were obtained through automatic aerial triangulation and camera calibration methods. 
For the field-level photo-reconstruction technique, the gully was divided in several reaches to allow appropriate reconstruction (about 150 pictures taken per reach) and, finally, the resulting point clouds were merged into a unique mesh. A centimetric-accuracy GPS provided a benchmark dataset for gully perimeter and distinguishable reference points in order to allow the assessment of measurement errors of the airborne technique and the georeferenciation of the photo-reconstruction 3D model. The uncertainty on the gully limits definition was explicitly addressed by comparison of several criteria obtained by 3D models (slope and second derivative) with the outer perimeter obtained by the GPS operator identifying visually the change in slope at the top of the gully walls. In this study we discussed the magnitude of planimetric and altimetric errors and the differences observed between the 15. Ladar image recognition using synthetically derived discrete phase-amplitude filters in an optical correlator Calloway, David; Goldstein, Dennis H. 2002-03-01 Correlation filters using computer-generated laser radar imagery have been constructed. This paper describes how the filters were constructed and reports correlating result with the synthetic imagery used in the training set, with real ladar imagery of equivalent targets, and with real ladar imagery of false targets. A comprehensive set of images was collected on the Eglin Test Range using a direct-detect scanning ladar mounted on a 100-meter tower. Various targets were placed on a large turntable and ladar range and intensity data were collected at various aspect and depression angles. The Irma scene generation software package was then used to generate synthetic ladar imagery for these targets at a similar set of range, aspect, and depression angles. Several different techniques were used to generate the filters and to process the imagery used in this research. This paper describes one of the most successful techniques. 
The paper provides details on the iterative approach used to generate composite filters, describes how they were applied, and compares the results produced from synthetic and real target imagery. This experiment was considered a success since the synthetically derived filters were capable of recognizing images of real targets while rejecting false targets. Broome, Kent W.; Carstens, Anne M.; Hudson, J. Roger; Yates, Kenneth L. 1997-08-01 The Armament Directorate of Wright Laboratory is tasked with pursuing technologies that lead towards autonomous guidance for conventional munitions. Seeker technologies pursued include SAR, imaging infrared, millimeter wave, and laser radar seekers. Laser Radar, or LADAR, systems using uncooled diode pumped solid state lasers operating around 1 micrometers are active sensors providing high resolution range and intensity imagery. LADAR is not susceptible to variations common to thermal IR systems, allowing greater simplicity of autonomous target acquisition algorithms. Therefore, LADAR sensors combined with advanced algorithms provide robust seeker technology capable of autonomous precision guidance. The small smart bomb (SSB) is a next generation weapon concept requiring this precision guidance. The 250 pound SSB penetrator provides the lethality of 2000 pound penetrators by delivering 50 pounds of high explosive with surgical precision. Space limitations, tightly controlled impact conditions, and high weapon velocities suggest laser radar as a candidate seeker. This paper discusses phase I of the DASSL program in which SSB weapon requirements are flowed down to seeker requirements through a structured system requirement analysis, and discusses how these seeker requirements affect seeker design. 17. 
Ladar image synthesis with comprehensive sensor model Wellfare, Michael R.; Love, Leslie A.; McCarley, Karen A.; Prestwood, Lee 1996-06-01 A new technique for realistic synthesis of ladar imagery has been developed for the Irma scene generation code, version 4.0. A wide range of phenomenological effects as well as internal sensor effects can be modeled in detail. Both solid state and CO2 time-of-flight measurement pulsed laser radars are supported for the monostatic case. Since the active range gate signal is computed, effects of multiple objects within the beam can be studied. User-definable processing operations allow evaluation of signal processing algorithms for design studies. 18. Mapping tropical biodiversity using spectroscopic imagery : characterization of structural and chemical diversity with 3-D radiative transfer modeling Feret, J. B.; Gastellu-Etchegorry, J. P.; Lefèvre-Fonollosa, M. J.; Proisy, C.; Asner, G. P. 2014-12-01 The accelerating loss of biodiversity is a major environmental trend. Tropical ecosystems are particularly threatened due to climate change, invasive species, farming and natural resources exploitation. Recent advances in remote sensing of biodiversity confirmed the potential of high spatial resolution spectroscopic imagery for species identification and biodiversity mapping. Such information bridges the scale-gap between small-scale, highly detailed field studies and large-scale, low-resolution satellite observations. In order to produce fine-scale resolution maps of canopy alpha-diversity and beta-diversity of the Peruvian Amazonian forest, we designed, applied and validated a method based on spectral variation hypothesis to CAO AToMS (Carnegie Airborne Observatory Airborne Taxonomic Mapping System) images, acquired from 2011 to 2013. There is a need to understand on a quantitative basis the physical processes leading to this spectral variability. 
This spectral variability mainly depends on canopy chemistry, structure, and sensor's characteristics. 3D radiative transfer modeling provides a powerful framework for the study of the relative influence of each of these factors in dense and complex canopies. We simulated series of spectroscopic images with the 3D radiative model DART, with variability gradients in terms of leaf chemistry, individual tree structure, spatial and spectral resolution, and applied methods for biodiversity mapping. This sensitivity study allowed us to determine the relative influence of these factors on the radiometric signal acquired by different types of sensors. Such study is particularly important to define the domain of validity of our approach, to refine requirements for the instrumental specifications, and to help preparing hyperspectral spatial missions to be launched at the horizon 2015-2025 (EnMAP, PRISMA, HISUI, SHALOM, HYSPIRI, HYPXIM). Simulations in preparation include topographic variations in order to estimate the robustness 19. Combining Public Domain and Professional Panoramic Imagery for the Accurate and Dense 3d Reconstruction of the Destroyed Bel Temple in Palmyra Wahbeh, W.; Nebiker, S.; Fangi, G. 2016-06-01 This paper exploits the potential of dense multi-image 3d reconstruction of destroyed cultural heritage monuments by either using public domain touristic imagery only or by combining the public domain imagery with professional panoramic imagery. The focus of our work is placed on the reconstruction of the temple of Bel, one of the Syrian heritage monuments, which was destroyed in September 2015 by the so called "Islamic State". The great temple of Bel is considered as one of the most important religious buildings of the 1st century AD in the East with a unique design. The investigations and the reconstruction were carried out using two types of imagery. The first are freely available generic touristic photos collected from the web. 
The second are panoramic images captured in 2010 for documenting those monuments. In the paper we present a 3d reconstruction workflow for both types of imagery using state-of-the art dense image matching software, addressing the non-trivial challenges of combining uncalibrated public domain imagery with panoramic images with very wide base-lines. We subsequently investigate the aspects of accuracy and completeness obtainable from the public domain touristic images alone and from the combination with spherical panoramas. We furthermore discuss the challenges of co-registering the weakly connected 3d point cloud fragments resulting from the limited coverage of the touristic photos. We then describe an approach using spherical photogrammetry as a virtual topographic survey allowing the co-registration of a detailed and accurate single 3d model of the temple interior and exterior. 20. Flight test results of ladar brownout look-through capability Stelmash, Stephen; Münsterer, Thomas; Kramper, Patrick; Samuelis, Christian; Bühler, Daniel; Wegner, Matthias; Sheth, Sagar 2015-06-01 The paper discusses recent results of flight tests performed with the Airbus Defence and Space ladar system at Yuma Proving Grounds. The ladar under test was the SferiSense® system which is in operational use as an in-flight obstacle warning and avoidance system on the NH90 transport helicopter. Just minor modifications were done on the sensor firmware to optimize its performance in brownout. Also a new filtering algorithm fitted to segment dust artefacts out of the collected 3D data in real-time was employed. The results proved that this ladar sensor is capable to detect obstacles through brownout dust clouds with a depth extending up to 300 meters from the landing helicopter. 1. 
Large format geiger-mode avalanche photodiode LADAR camera Yuan, Ping; Sudharsanan, Rengarajan; Bai, Xiaogang; Labios, Eduardo; Morris, Bryan; Nicholson, John P.; Stuart, Gary M.; Danny, Harrison 2013-05-01 Recently Spectrolab has successfully demonstrated a compact 32x32 Laser Detection and Range (LADAR) camera with single photo-level sensitivity with small size, weight, and power (SWAP) budget for threedimensional (3D) topographic imaging at 1064 nm on various platforms. With 20-kHz frame rate and 500- ps timing uncertainty, this LADAR system provides coverage down to inch-level fidelity and allows for effective wide-area terrain mapping. At a 10 mph forward speed and 1000 feet above ground level (AGL), it covers 0.5 square-mile per hour with a resolution of 25 in2/pixel after data averaging. In order to increase the forward speed to fit for more platforms and survey a large area more effectively, Spectrolab is developing 32x128 Geiger-mode LADAR camera with 43 frame rate. With the increase in both frame rate and array size, the data collection rate is improved by 10 times. With a programmable bin size from 0.3 ps to 0.5 ns and 14-bit timing dynamic range, LADAR developers will have more freedom in system integration for various applications. Most of the special features of Spectrolab 32x32 LADAR camera, such as non-uniform bias correction, variable range gate width, windowing for smaller arrays, and short pixel protection, are implemented in this camera. SciTech Connect Stappaerts, E A; Scharlemann, E 2005-02-07 We report a differential synthetic aperture ladar (DSAL) concept that relaxes platform and laser requirements compared to conventional SAL. Line-of-sight translation/vibration constraints are reduced by several orders of magnitude, while laser frequency stability is typically relaxed by an order of magnitude. The technique is most advantageous for shorter laser wavelengths, ultraviolet to mid-infrared. 
Analytical and modeling results, including the effect of speckle and atmospheric turbulence, are presented. Synthetic aperture ladars are of growing interest, and several theoretical and experimental papers have been published on the subject. Compared to RF synthetic aperture radar (SAR), platform/ladar motion and transmitter bandwidth constraints are especially demanding at optical wavelengths. For mid-IR and shorter wavelengths, deviations from a linear trajectory along the synthetic aperture length have to be submicron, or their magnitude must be measured to that precision for compensation. The laser coherence time has to be the synthetic aperture transit time, or transmitter phase has to be recorded and a correction applied on detection. 3. Improved registration for 3D image creation using multiple texel images and incorporating low-cost GPS/INS measurements Budge, Scott E.; Xie, Xuan 2014-06-01 The creation of 3D imagery is an important topic in remote sensing. Several methods have been developed to create 3D images from fused ladar and digital images, known as texel images. These methods have the advantage of using both the 3D ladar information and the 2D digital imagery directly, since texel images are fused during data acquisition. A weakness of these methods is that they are dependent on correlating feature points in the digital images. This can be difficult when image perspectives are significantly different, leading to low correlation values between matching feature points. This paper presents a method to improve the quality of 3D images created using existing approaches that register multiple texel images. The proposed method incorporates relatively low accuracy measurements of the position and attitude of the texel camera from a low-cost GPS/INS into the registration process. This information can improve the accuracy and robustness of the registered texel images over methods based on point-cloud merging or image registration alone. 
In addition, the dependence on feature point correlation is eliminated. Examples illustrate the value of this method for significant image perspective differences. 4. Fusion of LADAR with SAR for precision strike SciTech Connect Cress, D.H.; Muguira, M.R. 1995-03-01 This paper presents a concept for fusing 3-dimensional image reconnaissance data with LADAR imagery for aim point refinement. The approach is applicable to fixed or quasi-fixed targets. Quasi-fixed targets are targets that are not expected to be moved between the time of reconnaissance and the time of target engagement. The 3-dimensional image data is presumed to come from standoff reconnaissance assets tens to hundreds of kilometers from the target area or acquisitions prior to hostilities. Examples are synthetic aperture radar (SAR) or stereoprocessed satellite imagery. SAR can be used to generate a 3-dimensional map of the surface through processing of data acquired with conventional SAR acquired using two closely spaced, parallel reconnaissance paths, either airborne or satellite based. Alternatively, a specialized airborne SAR having two receiving antennas may be used for data acquisition. The data sets used in this analysis are: (1) LADAR data acquired using a Hughes-Danbury system flown over a portion of Kirtland AFB during the period September 15--16, 1993; (2) two pass interferometric SAR data flown over a terrain-dominated area of Kirtland AFB; (3) 3-dimensional mapping of an urban-dominated area of the Sandia National Laboratories and adjacent cultural area extracted from aerial photography by Vexcel Corporation; (4) LADAR data acquired at Eglin AFB under Wright Laboratory's Advanced Technology Ladar System (ATLAS) program using a 60 μJ, 75 kHz CO₂ laser; and (5) two pass interferometric SAR data generated by Sandia's STRIP DCS (Data Collection System) radar corresponding to the ATLAS LADAR data.
The cultural data set was used in the urban area rather than SAR because high quality interferometric SAR data were not available for the urban-type area. 5. Echo signal modeling of imaging LADAR target simulator Xu, Rui; Shi, Rui; Wang, Xin; Li, Zhuo 2014-11-01 LADAR guidance technology is one of the most promising precision guidance technologies. With the aim of simulating the return waveform of the target, a 3D geometrical model of a target is built and a mathematical model of the target echo signal for an imaging LADAR target simulator is established by using the coordinate transformation, radar equation and ranging equation. First, the 3D geometrical data of the object model is obtained by 3D geometrical modeling. Then, target coordinate system and viewpoint coordinate system are created respectively. 3D geometrical model is built in the target coordinate system. The 3D geometrical model is transformed to the viewpoint coordinate system based on the derived relationship between the two coordinate systems. Furthermore, the range information of the target could be obtained under viewpoint coordinate system. Thus, the data of the target echo signal can be obtained by using radar equation and ranging equation. Finally, the echo signal can be exported through corresponding data interface. In order to validate the method proposed in this paper, the echo signal generated by a typical target is computed and compared with the theoretical solutions. The signals can be applied to drive target simulator to generate a physical target LADAR image. Schwartz, William C.; Wangler, Richard J.; Condatore, Lawrence A., Jr. 1995-06-01 This paper discusses new techniques for providing a 'FLIR-like', multi-pixel range receiver for applications of control and guidance by an active LADAR system. The major tradeoffs in developing a LADAR sensor with multi-pixel high resolution capabilities using conventional techniques are large size, high cost, or a slow frame rate.
SEO has conceived and is currently developing a new receiver technique using a Charge Coupled Device array element that shows great promise for overcoming all of these drawbacks. Although this technique is a new approach for LADAR sensors, it is a concept that has been used for decades in the receivers of common-module FLIR systems. 7. Study on key techniques for synthetic aperture ladar system Cao, Changqing; Zeng, Xiaodong; Feng, Zhejun; Zhang, Wenrui; Su, Lei 2008-03-01 The spatial resolution of a conventional imaging LADAR system is constrained by the diffraction limit of the telescope aperture. The purpose of this work is to investigate Synthetic Aperture Imaging LADAR (SAIL), which employs aperture synthesis with coherent laser radar to overcome the diffraction limit and achieve fine-resolution, long range, two-dimensional imaging with modest aperture diameters. Because of many advantages, LADAR based on synthetic aperture theory is becoming a research hotspot and approaching practicality. Synthetic Aperture LADAR (SAL) technology satisfies the critical need for reliable, long-range battlefield awareness. An image that takes radar tens of seconds to produce can be produced in a few thousandths of a second at optical frequencies. While radar waves respond to macroscopic features such as corners, edges, and facets, laser waves interact with microscopic surface characteristics, which results in imagery that appears more familiar and is more easily interpreted. SAL could provide high resolution optical/infrared imaging. In the present paper we have tried to answer three questions: (1) the process of collecting the samples over the large "synthetic" aperture; (2) differences between SAR and SAL; (3) the key techniques for a SAL system. The principle and progress of SAL are introduced and a typical SAL system is described. Beam stabilization, chirp laser, and heterodyne detection, which are among the most challenging aspects of SAL, are discussed in detail. 8.
Range Precision of LADAR Systems DTIC Science & Technology 2008-09-01 Some LADARs interfere received laser light with a local laser in order to measure Doppler shift [8]. These coherent LADARs are sensitive enough to...Number of Ions,” Physical Review , 72 (1):26–29 (July 1947). 8. Frehlich R. and Kavaya M. “ Coherent laser radar performance for general at- mospheric...of Coherent and Direct Detection Intensity Receivers,” Proc. SPIE Vol. 4377, Laser Radar Technology and Applications VI , 251–262 (2001). 10. Gini F 9. Comparison of 3D representations depicting micro folds: overlapping imagery vs. time-of-flight laser scanner Vaiopoulos, Aristidis D.; Georgopoulos, Andreas; Lozios, Stylianos G. 2012-10-01 A relatively new field of interest, which continuously gains grounds nowadays, is digital 3D modeling. However, the methodologies, the accuracy and the time and effort required to produce a high quality 3D model have been changing drastically the last few years. Whereas in the early days of digital 3D modeling, 3D models were only accessible to computer experts in animation, working many hours in expensive sophisticated software, today 3D modeling has become reasonably fast and convenient. On top of that, with online 3D modeling software, such as 123D Catch, nearly everyone can produce 3D models with minimum effort and at no cost. The only requirement is panoramic overlapping images, of the (still) objects the user wishes to model. This approach however, has limitations in the accuracy of the model. An objective of the study is to examine these limitations by assessing the accuracy of this 3D modeling methodology, with a Terrestrial Laser Scanner (TLS). Therefore, the scope of this study is to present and compare 3D models, produced with two different methods: 1) Traditional TLS method with the instrument ScanStation 2 by Leica and 2) Panoramic overlapping images obtained with DSLR camera and processed with 123D Catch free software. 
The main objective of the study is to evaluate advantages and disadvantages of the two 3D model producing methodologies. The area represented with the 3D models, features multi-scale folding in a cipollino marble formation. The most interesting part and most challenging to capture accurately, is an outcrop which includes vertically orientated micro folds. These micro folds have dimensions of a few centimeters while a relatively strong relief is evident between them (perhaps due to different material composition). The area of interest is located in Mt. Hymittos, Greece. 10. 3D Visualisation and Artistic Imagery to Enhance Interest in "Hidden Environments"--New Approaches to Soil Science ERIC Educational Resources Information Center Gilford, J.; Falconer, R. E.; Wade, R.; Scott-Brown, K. C. 2014-01-01 Interactive Virtual Environments (VEs) have the potential to increase student interest in soil science. Accordingly a bespoke "soil atlas" was created using Java3D as an interactive 3D VE, to show soil information in the context of (and as affected by) the over-lying landscape. To display the below-ground soil characteristics, four sets… 11. 3D Visualisation and Artistic Imagery to Enhance Interest in "Hidden Environments"--New Approaches to Soil Science ERIC Educational Resources Information Center Gilford, J.; Falconer, R. E.; Wade, R.; Scott-Brown, K. C. 2014-01-01 Interactive Virtual Environments (VEs) have the potential to increase student interest in soil science. Accordingly a bespoke "soil atlas" was created using Java3D as an interactive 3D VE, to show soil information in the context of (and as affected by) the over-lying landscape. To display the below-ground soil characteristics, four sets… 12. Generation of 3D Model for Urban area using Ikonos and Cartosat-1 Satellite Imageries with RS and GIS Techniques Rajpriya, N. R.; Vyas, A.; Sharma, S. A. 
2014-11-01 Urban design is a subject that is concerned with the shape, the surface and its physical arrangement of all kinds of urban elements. Urban design is a practical process and needs much detailed and multi-dimensional description; 3D city model-based spatial analysis gives the possibility of solving these problems. Ahmedabad is the third fastest growing city in the world, with a large amount of development in infrastructure and planning. The fabric of the city is changing and expanding at the same time, which creates the need for 3D visualization of the city to develop sustainable planning for the city. These areas have to be monitored and mapped on a regular basis and satellite remote sensing images provide a valuable and irreplaceable source for urban monitoring. With this, the derivation of structural urban types or the mapping of urban biotopes becomes possible. The present study focused on the development of a technique for 3D modeling of buildings for urban area analysis and to implement encoding standards prescribed in "OGC City GML" for urban features. An attempt has been made to develop a 3D city model with level of details 1 (LOD 1) for part of the city of Ahmedabad in the State of Gujarat, India. It shows the capability to monitor urbanization in 2D and 3D. 13. Periodic, pseudonoise waveforms for multifunction coherent ladar. PubMed Dierking, Matthew P; Duncan, Bradley D 2010-04-01 We report the use of periodic, pseudonoise waveforms in a multifunction coherent ladar system. We exploit the Doppler sensitivity of these waveforms, as well as agile processing, to enable diverse ladar functions, including high range resolution imaging, macro-Doppler imaging, synthetic aperture ladar, and range-resolved micro-Doppler imaging. We present analytic expressions and simulations demonstrating the utility of pseudonoise waveforms for each of the ladar modes.
We also discuss a laboratory pseudonoise ladar system that was developed to demonstrate range compression and range-resolved micro-Doppler imaging, as well as the phase recovery common to each of the coherent modes. 14. Geological interpretation and analysis of surface based, spatially referenced planetary imagery data using PRoGIS 2.0 and Pro3D. Barnes, R.; Gupta, S.; Giordano, M.; Morley, J. G.; Muller, J. P.; Tao, Y.; Sprinks, J.; Traxler, C.; Hesina, G.; Ortner, T.; Sander, K.; Nauschnegg, B.; Paar, G.; Willner, K.; Pajdla, T. 2015-10-01 We apply the capabilities of the geospatial environment PRoGIS 2.0 and the real time rendering viewer PRo3D to geological analysis of NASA's Mars Exploration Rover-B (MER-B Opportunity rover) and Mars Science Laboratory (MSL Curiosity rover) datasets. Short baseline and serendipitous long baseline stereo Pancam rover imagery are used to create 3D point clouds which can be combined with super-resolution images derived from Mars Reconnaissance Orbiter HiRISE orbital data, and super-resolution outcrop images derived from MER Pancam, as well as hand-lens scale images for geology and outcrop characterization at all scales. Data within the PRoViDE database are presented and accessed through the PRoGIS interface. Simple geological measurement tools are implemented within the PRoGIS and PRo3D web software to accurately measure the dip and strike of bedding in outcrops, create detailed stratigraphic logs for correlation between the areas investigated, and to develop realistic 3D models for the characterization of planetary surface processes. Annotation tools are being developed to aid discussion and dissemination of the observations within the planetary science community. 15. LADAR MOUT Evaluation (LAME) conducted at the McKenna MOUT site, Fort Benning, Georgia Grobmyer, Joseph E., Jr.; Lum, Tommy; Morris, Robert E. 2002-07-01 The Aviation and Missile Research, Development and Engineering Center (AMRDEC) of the U.S.
Army Aviation and Missile Command (AMCOM) conducted a series of Captive Flight Tests (CFT) gathering urban Laser Radar (LADAR) imagery at the McKenna Military Operations in Urban Terrain (MOUT) facility located at Fort Benning, Georgia, July 18 through August 4, 2001. PubMed Montoya, Juan; Sanchez-Rubio, Antonio; Hatch, Robert; Payson, Harold 2014-11-01 We demonstrate a ladar with 0.5 m class range resolution obtained by integrating a continuous-wave optical phased-array transmitter with a Geiger-mode avalanche photodiode receiver array. In contrast with conventional ladar systems, an array of continuous-wave sources is used to effectively pulse illuminate a target by electro-optically steering far-field fringes. From the reference frame of a point in the far field, a steered fringe appears as a pulse. Range information is thus obtained by measuring the arrival time of a pulse return from a target to a receiver pixel. This ladar system offers a number of benefits, including broad spectral coverage, high efficiency, small size, power scalability, and versatility. 17. Description and application of the 3D-CMCC FEM on multi-temporal NDVI satellite imagery and future scenarios Collalti, A.; Natali, S.; Noilè, A.; Mattiuzzi, M.; Marconi, S.; Santini, M.; Valentini, R. 2013-12-01 3D-CMCC-Forest Ecosystem Model is a process based model developed to investigate carbon fluxes and partitioning in forest ecosystems, taking into account their structural complexity. Unfortunately process oriented models need a large amount of parameters and input data, which could reduce their usability for large/regional scale applications. For this reason great attention has been progressively paid in developing methodologies to couple ecosystem models to remotely-sensed data acquisition techniques. This work aims to present 3D-CMCC-FEM, together with a procedure developed to make it use temporal NDVI information taken from ALOS-AVNIR2 LB2G high resolution satellite images. 
With this aim we compared two different case study afferent to two different Italian wide areas, National Park of Madonie (Sicily region) and Comunità Montana del Titerno ed Alto Tammaro (Campania region), respectively. In the former we used the model with a prognostic phenology approach, while in the latter we used a prognostic/diagnostic approach, using satellite data. Results show 3D-CMCC-FEM good capability to simulate GPP, NPP and carbon allocation. Moreover the model seems to be able to simulate the reduction in rates of productivity for hygrophilous species in dry years. Even though 3D-CMCC-FEM in some cases produces slight overestimation, its results may be considered statistically meaningful. In conclusion, we can assess that coupling high resolution satellite data with field information may be a successfully way to simulate physiological processes in forest ecosystems at regional scale, minimizing the loss of preliminary information. 18. Lossless to lossy compression for hyperspectral imagery based on wavelet and integer KLT transforms with 3D binary EZW Cheng, Kai-jen; Dill, Jeffrey 2013-05-01 In this paper, a lossless to lossy transform based image compression of hyperspectral images based on Integer Karhunen-Loève Transform (IKLT) and Integer Discrete Wavelet Transform (IDWT) is proposed. Integer transforms are used to accomplish reversibility. The IKLT is used as a spectral decorrelator and the 2D-IDWT is used as a spatial decorrelator. The three-dimensional Binary Embedded Zerotree Wavelet (3D-BEZW) algorithm efficiently encodes hyperspectral volumetric image by implementing progressive bitplane coding. The signs and magnitudes of transform coefficients are encoded separately. Lossy and lossless compressions of signs are implemented by conventional EZW algorithm and arithmetic coding respectively. The efficient 3D-BEZW algorithm is applied to code magnitudes. Further compression can be achieved using arithmetic coding. 
The lossless and lossy compression performance is compared with other state of the art predictive and transform based image compression methods on Airborne Visible/Infrared Imaging Spectrometer (AVIRIS) images. Results show that the 3D-BEZW performance is comparable to predictive algorithms. However, its computational cost is comparable to transform-based algorithms. 19. Initial progress in the recording of crime scene simulations using 3D laser structured light imagery techniques for law enforcement and forensic applications Altschuler, Bruce R.; Monson, Keith L. 1998-03-01 Representation of crime scenes as virtual reality 3D computer displays promises to become a useful and important tool for law enforcement evaluation and analysis, forensic identification and pathological study and archival presentation during court proceedings. Use of these methods for assessment of evidentiary materials demands complete accuracy of reproduction of the original scene, both in data collection and in its eventual virtual reality representation. The recording of spatially accurate information as soon as possible after first arrival of law enforcement personnel is advantageous for unstable or hazardous crime scenes and reduces the possibility that either inadvertent measurement error or deliberate falsification may occur or be alleged concerning processing of a scene. Detailed measurements and multimedia archiving of critical surface topographical details in a calibrated, uniform, consistent and standardized quantitative 3D coordinate method are needed. These methods would afford professional personnel in initial contact with a crime scene the means for remote, non-contacting, immediate, thorough and unequivocal documentation of the contents of the scene. Measurements of the relative and absolute global positions of objects and victims, and their dispositions within the scene before their relocation and detailed examination, could be made.
Resolution must be sufficient to map both small and large objects. Equipment must be able to map regions at varied resolution as collected from different perspectives. Progress is presented in devising methods for collecting and archiving 3D spatial numerical data from crime scenes, sufficient for law enforcement needs, by remote laser structured light and video imagery. Two types of simulation studies were done. One study evaluated the potential of 3D topographic mapping and 3D telepresence using a robotic platform for explosive ordnance disassembly. The second study involved using the laser mapping system on a 20. 3D visualisation and artistic imagery to enhance interest in hidden environments' - new approaches to soil science Gilford, J.; Falconer, R. E.; Wade, R.; Scott-Brown, K. C. 2014-09-01 Interactive Virtual Environments (VEs) have the potential to increase student interest in soil science. Accordingly a bespoke 'soil atlas' was created using Java3D as an interactive 3D VE, to show soil information in the context of (and as affected by) the over-lying landscape. To display the below-ground soil characteristics, four sets of artistic illustrations were produced, each set showing the effects of soil organic-matter density and water content on fungal density, to determine potential for visualisations and interactivity in stimulating interest in soil and soil illustrations, interest being an important factor in facilitating learning. The illustrations were created using 3D modelling packages, and a wide range of styles were produced. This allowed a preliminary study of the relative merits of different artistic styles, scientific-credibility, scale, abstraction and 'realism' (e.g. photo-realism or realism of forms), and any relationship between these and the level of interest indicated by the study participants in the soil visualisations and VE. 
The study found significant differences in mean interest ratings for different soil illustration styles, as well as in the perception of scientific-credibility of these styles, albeit for both measures there was considerable difference of attitude between participants about particular styles. There was also found to be a highly significant positive correlation between participants rating styles highly for interest and highly for scientific-credibility. There was furthermore a particularly high interest rating among participants for seeing temporal soil processes illustrated/animated, suggesting this as a particularly promising method for further stimulating interest in soil illustrations and soil itself. 1. 3D Spatial and Spectral Fusion of Terrestrial Hyperspectral Imagery and Lidar for Hyperspectral Image Shadow Restoration Applied to a Geologic Outcrop Hartzell, P. J.; Glennie, C. L.; Hauser, D. L.; Okyay, U.; Khan, S.; Finnegan, D. C. 2016-12-01 2. Use of stereoscopic satellite imagery for 3D mapping of bedrock structure in West Antarctica: An example from the northern Ford Ranges Contreras, A.; Siddoway, C. S.; Porter, C.; Gottfried, M. 2012-12-01 In coastal West Antarctica, crustal-scale faults have been minimally mapped using traditional ground-based methods but regional scale structures are inferred mainly on the basis of low resolution potential fields data from airborne geophysical surveys (15 km flightline spacing). We use a new approach to detailed mapping of faults, shear zones, and intrusive relationships using panchromatic and multispectral imagery draped upon a digital elevation model (DEM). Our work focuses on the Fosdick Mountains, a culmination of lower middle crustal rocks exhumed at c. 100 Ma by dextral oblique detachment faulting. 
Ground truth exists for extensive areas visited during field studies in 2005-2011, providing a basis for spectral analysis of 8-band WorldView-02 imagery for detailed mapping of complex granite- migmatite relationships on the north side of the Fosdick range. A primary aim is the creation of a 3D geological map using the results of spectral analysis merged with a DEM computed from a stereographic pair of high resolution panchromatic images (sequential scenes, acquired 45 seconds apart). DEMs were computed using ERDAS Imagine™ LPS eATE, refined by MATLAB-based interpolation scripts to remove artifacts in the terrain model according to procedures developed by the Polar Geospatial Center (U. Minnesota). Orthorectified satellite imagery that covers the area of the DEMs was subjected to principal component analysis in ESRI ArcGIS™ 10.1, then the different rock types were identified using various combinations of spectral bands in order to map the geology of rock exposures that could not be accessed directly from the ground. Renderings in 3D of the satellite scenes draped upon the DEMs were created using Global Mapper™. The 3D perspective views reveal structural and geological features that are not observed in either the DEM nor the satellite imagery alone. The detailed map is crucial for an ongoing petrological / geochemical investigation of Cretaceous crustal 3. Instability of the perceived world while watching 3D stereoscopic imagery: A likely source of motion sickness symptoms. PubMed Hwang, Alex D; Peli, Eli 2014-01-01 Watching 3D content using a stereoscopic display may cause various discomforting symptoms, including eye strain, blurred vision, double vision, and motion sickness. Numerous studies have reported motion-sickness-like symptoms during stereoscopic viewing, but no causal linkage between specific aspects of the presentation and the induced discomfort has been explicitly proposed. 
Here, we describe several causes, in which stereoscopic capture, display, and viewing differ from natural viewing resulting in static and, importantly, dynamic distortions that conflict with the expected stability and rigidity of the real world. This analysis provides a basis for suggested changes to display systems that may alleviate the symptoms, and suggestions for future studies to determine the relative contribution of the various effects to the unpleasant symptoms. 4. Instability of the perceived world while watching 3D stereoscopic imagery: A likely source of motion sickness symptoms PubMed Central Hwang, Alex D.; Peli, Eli 2014-01-01 Watching 3D content using a stereoscopic display may cause various discomforting symptoms, including eye strain, blurred vision, double vision, and motion sickness. Numerous studies have reported motion-sickness-like symptoms during stereoscopic viewing, but no causal linkage between specific aspects of the presentation and the induced discomfort has been explicitly proposed. Here, we describe several causes, in which stereoscopic capture, display, and viewing differ from natural viewing resulting in static and, importantly, dynamic distortions that conflict with the expected stability and rigidity of the real world. This analysis provides a basis for suggested changes to display systems that may alleviate the symptoms, and suggestions for future studies to determine the relative contribution of the various effects to the unpleasant symptoms. PMID:26034562 5. Construction of a 3d Measurable Virtual Reality Environment Based on Ground Panoramic Images and Orbital Imagery for Planetary Exploration Applications Di, K.; Liang, J.; Liu, Z. 2011-08-01 This paper presents a method of constructing a measurable virtual reality environment based on ground (lander/rover) panoramic images and orbital imagery. 
Ground panoramic images acquired by a lander/rover at different azimuth and elevation angles are automatically registered, seamlessly mosaicked and projected onto a cylindrical surface. A specific function is developed for inverse calculation from the panorama back to the original images so that the 3D information associated with the original stereo images can be retrieved or computed. The three-dimensional measurable panorama is integrated into a globe viewer based on NASA World Wind. The techniques developed in this research can be used in visualization of and measuring the orbital and ground images for planetary exploration missions, especially rover missions. 6. Research into a Single-aperture Light Field Camera System to Obtain Passive Ground-based 3D Imagery of LEO Objects Bechis, K.; Pitruzzello, A. 2014-09-01 This presentation describes our ongoing research into using a ground-based light field camera to obtain passive, single-aperture 3D imagery of LEO objects. Light field cameras are an emerging and rapidly evolving technology for passive 3D imaging with a single optical sensor. The cameras use an array of lenslets placed in front of the camera focal plane, which provides angle of arrival information for light rays originating from across the target, allowing range to target and 3D image to be obtained from a single image using monocular optics. The technology, which has been commercially available for less than four years, has the potential to replace dual-sensor systems such as stereo cameras, dual radar-optical systems, and optical-LIDAR fused systems, thus reducing size, weight, cost, and complexity. We have developed a prototype system for passive ranging and 3D imaging using a commercial light field camera and custom light field image processing algorithms. 
Our light field camera system has been demonstrated for ground-target surveillance and threat detection applications, and this paper presents results of our research thus far into applying this technology to the 3D imaging of LEO objects. The prototype 3D imaging camera system developed by Northrop Grumman uses a Raytrix R5 C2GigE light field camera connected to a Windows computer with an nVidia graphics processing unit (GPU). The system has a frame rate of 30 Hz, and a software control interface allows for automated camera triggering and light field image acquisition to disk. Custom image processing software then performs the following steps: (1) image refocusing, (2) change detection, (3) range finding, and (4) 3D reconstruction. In Step (1), a series of 2D images are generated from each light field image; the 2D images can be refocused at up to 100 different depths. Currently, steps (1) through (3) are automated, while step (4) requires some user interaction. A key requirement for light field camera 7. Estimating Anthropometric Marker Locations from 3-D LADAR Point Clouds DTIC Science & Technology 2011-06-01 http://perception.inrialpes. fr/Publications/2008/CMKBH08. [17] Da Vinci , L. The Notebooks of Leonardo Da Vinci Complete. Public Domain, 2004. [18] Duda...the problem is based on historical research into the proper proportioning of the human body. A famous example of this research is Leonardo da Vinci’s 8. Europeana and 3D Pletinckx, D. 2011-09-01 The current 3D hype creates a lot of interest in 3D. People go to 3D movies, but are we ready to use 3D in our homes, in our offices, in our communication? Are we ready to deliver real 3D to a general public and use interactive 3D in a meaningful way to enjoy, learn, communicate? The CARARE project is realising this for the moment in the domain of monuments and archaeology, so that real 3D of archaeological sites and European monuments will be available to the general public by 2012. 
There are several aspects to this endeavour. First of all is the technical aspect of flawlessly delivering 3D content over all platforms and operating systems, without installing software. We have currently a working solution in PDF, but HTML5 will probably be the future. Secondly, there is still little knowledge on how to create 3D learning objects, 3D tourist information or 3D scholarly communication. We are still in a prototype phase when it comes to integrate 3D objects in physical or virtual museums. Nevertheless, Europeana has a tremendous potential as a multi-facetted virtual museum. Finally, 3D has a large potential to act as a hub of information, linking to related 2D imagery, texts, video, sound. We describe how to create such rich, explorable 3D objects that can be used intuitively by the generic Europeana user and what metadata is needed to support the semantic linking. 9. Characterization of 3-D imaging lidar for hazard avoidance and autonomous landing on the Moon Pierrottet, Diego F.; Amzajerdian, Farzin; Meadows, Byron L.; Estes, Robert; Noe, Anna M. 2007-04-01 Future robotic and crewed lunar missions will require safe and precision soft-landing at scientifically interesting sites near hazardous terrain features such as craters and rocks or near pre-deployed assets. Presently, NASA is studying the ability of various 3-dimensional imaging sensors particularly lidar/ladar techniques in meeting its lunar landing needs. For this reason, a Sensor Test Range facility has been developed at NASA Langley Research Center for calibration and characterization of potential 3-D imaging sensors. This paper describes the Sensor Test Range facility and its application in characterizing a 3-D imaging ladar. The results of the ladar measurement are reported and compared with simulated image frames generated by a ladar model that was also developed as part of this effort. 
In addition to allowing for characterization and evaluation of different ladar systems, the ladar measurements at the Sensor Test Range will support further advancement of ladar systems and development of more efficient and accurate image reconstruction algorithms. 10. Spotlight-mode incoherently synthetic aperture imaging ladar: fundamentals Liu, Liren 2010-08-01 In this paper, a concept of spotlight-mode incoherently-synthetic aperture imaging ladar (SAIL) is proposed on the basis of computer tomography (CT). This incoherent SAIL has three operations of conventional, inverse and CT spotlight-modes with two sensing techniques of range and Doppler resolutions, and supplies a variety of dimensional transformations for 2-D range- and Doppler-resolved imaging of 2-D objects and for 3-D range-resolved imaging or in the depth compressed 2-D range- and Doppler-resolved imaging of 3-D objects. Due to the simplification in both the construction and the algorithm the difficulties in the signal collection and data processing are importantly relaxed. The incoherent SAIL provides a great potential for applications in the extensive fields. The paper gives the detailed analysis. 11. LADAR object detection and tracking Monaco, Sam D. 2004-10-01 The paper describes an innovative LADAR system for use in detecting, acquiring and tracking high-speed ballistic such as bullets and mortar shells and rocket propelled objects such as Rocket Propelled Grenades (RPGs) and TOW missiles. This class of targets proves to be a considerable challenge for classical RADAR systems since the target areas are small, velocities are very high and target range is short. The proposed system is based on detector and illuminator technology without any moving parts. The target area is flood illuminated with one or more modulated sources and a proprietary-processing algorithm utilizing phase difference return signals generates target information. 
All aspects of the system utilize existing, low risk components that are readily available from optical and electronic vendors. Operating the illuminator in a continuously modulated mode permits the target range to be measured by the phase delay of the modulated beam. Target velocity is measured by the Doppler frequency shift of the returned signal. 12. Real-time range generation for ladar hardware-in-the-loop testing Olson, Eric M.; Coker, Charles F. 1996-05-01 Real-time closed loop simulation of LADAR seekers in a hardware-in-the-loop facility can reduce program risk and cost. This paper discusses an implementation of real-time range imagery generated in a synthetic environment at the Kinetic Kill Vehicle Hardware-in-the Loop facility at Eglin AFB, for the stimulation of LADAR seekers and algorithms. The computer hardware platform used was a Silicon Graphics Incorporated Onyx Reality Engine. This computer contains graphics hardware, and is optimized for generating visible or infrared imagery in real-time. A by-product of the rendering process, in the form of a depth buffer, is generated from all objects in view during its rendering process. The depth buffer is an array of integer values that contributes to the proper rendering of overlapping objects and can be converted to range values using a mathematical formula. This paper presents an optimized software approach to the generation of the scenes, calculation of the range values, and outputting the range data for a LADAR seeker. 13. Characterization of articulated vehicles using ladar seekers Wellfare, Michael R.; Norris-Zachery, Karen 1997-08-01 Many vehicle targets of interest to military automatic target recognition (ATR) possess articulating components: that is, they have components that change position relative to the main body. Many vehicles also have multiple configurations wherein one or more devices or objects may be added to enhance specific military or logistical capabilities.
As the expected target set for military ATR becomes more comprehensive, many additional articulations and optional components must be handled. Mobile air defense units often include moving radar antennae as well as turreted guns and missile launchers. Surface-to-surface missile launchers may be encountered with or without missiles, and with the launch rails raised or lowered. Engineers and countermine vehicles have a tremendous number of possible configurations and even conventional battle tanks may vary items such as external reactive armor, long-range tanks, turret azimuth, and gun elevation. These changes pose a significant barrier to the target identification process since they greatly increase the range of possible target signatures. When combined with variations already encountered due to target aspect changes, an extremely large number of possible signatures is formed. Conventional algorithms cannot process so many possibilities effectively, so in response, the matching process is often made less selective. This degrades identification performance, increases false alarm rates, and increases data requirements for algorithm testing and training. By explicitly involving articulation in the detection and identification stages of an ATR algorithm, more precise matching constraints can be applied, and better selectivity can be achieved. Additional benefits include the measurement of the position and orientation of articulated components, which often has tactical significance. In this paper, the results of a study investigating the impact of target articulation in ATR for military vehicles are presented. 3D ladar signature 14. Synthetic aperture LADAR at 1550 nm: system demonstration, imaging processing and experimental result Li, Guangzuo; Wang, Ran; Wang, Peisi; Zhang, Keshu; Wu, Yirong 2016-10-01 In this manuscript, we propose and experimentally demonstrate our synthetic aperture LADAR (SAL) system.
The system could obtain imageries in a few milliseconds with resolution of 5 cm from a long distance. Fine resolution in the range dimension was obtained by transmitting LADAR signal with large bandwidth. While in the cross-range dimension, the large synthetic aperture diameter provided fine resolution. By employing continuous translational motion of SAL system, a large aperture diameter was obtained through synthetic aperture processing. So the diffraction limit of real aperture diameter was overcome and finer resolution was achieved. Indoor and outdoor experiments were both performed and the corresponding results were showed. Results validated the feasibility of our system and processing algorithm. Stann, Barry L.; Dammann, John F.; Giza, Mark M. 2016-05-01 16. Synthetic Aperture Ladar Imaging and Atmospheric Turbulence DTIC Science & Technology 2016-06-09 AFRL-AFOSR-VA-TR-2016-0185 Synthetic Aperture Ladar Imaging and Atmospheric Turbulence Zeb Barber MONTANA STATE UNIV BOZEMAN Final Report 06/09/2016... Atmospheric Turbulence 5a.  CONTRACT NUMBER 5b.  GRANT NUMBER FA9550-12-1-0421 5c.  PROGRAM ELEMENT NUMBER 61102F 6.  AUTHOR(S) Zeb Barber 5d.  PROJECT...Aperture Ladar and Atmospheric Turbulence’. It includes a technical summary of the entire effort and a more detailed description of the final portion of 17. Optimal pseudorandom pulse position modulation ladar waveforms. PubMed Fluckiger, David U; Boland, Brian F; Marcus, Eran 2015-03-20 An algorithm for generating optimal pseudorandom pulse position modulation (PRPPM) waveforms for ladar ranging is presented. Bistatic ladar systems using Geiger-mode avalanche photodiodes require detection of several pulses in order to generate sufficient target statistics to satisfy some detection decision rule. For targets with large initial range uncertainty, it becomes convenient to transmit a pulse train with large ambiguity range. One solution is to employ a PRPPM waveform. 
An optimal PRPPM waveform will have minimal sidelobes: equivalent to 1 or 0 counts after the pulse correlation filter (compression). This can be accomplished by generating PRPPM pulse trains with optimal or minimal sidelobe autocorrelation. 18. A low-power CMOS trans-impedance amplifier for FM/cw ladar imaging system Hu, Kai; Zhao, Yi-qiang; Sheng, Yun; Zhao, Hong-liang; Yu, Hai-xia 2013-09-01 A scannerless ladar imaging system based on a unique frequency modulation/continuous wave (FM/cw) technique is able to entirely capture the target environment, using a focal plane array to construct a 3D picture of the target. This paper presents a low power trans-impedance amplifier (TIA) designed and implemented by 0.18 μm CMOS technology, which is used in the FM/cw imaging ladar with a 64×64 metal-semiconductor-metal(MSM) self-mixing detector array. The input stage of the operational amplifier (op amp) in TIA is realized with folded cascade structure to achieve large open loop gain and low offset. The simulation and test results of TIA with MSM detectors indicate that the single-end trans-impedance gain is beyond 100 kΩ, and the -3 dB bandwidth of Op Amp is beyond 60 MHz. The input common mode voltage ranges from 0.2 V to 1.5 V, and the power dissipation is reduced to 1.8 mW with a supply voltage of 3.3 V. The performance test results show that the TIA is a candidate for preamplifier of the read-out integrated circuit (ROIC) in the FM/cw scannerless ladar imaging system. 19. Multi-dimensional, non-contact metrology using trilateration and high resolution FMCW ladar. PubMed Mateo, Ana Baselga; Barber, Zeb W 2015-07-01 Here we propose, describe, and provide experimental proof-of-concept demonstrations of a multidimensional, non-contact-length metrology system design based on high resolution (millimeter to sub-100 micron) frequency modulated continuous wave (FMCW) ladar and trilateration based on length measurements from multiple, optical fiber-connected transmitters. 
With an accurate FMCW ladar source, the trilateration-based design provides 3D resolution inherently independent of standoff range and allows self-calibration to provide flexible setup of a field system. A proof-of-concept experimental demonstration was performed using a highly stabilized, 2 THz bandwidth chirped laser source, two emitters, and one scanning emitter/receiver providing 1D surface profiles (2D metrology) of diffuse targets. The measured coordinate precision of <200 microns was determined to be limited by laser speckle issues caused by diffuse scattering of the targets. 20. A study of integration methods of aerial imagery and LIDAR data for a high level of automation in 3D building reconstruction Seo, Suyoung; Schenk, Toni F. 2003-04-01 This paper describes integration methods to increase the level of automation in building reconstruction. Aerial imagery has been used as a major source in mapping fields and, in recent years, LIDAR data became popular as another type of mapping resources. Regarding to their performances, aerial imagery has abilities to delineate object boundaries but leaves many missing parts of boundaries during feature extraction. LIDAR data provide direct information about heights of object surfaces but have limitation for boundary localization. Efficient methods using complementary characteristics of two sensors are described to generate hypotheses of building boundaries and localize the object features. Tree structures for grid contours of LIDAR data are used for interpretation of contours. Buildings are recognized by analyzing the contour trees and modeled with surface patches with LIDAR data. Hypotheses of building models are generated as combination of wing models and verified by assessing the consistency between the corresponding data sets. Experiments using aerial imagery and laser data are presented. 
Our approach shows that the building boundaries are successfully recognized through our contour analysis approach and the inference from contours and our modeling method using wing model increase the level of automation in hypothesis generation/verification steps. 1. The Development of the Automatic Target Recognition System for the UGV/RSTA LADAR DTIC Science & Technology 1995-03-21 Vol. 1960, Orlando, FL, 14-16 April 1993, pp. 57-71. 6. J.E. Nettleton and S. Holder, "Active/passive laser radar field test (Fort A.P. Hill, VA...is to develop the automatic target recogni- tion (ATR) system that will process the imagery from the RSTA laser radar (ladar). A real-time...ix 1. INTRODUCTION 1 2. IMAGE DATA BASES 5 2.1 Introduction 5 2.2 Tri-Service Laser -Radar (TSLR) Data 5 2.3 Hobby Shop (HBS) Laser -Radar Data 11 2. Ground-based and airborne thermal imagery of 2D and 3D forest structure for estimating sub-canopy longwave radiation during snowmelt Webster, Clare; Westoby, Matt; Rutter, Nick; Dunning, Stuart; Jonas, Tobias 2017-04-01 NASA Technical Reports Server (NTRS) Matthies, Larry; Bergh, Chuck; Castano, Andres; Macedo, Jose; Manduchi, Roberto 2003-01-01 Autonomous off-road navigation is central to several important applications of unmanned ground vehicles. This requires the ability to detect obstacles in vegetation. We examine the prospects for doing so with scanning ladar and with a linear array of 2.2 GHz micro-impulse radar transceivers. For ladar, we summarize our work to date on algorithms for detecting obstacles in tall grass with single-axis ladar, then present a simple probabilistic model of the distance into tall grass that ladar-based obstacle detection is possible. 4. New High-Resolution 3D Imagery of Fault Deformation and Segmentation of the San Onofre and San Mateo Trends in the Inner California Borderlands Holmes, J. J.; Driscoll, N. W.; Kent, G. M.; Bormann, J. M.; Harding, A. J. 
2015-12-01 The Inner California Borderlands (ICB) is situated off the coast of southern California and northern Baja. The structural and geomorphic characteristics of the area record a middle Oligocene transition from subduction to microplate capture along the California coast. Marine stratigraphic evidence shows large-scale extension and rotation overprinted by modern strike-slip deformation. Geodetic and geologic observations indicate that approximately 6-8 mm/yr of Pacific-North American relative plate motion is accommodated by offshore strike-slip faulting in the ICB. The farthest inshore fault system, the Newport-Inglewood Rose Canyon (NIRC) fault complex is a dextral strike-slip system that extends primarily offshore approximately 120 km from San Diego to the San Joaquin Hills near Newport Beach, California. Based on trenching and well data, the NIRC fault system Holocene slip rate is 1.5-2.0 mm/yr to the south and 0.5-1.0 mm/yr along its northern extent. An earthquake rupturing the entire length of the system could produce an Mw 7.0 earthquake or larger. West of the main segments of the NIRC fault complex are the San Mateo and San Onofre fault trends along the continental slope. Previous work concluded that these were part of a strike-slip system that eventually merged with the NIRC complex. Others have interpreted these trends as deformation associated with the Oceanside Blind Thrust fault purported to underlie most of the region. In late 2013, we acquired the first high-resolution 3D P-Cable seismic surveys (3.125 m bin resolution) of the San Mateo and San Onofre trends as part of the Southern California Regional Fault Mapping project aboard the R/V New Horizon. Analysis of these volumes provides important new insights and constraints on the fault segmentation and transfer of deformation. 
Based on the new 3D sparker seismic data, our preferred interpretation for the San Mateo and San Onofre fault trends is they are transpressional features associated with westward 5. Photon Counting Chirped Amplitude Modulation Ladar DTIC Science & Technology 2008-03-01 135 S. Taylor Ave., Room 103, Louisville, CO 80027-3025 14. ABSTRACT This work developed a method using Geiger - mode avalanche photodiode (GM-APD...effort to develop a method using Geiger - mode avalanche photodiode (GM-APD) photon counting detectors in the U.S. Army Research Laboratory’s chirped...architecture are discussed. 15. SUBJECT TERMS laser radar, ladar, avalanche photo-detectors, Geiger mode detectors, chirped amplitude modulation 6. New High-Resolution 3D Seismic Imagery of Deformation and Fault Architecture Along Newport-Inglewood/Rose Canyon Fault in the Inner California Borderlands Holmes, J. J.; Bormann, J. M.; Driscoll, N. W.; Kent, G.; Harding, A. J.; Wesnousky, S. G. 2014-12-01 The tectonic deformation and geomorphology of the Inner California Borderlands (ICB) records the transition from a convergent plate margin to a predominantly dextral strike-slip system. Geodetic measurements of plate boundary deformation onshore indicate that approximately 15%, or 6-8 mm/yr, of the total Pacific-North American relative plate motion is accommodated by faults offshore. The largest near-shore fault system, the Newport-Inglewood/Rose Canyon (NI/RC) fault complex, has a Holocene slip rate estimate of 1.5-2.0 mm/yr, according to onshore trenching, and current models suggest the potential to produce an Mw 7.0+ earthquake. The fault zone extends approximately 120 km, initiating from the south near downtown San Diego and striking northwards with a constraining bend north of Mt. Soledad in La Jolla and continuing northwestward along the continental shelf, eventually stepping onshore at Newport Beach, California. 
In late 2013, we completed the first high-resolution 3D seismic survey (3.125 m bins) of the NI/RC fault offshore of San Onofre as part of the Southern California Regional Fault Mapping project. We present new constraints on fault geometry and segmentation of the fault system that may play a role in limiting the extent of future earthquake ruptures. In addition, slip rate estimates using piercing points such as offset channels will be explored. These new observations will allow us to investigate recent deformation and strain transfer along the NI/RC fault system. 7. Fault Deformation and Segmentation of the Newport-Inglewood Rose Canyon, and San Onofre Trend Fault Systems from New High-Resolution 3D Seismic Imagery Holmes, J. J.; Driscoll, N. W.; Kent, G. M. 2016-12-01 The Inner California Borderlands (ICB) is situated off the coast of southern California and northern Baja. The structural and geomorphic characteristics of the area record a middle Oligocene transition from subduction to microplate capture along the California coast. Marine stratigraphic evidence shows large-scale extension and rotation overprinted by modern strike-slip deformation. Geodetic and geologic observations indicate that approximately 6-8 mm/yr of Pacific-North American relative plate motion is accommodated by offshore strike-slip faulting in the ICB. The farthest inshore fault system, the Newport-Inglewood Rose Canyon (NIRC) Fault is a dextral strike-slip system that is primarily offshore for approximately 120 km from San Diego to the San Joaquin Hills near Newport Beach, California. Based on trenching and well data, the NIRC Fault Holocene slip rate is 1.5-2.0 mm/yr to the south and 0.5-1.0 mm/yr along its northern extent. An earthquake rupturing the entire length of the system could produce an Mw 7.0 earthquake or larger. West of the main segments of the NIRC Fault is the San Onofre Trend (SOT) along the continental slope. 
Previous work concluded that this is part of a strike-slip system that eventually merges with the NIRC Fault. Others have interpreted this system as deformation associated with the Oceanside Blind Thrust fault purported to underlie most of the region. In late 2013, we acquired the first high-resolution 3D Parallel Cable (P-Cable) seismic surveys of the NIRC and SOT faults as part of the Southern California Regional Fault Mapping project aboard the R/V New Horizon. Analysis of these data volumes provides important new insights and constraints on the fault segmentation and transfer of deformation. Based on this new data, we've mapped several small fault strands associated with the SOT that appear to link up with a westward jog in right-lateral fault splays of the NIRC Fault on the shelf and then narrowly radiate southwards. Our Keffer, Charles E.; Papetti, Thomas J.; Johnson, Eddie 2007-04-01 The Advanced Measurements Optical Range (AMOR) began operations in 1978 with a mission to measure ladar target signatures of ballistic missiles and to advance the understanding of object features useful for discrimination of reentry vehicles from decoy objects. Ground breaking ladar technology developments and pioneering ladar target signature studies were completed in the early years of AMOR operations. More recently, AMOR functions primarily as a user test facility measuring ladar signatures of a diverse set of objects such as reentry vehicles and decoys, missile bodies, and satellite materials as well as serving as a ladar sensor test-bed to recreate realistic missile defense engagement scenarios to exercise and test missile seeker technologies. This paper gives a status report on current AMOR capabilities including the optical system, target handling system, laser systems, and data measurement types. 
Plans for future facility enhancements to provide improved service to ladar data users in the modeling and simulation field and to ladar system developers with requirements for advanced test requirements are also reported. 9. Foliage discrimination using a rotating ladar NASA Technical Reports Server (NTRS) Castano, A.; Matthies, L. 2003-01-01 We present a real time algorithm that detects foliage using range from a rotating laser. Objects not classified as foliage are conservatively labeled as non-driving obstacles. In contrast to related work that uses range statistics to classify objects, we exploit the expected localities and continuities of an obstacle, in both space and time. Also, instead of attempting to find a single accurate discriminating factor for every ladar return, we hypothesize the class of some few returns and then spread the confidence (and classification) to other returns using the locality constraints. The Urbie robot is presently using this algorithm to discriminate drivable grass from obstacles during outdoor autonomous navigation tasks. 10. Foliage discrimination using a rotating ladar NASA Technical Reports Server (NTRS) Castano, A.; Matthies, L. 2003-01-01 We present a real time algorithm that detects foliage using range from a rotating laser. Objects not classified as foliage are conservatively labeled as non-driving obstacles. In contrast to related work that uses range statistics to classify objects, we exploit the expected localities and continuities of an obstacle, in both space and time. Also, instead of attempting to find a single accurate discriminating factor for every ladar return, we hypothesize the class of some few returns and then spread the confidence (and classification) to other returns using the locality constraints. The Urbie robot is presently using this algorithm to discriminate drivable grass from obstacles during outdoor autonomous navigation tasks. 11.
Demonstration of synthetic aperture imaging ladar Buell, W.; Marechal, N.; Buck, J.; Dickinson, R.; Kozlowski, D.; Wright, T.; Beck, S. 2005-05-01 The spatial resolution of a conventional imaging LADAR system is constrained by the diffraction limit of the telescope aperture. The purpose of this work is to investigate Synthetic Aperture Imaging LADAR (SAIL), which employs aperture synthesis with coherent laser radar to overcome the diffraction limit and achieve fine-resolution, long range, two-dimensional imaging with modest aperture diameters. This paper details our laboratory-scale SAIL testbed, digital signal processing techniques, and image results. A number of fine-resolution, well-focused SAIL images are shown including both retro-reflecting and diffuse scattering targets. A general digital signal processing solution to the laser waveform instability problem is described and demonstrated, involving both new algorithms and hardware elements. These algorithms are primarily data-driven, without a priori knowledge of waveform and sensor position, representing a crucial step in developing a robust imaging system. These techniques perform well on waveform errors, but not on external phase errors such as turbulence or vibration. As a first step towards mitigating phase errors of this type, we have developed a balanced, quadrature phase, laser vibrometer to work in conjunction with our SAIL system to measure and compensate for relative line of sight motion between the target and transceiver. We describe this system and present a comparison of the vibrometer-measured phase error with the phase error inferred from the SAIL data. 12. LADAR vision technology for automated rendezvous and capture NASA Technical Reports Server (NTRS) Frey, Randy W. 
1991-01-01 LADAR Vision Technology at Autonomous Technologies Corporation consists of two sensor/processing technology elements: high performance long range multifunction coherent Doppler laser radar (LADAR) technology; and short range integrated CCD camera with direct detection laser ranging sensors. Algorithms and specific signal processing implementations have been simulated for both sensor/processing approaches to position and attitude tracking applicable to AR&C. Experimental data supporting certain sensor measurement accuracies have been generated. 13. 3-D Imagery Cockpit Display Development DTIC Science & Technology 1990-08-01 display. is needed. Good information - (3) Change from pictorial gauges to difficult to interpret. word warnings. Display EGT & OIL indicators at all times...indicator. Popped CBs. Information to be changed : Comments: (5) Nothing needs to be changed . Great format. (2) Standardize colors. Display is good. Use all ...sense? Any suggestions for changes ? 6 Pilots Good. 5 Pilots Great! Don’t change the format. 1 Pilot Stores part great. 1 Pilot Provides all the necessary 14. Synthetic aperture ladar based on a MOPAW laser Turbide, Simon; Marchese, Linda; Bergeron, Alain; Desbiens, Louis; Paradis, Patrick 2016-10-01 Long range land surveillance is a critical need in numerous military and civilian security applications, such as threat detection, terrain mapping and disaster prevention. A key technology for land surveillance, synthetic aperture radar (SAR) continues to provide high resolution radar images in all weather conditions from remote distances. State of the art SAR systems based on dual-use satellites are capable of providing ground resolutions of one meter; while their airborne counterparts obtain resolutions of 10 cm. Certain land surveillance applications such as subsidence monitoring, landslide hazard prediction and tactical target tracking could benefit from improved resolution. 
The ultimate limitation to the achievable resolution of any imaging system is its wavelength. State-of-the-art SAR systems are approaching this limit. The natural extension to improve resolution is to thus decrease the wavelength, i.e. design a synthetic aperture system in a different wavelength regime. One such system offering the potential for vastly improved resolution is Synthetic Aperture Ladar (SAL). This system operates at infrared wavelengths, ten thousand times smaller than radar wavelengths. This paper presents a SAL platform based on the INO Master Oscillator with Programmable Amplitude Waveform (MOPAW) laser that has a wavelength sweep of Δλ=1.22 nm, a pulse repetition rate up to 1 kHz and up to 200 μJ per pulse. The results for SAL 2D imagery at a range of 10 m are presented, indicating a reflectance sensitivity of 8 %, ground-range and azimuth resolution of 1.7 mm and 0.84 mm respectively. 15. Research on an FM/cw ladar system using a 64 × 64 InGaAs metal-semiconductor-metal self-mixing focal plane array of detectors. PubMed Gao, Jian; Sun, Jianfeng; Cong, Mingyu 2017-04-01 Frequency-modulated/continuous-wave imaging systems are a focal plane array (FPA) ladar architecture that is applicable to smart munitions, reconnaissance, face recognition, robotic navigation, etc. In this paper, we report a 64×64 pixel FPA ladar system we built using a 1550 nm amplified laser diode transmitter and an InAlAs/InGaAs metal-semiconductor-metal self-mixing detector array and the test results attained over the years it was constructed. Finally, we gained 4D imaging (3D range + 1D intensity) of the target with the range of 220 m.
Synthetic aperture radar (SAR) continues to provide high resolution radar images in all weather conditions from remote distances. As well, Interferometric SAR (InSAR) and Differential Interferometric SAR (D-InSAR) have become powerful tools adding high resolution elevation and change detection measurements. State of the art SAR systems based on dual-use satellites are capable of providing ground resolutions of one meter; while their airborne counterparts obtain resolutions of 10 cm. D-InSAR products based on these systems could produce cm-scale vertical resolution image products. Deformation monitoring of railways, roads, buildings, cellular antennas, power structures (i.e., power lines, wind turbines, dams, or nuclear plants) would benefit from improved resolution, both in the ground plane and vertical direction. The ultimate limitation to the achievable resolution of any imaging system is its wavelength. State-of-the art SAR systems are approaching this limit. The natural extension to improve resolution is to thus decrease the wavelength, i.e. design a synthetic aperture system in a different wavelength regime. One such system offering the potential for vastly improved resolution is Synthetic Aperture Ladar (SAL). This system operates at infrared wavelengths, ten thousand times smaller than radar wavelengths. This paper presents a laboratory demonstration of a scaled-down infrastructure deformation monitoring with an Interferometric Synthetic Aperture Ladar (IFSAL) system operating at 1.5 μm. Results show sub-millimeter precision on the deformation applied to the target. 17. DVE flight test results of a sensor enhanced 3D conformal pilot support system Münsterer, Thomas; Völschow, Philipp; Singer, Bernhard; Strobel, Michael; Kramper, Patrick 2015-06-01 The paper presents results and findings of flight tests of the Airbus Defence and Space DVE system SFERION performed at Yuma Proving Grounds. 
During the flight tests ladar information was fused with a priori DB knowledge in real-time and 3D conformal symbology was generated for display on an HMD. The test flights included low level flights as well as numerous brownout landings. 18. Construction of multi-functional open modulized Matlab simulation toolbox for imaging ladar system Wu, Long; Zhao, Yuan; Tang, Meng; He, Jiang; Zhang, Yong 2011-06-01 19. LADAR And FLIR Based Sensor Fusion For Automatic Target Classification Selzer, Fred; Gutfinger, Dan 1989-01-01 The purpose of this report is to show results of automatic target classification and sensor fusion for forward looking infrared (FLIR) and Laser Radar sensors. The sensor fusion data base was acquired from the Naval Weapon Center and it consists of coregistered Laser RaDAR (range and reflectance image), FLIR (raw and preprocessed image) and TV. Using this data base we have developed techniques to extract relevant object edges from the FLIR and LADAR which are correlated to wireframe models. The resulting correlation coefficients from both the LADAR and FLIR are fused using either the Bayesian or the Dempster-Shafer combination method so as to provide a higher confidence target classification level output. Finally, to minimize the correlation process the wireframe models are modified to reflect target range (size of target) and target orientation which is extracted from the LADAR reflectance image. 20. An Estimation Theory Approach to Detection and Ranging of Obscured Targets in 3-D LADAR Data DTIC Science & Technology 2006-03-01 set consists of 100 laser pulse returns. The first set was collected as a control set using the target shown in figure 2.2. The second set was...containing 2 surfaces. 36 0 2 4 6 8 10 12 14 16 18 0 500 1000 1500 2000 2500 3000 3500 Pixel (99,107) Control # of P ho to ns Sample # Figure 4.4...show the first pulse return of control data from a pixel corresponding to the front surface and back surface respectively. 
37 0 2 4 6 8 10 1. A Statistical Approach to Fusing 2-D and 3-D LADAR Systems DTIC Science & Technology 2011-03-24 pulse estimate ( Pold (x, y, rk)), and the bias estimate (Bold(u, v)). The 20 conditional expectation (Q) in general form is shown in Eq. (3.6). Q = M∑ x=1...M∑ y=1 M∑ u=1 M∑ v=1 K∑ k=1 [E[d̃1(u, v, x, y, rk)|d(u, v, rk), Pold (x, y, rk), Bold(u, v)] ×(ln(A(x, y)√ 2πσ h(Lu− x, Lv − y)) + ln(e (rk−r(x,y)) 2...2σ2 ))− P (x, y, rk)h(Lu− x, Lv − y) −E[ln(d̃1(u, v, x, y, rk)!)|d(u, v, rk), Pold (x, y, rk), Bold(u, v) 2. Remote sensing solution using 3-D flash LADAR for automated control of aircraft Neff, Brian J.; Fuka, Jennifer A.; Burwell, Alan C.; Gray, Stephen W.; Hubbard, Mason J.; Schenkel, Joseph W. 2015-09-01 The majority of image quality studies in the field of remote sensing have been performed on systems with conventional aperture functions. These systems have well-understood image quality tradeoffs, characterized by the General Image Quality Equation (GIQE). Advanced, next-generation imaging systems present challenges to both post-processing and image quality prediction. Examples include sparse apertures, synthetic apertures, coded apertures and phase elements. As a result of the non-conventional point spread functions of these systems, post-processing becomes a critical step in the imaging process and artifacts arise that are more complicated than simple edge overshoot. Previous research at the Rochester Institute of Technology's Digital Imaging and Remote Sensing Laboratory has resulted in a modeling methodology for sparse and segmented aperture systems, the validation of which will be the focus of this work. This methodology has predicted some unique post-processing artifacts that arise when a sparse aperture system with wavefront error is used over a large (panchromatic) spectral bandpass. Since these artifacts are unique to sparse aperture systems, they have not yet been observed in any real-world data. 
In this work, a laboratory setup and initial results for a model validation study will be described. Initial results will focus on the validation of spatial frequency response predictions and verification of post-processing artifacts. The goal of this study is to validate the artifact and spatial frequency response predictions of this model. This will allow model predictions to be used in image quality studies, such as aperture design optimization, and the signal-to-noise vs. post-processing artifact tradeoff resulting from choosing a panchromatic vs. multispectral system. 3. True 3d Images and Their Applications Wang, Z.; wang@hzgeospace., zheng. 2012-07-01 A true 3D image is a geo-referenced image. Besides having its radiometric information, it also has true 3Dground coordinates XYZ for every pixels of it. For a true 3D image, especially a true 3D oblique image, it has true 3D coordinates not only for building roofs and/or open grounds, but also for all other visible objects on the ground, such as visible building walls/windows and even trees. The true 3D image breaks the 2D barrier of the traditional orthophotos by introducing the third dimension (elevation) into the image. From a true 3D image, for example, people will not only be able to read a building's location (XY), but also its height (Z). true 3D images will fundamentally change, if not revolutionize, the way people display, look, extract, use, and represent the geospatial information from imagery. In many areas, true 3D images can make profound impacts on the ways of how geospatial information is represented, how true 3D ground modeling is performed, and how the real world scenes are presented. This paper first gives a definition and description of a true 3D image and followed by a brief review of what key advancements of geospatial technologies have made the creation of true 3D images possible. Next, the paper introduces what a true 3D image is made of. 
Then, the paper discusses some possible contributions and impacts the true 3D images can make to geospatial information fields. At the end, the paper presents a list of the benefits of having and using true 3D images and the applications of true 3D images in a couple of 3D city modeling projects. 4. A 3d-3d appetizer Pei, Du; Ye, Ke 2016-11-01 We test the 3d-3d correspondence for theories that are labeled by Lens spaces. We find a full agreement between the index of the 3d N=2 "Lens space theory" T [ L( p, 1)] and the partition function of complex Chern-Simons theory on L( p, 1). In particular, for p = 1, we show how the familiar S 3 partition function of Chern-Simons theory arises from the index of a free theory. For large p, we find that the index of T[ L( p, 1)] becomes a constant independent of p. In addition, we study T[ L( p, 1)] on the squashed three-sphere S b 3 . This enables us to see clearly, at the level of partition function, to what extent G ℂ complex Chern-Simons theory can be thought of as two copies of Chern-Simons theory with compact gauge group G. 5. Interactive 3D Mars Visualization NASA Technical Reports Server (NTRS) Powell, Mark W. 2012-01-01 The Interactive 3D Mars Visualization system provides high-performance, immersive visualization of satellite and surface vehicle imagery of Mars. The software can be used in mission operations to provide the most accurate position information for the Mars rovers to date. When integrated into the mission data pipeline, this system allows mission planners to view the location of the rover on Mars to 0.01-meter accuracy with respect to satellite imagery, with dynamic updates to incorporate the latest position information. Given this information so early in the planning process, rover drivers are able to plan more accurate drive activities for the rover than ever before, increasing the execution of science activities significantly. 
Scientifically, this 3D mapping information puts all of the science analyses to date into geologic context on a daily basis instead of weeks or months, as was the norm prior to this contribution. This allows the science planners to judge the efficacy of their previously executed science observations much more efficiently, and achieve greater science return as a result. The Interactive 3D Mars surface view is a Mars terrain browsing software interface that encompasses the entire region of exploration for a Mars surface exploration mission. The view is interactive, allowing the user to pan in any direction by clicking and dragging, or to zoom in or out by scrolling the mouse or touchpad. This set currently includes tools for selecting a point of interest, and a ruler tool for displaying the distance between and positions of two points of interest. The mapping information can be harvested and shared through ubiquitous online mapping tools like Google Mars, NASA WorldWind, and Worldwide Telescope. 6. 3d-3d correspondence revisited DOE PAGES Chung, Hee -Joong; Dimofte, Tudor; Gukov, Sergei; ... 2016-04-21 In fivebrane compactifications on 3-manifolds, we point out the importance of all flat connections in the proper definition of the effective 3d N = 2 theory. The Lagrangians of some theories with the desired properties can be constructed with the help of homological knot invariants that categorify colored Jones polynomials. Higgsing the full 3d theories constructed this way recovers theories found previously by Dimofte-Gaiotto-Gukov. As a result, we also consider the cutting and gluing of 3-manifolds along smooth boundaries and the role played by all flat connections in this operation. 7. 3d-3d correspondence revisited SciTech Connect Chung, Hee -Joong; Dimofte, Tudor; Gukov, Sergei; Sułkowski, Piotr 2016-04-21 In fivebrane compactifications on 3-manifolds, we point out the importance of all flat connections in the proper definition of the effective 3d N = 2 theory. 
The Lagrangians of some theories with the desired properties can be constructed with the help of homological knot invariants that categorify colored Jones polynomials. Higgsing the full 3d theories constructed this way recovers theories found previously by Dimofte-Gaiotto-Gukov. As a result, we also consider the cutting and gluing of 3-manifolds along smooth boundaries and the role played by all flat connections in this operation. 8. ALLFlight: detection of moving objects in IR and ladar images Doehler, H.-U.; Peinecke, Niklas; Lueken, Thomas; Schmerwitz, Sven 2013-05-01 Supporting a helicopter pilot during landing and takeoff in degraded visual environment (DVE) is one of the challenges within DLR's project ALLFlight (Assisted Low Level Flight and Landing on Unprepared Landing Sites). Different types of sensors (TV, Infrared, mmW radar and laser radar) are mounted onto DLR's research helicopter FHS (flying helicopter simulator) for gathering different sensor data of the surrounding world. A high performance computer cluster architecture acquires and fuses all the information to get one single comprehensive description of the outside situation. While both TV and IR cameras deliver images with frame rates of 25 Hz or 30 Hz, Ladar and mmW radar provide georeferenced sensor data with only 2 Hz or even less. Therefore, it takes several seconds to detect or even track potential moving obstacle candidates in mmW or Ladar sequences. Especially if the helicopter is flying with higher speed, it is very important to minimize the detection time of obstacles in order to initiate a re-planning of the helicopter's mission timely. Applying feature extraction algorithms on IR images in combination with data fusion algorithms of extracted features and Ladar data can decrease the detection time appreciably. 
Based on real data from flight tests, the paper describes applied feature extraction methods for moving object detection, as well as data fusion techniques for combining features from TV/IR and Ladar data. 9. Ladar range image denoising by a nonlocal probability statistics algorithm Xia, Zhi-Wei; Li, Qi; Xiong, Zhi-Peng; Wang, Qi 2013-01-01 According to the characteristic of range images of coherent ladar and the basis of nonlocal means (NLM), a nonlocal probability statistics (NLPS) algorithm is proposed in this paper. The difference is that NLM performs denoising using the mean of the conditional probability distribution function (PDF) while NLPS using the maximum of the marginal PDF. In the algorithm, similar blocks are found out by the operation of block matching and form a group. Pixels in the group are analyzed by probability statistics and the gray value with maximum probability is used as the estimated value of the current pixel. The simulated range images of coherent ladar with different carrier-to-noise ratio and real range image of coherent ladar with 8 gray-scales are denoised by this algorithm, and the results are compared with those of median filter, multitemplate order mean filter, NLM, median nonlocal mean filter and its incorporation of anatomical side information, and unsupervised information-theoretic adaptive filter. The range abnormality noise and Gaussian noise in range image of coherent ladar are effectively suppressed by NLPS. 10. A 3-D Look at Post-Tropical Cyclone Hermine NASA Image and Video Library This 3-D flyby animation of GPM imagery shows Post-Tropical Storm Hermine on Sept. 6. Rain was falling at a rate of over 1.1 inches (27 mm) per hour between the Atlantic coast and Hermine's center ... 11. 3D and Education Meulien Ohlmann, Odile 2013-02-01 Today the industry offers a chain of 3D products. Learning to "read" and to "create in 3D" becomes an issue of education of primary importance. 
25 years professional experience in France, the United States and Germany, Odile Meulien set up a personal method of initiation to 3D creation that entails the spatial/temporal experience of the holographic visual. She will present some different tools and techniques used for this learning, their advantages and disadvantages, programs and issues of educational policies, constraints and expectations related to the development of new techniques for 3D imaging. Although the creation of display holograms is very much reduced compared to the creation of the 90ies, the holographic concept is spreading in all scientific, social, and artistic activities of our present time. She will also raise many questions: What means 3D? Is it communication? Is it perception? How the seeing and none seeing is interferes? What else has to be taken in consideration to communicate in 3D? How to handle the non visible relations of moving objects with subjects? Does this transform our model of exchange with others? What kind of interaction this has with our everyday life? Then come more practical questions: How to learn creating 3D visualization, to learn 3D grammar, 3D language, 3D thinking? What for? At what level? In which matter? for whom? 12. Positional Awareness Map 3D (PAM3D) NASA Technical Reports Server (NTRS) Hoffman, Monica; Allen, Earl L.; Yount, John W.; Norcross, April Louise 2012-01-01 The Western Aeronautical Test Range of the National Aeronautics and Space Administration s Dryden Flight Research Center needed to address the aging software and hardware of its current situational awareness display application, the Global Real-Time Interactive Map (GRIM). GRIM was initially developed in the late 1980s and executes on older PC architectures using a Linux operating system that is no longer supported. Additionally, the software is difficult to maintain due to its complexity and loss of developer knowledge. 
It was decided that a replacement application must be developed or acquired in the near future. The replacement must provide the functionality of the original system, the ability to monitor test flight vehicles in real-time, and add improvements such as high resolution imagery and true 3-dimensional capability. This paper will discuss the process of determining the best approach to replace GRIM, and the functionality and capabilities of the first release of the Positional Awareness Map 3D. 13. Refined 3d-3d correspondence Alday, Luis F.; Genolini, Pietro Benetti; Bullimore, Mathew; van Loon, Mark 2017-04-01 We explore aspects of the correspondence between Seifert 3-manifolds and 3d N = 2 supersymmetric theories with a distinguished abelian flavour symmetry. We give a prescription for computing the squashed three-sphere partition functions of such 3d N = 2 theories constructed from boundary conditions and interfaces in a 4d N = 2∗ theory, mirroring the construction of Seifert manifold invariants via Dehn surgery. This is extended to include links in the Seifert manifold by the insertion of supersymmetric Wilson-'t Hooft loops in the 4d N = 2∗ theory. In the presence of a mass parameter c for the distinguished flavour symmetry, we recover aspects of refined Chern-Simons theory with complex gauge group, and in particular construct an analytic continuation of the S-matrix of refined Chern-Simons theory. 14. A 3d-3d appetizer DOE PAGES Pei, Du; Ye, Ke 2016-11-02 Here, we test the 3d-3d correspondence for theories that are labeled by Lens spaces. We find a full agreement between the index of the 3d N=2 “Lens space theory” T [L(p, 1)] and the partition function of complex Chern-Simons theory on L(p, 1). In particular, for p = 1, we show how the familiar S3 partition function of Chern-Simons theory arises from the index of a free theory. For large p, we find that the index of T[L(p, 1)] becomes a constant independent of p. 
In addition, we study T[L(p, 1)] on the squashed three-sphere Sb3. This enables us to see clearly, at the level of partition function, to what extent GC complex Chern-Simons theory can be thought of as two copies of Chern-Simons theory with compact gauge group G. 15. A 3d-3d appetizer SciTech Connect Pei, Du; Ye, Ke 2016-11-02 Here, we test the 3d-3d correspondence for theories that are labeled by Lens spaces. We find a full agreement between the index of the 3d N=2 “Lens space theory” T [L(p, 1)] and the partition function of complex Chern-Simons theory on L(p, 1). In particular, for p = 1, we show how the familiar S3 partition function of Chern-Simons theory arises from the index of a free theory. For large p, we find that the index of T[L(p, 1)] becomes a constant independent of p. In addition, we study T[L(p, 1)] on the squashed three-sphere Sb3. This enables us to see clearly, at the level of partition function, to what extent GC complex Chern-Simons theory can be thought of as two copies of Chern-Simons theory with compact gauge group G. 16. 3D Imaging. ERIC Educational Resources Information Center Hastings, S. K. 2002-01-01 Discusses 3 D imaging as it relates to digital representations in virtual library collections. Highlights include X-ray computed tomography (X-ray CT); the National Science Foundation (NSF) Digital Library Initiatives; output peripherals; image retrieval systems, including metadata; and applications of 3 D imaging for libraries and museums. (LRW) 17. 3D Imaging. ERIC Educational Resources Information Center Hastings, S. K. 2002-01-01 Discusses 3 D imaging as it relates to digital representations in virtual library collections. Highlights include X-ray computed tomography (X-ray CT); the National Science Foundation (NSF) Digital Library Initiatives; output peripherals; image retrieval systems, including metadata; and applications of 3 D imaging for libraries and museums. (LRW) 18. 
Diamond in 3-D NASA Image and Video Library 2004-08-20 This 3-D, microscopic imager mosaic of a target area on a rock called Diamond Jenness was taken after NASA Mars Exploration Rover Opportunity ground into the surface with its rock abrasion tool for a second time. 3D glasses are necessary. 19. Terrain classification of ladar data for bare earth determination Neuenschwander, Amy L.; Magruder, Lori A. 2011-06-01 Terrain classification, or bare earth extraction, is an important component to LADAR data analysis. The terrain classification approach presented in this effort utilizes an adaptive lower envelope follower (ALEF) with an adaptive gradient operation for accommodations of local topography and roughness. In order to create a more robust capability, the ALEF was modified to become a strictly data driven process that facilitates a quick production of the data product without the subjective component associated with user inputs. This automated technique was tested on existing LADAR surveys over Wyoming's Powder River Basin and the John Starr Memorial Forest in Mississippi, both locations with dynamic topographic features. The results indicate a useful approach in terms of operational time and accuracy of the final bare earth recovery with the advantage of being fully data driven. 20. Photon Counting Chirped AM Ladar: Concept, Simulation, and Experimental Results DTIC Science & Technology 2006-11-01 sensitivity. This noise is well above the signal shot noise limit. We are developing a method using Geiger - mode avalanche photodiode (Gm-APD) photon counting...shifted chirp waveform. The range to the target is recovered in the same way as for the chirped AM ladar with linear response mode detectors. In...output pulse from signal generator 1 also triggers the start of signal generator 2, which operates in burst mode to output trigger pulses to the 1. 
Optical design of a synthetic aperture ladar antenna system Cao, Changqing; Zeng, Xiaodong; Zhao, Xiaoyan; Liu, Huanhuan; Man, Xiangkun 2008-03-01 The spatial resolution of a conventional imaging LADAR system is constrained by the diffraction limit of the telescope aperture. The purpose of this work is to investigate Synthetic Aperture Imaging LADAR (SAIL), which employs aperture synthesis with coherent laser radar to overcome the diffraction limit and achieve fine-resolution, long range, two-dimensional imaging with modest aperture diameters. According to the demands of the Synthetic Aperture LADAR (SAL), the key techniques are analyzed briefly. The preliminary design of the optical antenna is also introduced in this paper. We investigate the design method and relevant problems of efficient optical antenna that are required in SAL. The design is pursued on the basis of the same method as is used at microwave frequency. The method is based on numerical analysis and the error values obtained by present manufacturing technology. According to the requirement to SAL with the trial of little size, light mass, low cost and high image quality, the result by ZEMAX will result. 2. Ladar System Identifies Obstacles Partly Hidden by Grass NASA Technical Reports Server (NTRS) Castano, Andres 2003-01-01 A ladar-based system now undergoing development is intended to enable an autonomous mobile robot in an outdoor environment to avoid moving toward trees, large rocks, and other obstacles that are partly hidden by tall grass. The design of the system incorporates the assumption that the robot is capable of moving through grass and provides for discrimination between grass and obstacles on the basis of geometric properties extracted from ladar readings as described below. 
The system (see figure) includes a ladar system that projects a range-measuring pulsed laser beam that has a small angular width of δθ radians and is capable of measuring distances of reflective objects from a minimum of dmin to a maximum of dmax. The system is equipped with a rotating mirror that scans the beam through a relatively wide angular range of Θ radians in a horizontal plane at a suitable small height above the ground. Successive scans are performed at time intervals of τ seconds. During each scan, the laser beam is fired at relatively small angular intervals of Δθ radians to make range measurements, so that the total number of range measurements acquired in a scan is Ne = Θ/Δθ. 3. 3D Plasmon Ruler SciTech Connect 2011-01-01 In this animation of a 3D plasmon ruler, the plasmonic assembly acts as a transducer to deliver optical information about the structural dynamics of an attached protein. (courtesy of Paul Alivisatos group) 4. Prominent Rocks - 3-D NASA Image and Video Library 1997-07-13 Many prominent rocks near the Sagan Memorial Station are featured in this image from NASA Mars Pathfinder. Shark, Half-Dome, and Pumpkin are at center 3D glasses are necessary to identify surface detail. 5. 3D Laser System NASA Image and Video Library 2015-09-16 NASA Glenn's Icing Research Tunnel 3D Laser System used for digitizing ice shapes created in the wind tunnel. The ice shapes are later utilized for characterization, analysis, and software development. 6. Ground-Based Deep-Space Ladar for Satellite Detection: A Parametric Study DTIC Science & Technology 1989-12-01 1974). 20. Degnan, J. "Satellite Laser Ranging: Current Status and Future Prospects," IEEE Transactions on Geoscience and Remote Sensing . 398-413...42 Other Effects . .. .. .. .. ... ... ... .... ... ...... 47 Turbulence. .. .. .. .. .. ... .... ... ..... 4 7 High-Power Beam Effects ...examines LADAR techniques and detection methods to determine the optimum LADAR configuration. 
and then assesses the effects of atmospheric 7. AE3D SciTech Connect Spong, Donald A 2016-06-20 AE3D solves for the shear Alfven eigenmodes and eigenfrequencies in a torodal magnetic fusion confinement device. The configuration can be either 2D (e.g. tokamak, reversed field pinch) or 3D (e.g. stellarator, helical reversed field pinch, tokamak with ripple). The equations solved are based on a reduced MHD model and sound wave coupling effects are not currently included. 8. Ladar scene projector for a hardware-in-the-loop simulation system. PubMed Xu, Rui; Wang, Xin; Tian, Yi; Li, Zhuo 2016-07-20 In order to test a direct-detection ladar in a hardware-in-the-loop simulation system, a ladar scene projector is proposed. A model based on the ladar range equation is developed to calculate the profile of the ladar return signal. The influences of both the atmosphere and the target's surface properties are considered. The insertion delays of different channels of the ladar scene projector are investigated and compensated for. A target range image with 108 pixels is generated. The simulation range is from 0 to 15 km, the range resolution is 1.04 m, the range error is 1.28 cm, and the peak-valley error for different channels is 15 cm. 9. Use of 3D laser radar for navigation of unmanned aerial and ground vehicles in urban and indoor environments Uijt de Haag, Maarten; Venable, Don; Smearcheck, Mark 2007-04-01 This paper discusses the integration of Inertial measurements with measurements from a three-dimensional (3D) imaging sensor for position and attitude determination of unmanned aerial vehicles (UAV) and autonomous ground vehicles (AGV) in urban or indoor environments. To enable operation of UAVs and AGVs at any time in any environment a Precision Navigation, Attitude, and Time (PNAT) capability is required that is robust and not solely dependent on the Global Positioning System (GPS). 
In urban and indoor environments a GPS position capability may not only be unavailable due to shadowing, significant signal attenuation or multipath, but also due to intentional denial or deception. Although deep integration of GPS and Inertial Measurement Unit (IMU) data may prove to be a viable solution an alternative method is being discussed in this paper. The alternative solution is based on 3D imaging sensor technologies such as Flash Ladar (Laser Radar). Flash Ladar technology consists of a modulated laser emitter coupled with a focal plane array detector and the required optics. Like a conventional camera, this sensor creates an "image" of the environment; but instead of producing a 2D image where each pixel has an associated intensity value, the flash Ladar generates an image where each pixel has an associated range and intensity value. Integration of flash Ladar with the attitude from the IMU allows creation of a 3-D scene. Current low-cost Flash Ladar technology is capable of greater than 100 x 100 pixel resolution with 5 mm depth resolution at a 30 Hz frame rate. The proposed algorithm first converts the 3D imaging sensor measurements to a point cloud in 3D; next, significant environmental features such as planar features (walls), line features or point features (corners) are extracted and associated from one 3D imaging sensor frame to the next. Finally, characteristics of these features such as the normal or direction vectors are used to compute the platform position and attitude. Oldham, Mark 2015-01-01 11. 3-D Seismic Interpretation Moore, Gregory F. 2009-05-01 This volume is a brief introduction aimed at those who wish to gain a basic and relatively quick understanding of the interpretation of three-dimensional (3-D) seismic reflection data. The book is well written, clearly illustrated, and easy to follow. Enough elementary mathematics are presented for a basic understanding of seismic methods, but more complex mathematical derivations are avoided. 
References are listed for readers interested in more advanced explanations. After a brief introduction, the book logically begins with a succinct chapter on modern 3-D seismic data acquisition and processing. Standard 3-D acquisition methods are presented, and an appendix expands on more recent acquisition techniques, such as multiple-azimuth and wide-azimuth acquisition. Although this chapter covers the basics of standard time processing quite well, there is only a single sentence about prestack depth imaging, and anisotropic processing is not mentioned at all, even though both techniques are now becoming standard. 12. Bootstrapping 3D fermions DOE PAGES Iliesiu, Luca; Kos, Filip; Poland, David; ... 2016-03-17 We study the conformal bootstrap for a 4-point function of fermions <ψψψψ> in 3D. We first introduce an embedding formalism for 3D spinors and compute the conformal blocks appearing in fermion 4-point functions. Using these results, we find general bounds on the dimensions of operators appearing in the ψ × ψ OPE, and also on the central charge CT. We observe features in our bounds that coincide with scaling dimensions in the Gross-Neveu models at large N. Finally, we also speculate that other features could coincide with a fermionic CFT containing no relevant scalar operators. 13. Bootstrapping 3D fermions SciTech Connect Iliesiu, Luca; Kos, Filip; Poland, David; Pufu, Silviu S.; Simmons-Duffin, David; Yacoby, Ran 2016-03-17 We study the conformal bootstrap for a 4-point function of fermions <ψψψψ> in 3D. We first introduce an embedding formalism for 3D spinors and compute the conformal blocks appearing in fermion 4-point functions. Using these results, we find general bounds on the dimensions of operators appearing in the ψ × ψ OPE, and also on the central charge CT. We observe features in our bounds that coincide with scaling dimensions in the Gross-Neveu models at large N. 
Finally, we also speculate that other features could coincide with a fermionic CFT containing no relevant scalar operators. 14. Medical 3-D Printing. PubMed Furlow, Bryant 2017-05-01 Three-dimensional printing is used in the manufacturing industry, medical and pharmaceutical research, drug production, clinical medicine, and dentistry, with implications for precision and personalized medicine. This technology is advancing the development of patient-specific prosthetics, stents, splints, and fixation devices and is changing medical education, treatment decision making, and surgical planning. Diagnostic imaging modalities play a fundamental role in the creation of 3-D printed models. Although most 3-D printed objects are rigid, flexible soft-tissue-like prosthetics also can be produced. ©2017 American Society of Radiologic Technologists. 15. Venus in 3D NASA Technical Reports Server (NTRS) Plaut, Jeffrey J. 1993-01-01 Stereographic images of the surface of Venus which enable geologists to reconstruct the details of the planet's evolution are discussed. The 120-meter resolution of these 3D images makes it possible to construct digital topographic maps from which precise measurements can be made of the heights, depths, slopes, and volumes of geologic structures. 16. 3D photoacoustic imaging Carson, Jeffrey J. L.; Roumeliotis, Michael; Chaudhary, Govind; Stodilka, Robert Z.; Anastasio, Mark A. 2010-06-01 Our group has concentrated on development of a 3D photoacoustic imaging system for biomedical imaging research. The technology employs a sparse parallel detection scheme and specialized reconstruction software to obtain 3D optical images using a single laser pulse. With the technology we have been able to capture 3D movies of translating point targets and rotating line targets. The current limitation of our 3D photoacoustic imaging approach is its inability to reconstruct complex objects in the field of view.
This is primarily due to the relatively small number of projections used to reconstruct objects. However, in many photoacoustic imaging situations, only a few objects may be present in the field of view and these objects may have very high contrast compared to background. That is, the objects have sparse properties. Therefore, our work had two objectives: (i) to utilize mathematical tools to evaluate 3D photoacoustic imaging performance, and (ii) to test image reconstruction algorithms that prefer sparseness in the reconstructed images. Our approach was to utilize singular value decomposition techniques to study the imaging operator of the system and evaluate the complexity of objects that could potentially be reconstructed. We also compared the performance of two image reconstruction algorithms (algebraic reconstruction and l1-norm techniques) at reconstructing objects of increasing sparseness. We observed that for a 15-element detection scheme, the number of measurable singular vectors representative of the imaging operator was consistent with the demonstrated ability to reconstruct point and line targets in the field of view. We also observed that the l1-norm reconstruction technique, which is known to prefer sparseness in reconstructed images, was superior to the algebraic reconstruction technique. Based on these findings, we concluded (i) that singular value decomposition of the imaging operator provides valuable insight into the capabilities of 17. Scalable singular 3D modeling for digital battlefield applications Jannson, Tomasz P.; Ternovskiy, Igor V. 2000-10-01 We propose a new classification algorithm to detect and classify targets of interest. It is based on an advanced brand of analytic geometry of manifolds, called theory of catastrophes. Physical Optics Corporation's (POC) scalable 3D model representation provides automatic and real-time analysis of a discrete frame of a sensed 2D imagery of terrain, urban, and target features.
It then transforms this frame of discrete different-perspective 2D views of a target into a 3D continuous model called a pictogram. The unique local stereopsis feature of this modeling is the surprising ability to locally obtain a 3D pictogram from a single monoscopic photograph. The proposed 3D modeling, combined with more standard change detection algorithms and 3D terrain feature models, will constitute a novel classification algorithm and a new type of digital battlefield imagery for Imaging Systems. 18. Status report on next-generation LADAR for driving unmanned ground vehicles Juberts, Maris; Barbera, Anthony J. 2004-12-01 19. Range resolution improvement of eyesafe ladar testbed (ELT) measurements using sparse signal deconvolution Budge, Scott E.; Gunther, Jacob H. 2014-06-01 The Eyesafe Ladar Test-bed (ELT) is an experimental ladar system with the capability of digitizing return laser pulse waveforms at 2 GHz. These waveforms can then be exploited off-line in the laboratory to develop signal processing techniques for noise reduction, range resolution improvement, and range discrimination between two surfaces of similar range interrogated by a single laser pulse. This paper presents the results of experiments with new deconvolution algorithms with the hoped-for gains of improving the range discrimination of the ladar system. The sparsity of ladar returns is exploited to solve the deconvolution problem in two steps. The first step is to estimate a point target response using a database of measured calibration data. This basic target response is used to construct a dictionary of target responses with different delays/ranges. Using this dictionary ladar returns from a wide variety of surface configurations can be synthesized by taking linear combinations. A sparse linear combination matches the physical reality that ladar returns consist of the overlapping of only a few pulses. 
The dictionary construction process is a pre-processing step that is performed only once. The deconvolution step is performed by minimizing the error between the measured ladar return and the dictionary model while constraining the coefficient vector to be sparse. Other constraints such as the non-negativity of the coefficients are also applied. The results of the proposed technique are presented in the paper and are shown to compare favorably with previously investigated deconvolution techniques. 20. Metric Evaluation Pipeline for 3d Modeling of Urban Scenes Bosch, M.; Leichtman, A.; Chilcott, D.; Goldberg, H.; Brown, M. 2017-05-01 Publicly available benchmark data and metric evaluation approaches have been instrumental in enabling research to advance state of the art methods for remote sensing applications in urban 3D modeling. Most publicly available benchmark datasets have consisted of high resolution airborne imagery and lidar suitable for 3D modeling on a relatively modest scale. To enable research in larger scale 3D mapping, we have recently released a public benchmark dataset with multi-view commercial satellite imagery and metrics to compare 3D point clouds with lidar ground truth. We now define a more complete metric evaluation pipeline developed as publicly available open source software to assess semantically labeled 3D models of complex urban scenes derived from multi-view commercial satellite imagery. Evaluation metrics in our pipeline include horizontal and vertical accuracy and completeness, volumetric completeness and correctness, perceptual quality, and model simplicity. Sources of ground truth include airborne lidar and overhead imagery, and we demonstrate a semi-automated process for producing accurate ground truth shape files to characterize building footprints. We validate our current metric evaluation pipeline using 3D models produced using open source multi-view stereo methods. 
Data and software is made publicly available to enable further research and planned benchmarking activities. 1. High-resolution 3D imaging laser radar flight test experiments Marino, Richard M.; Davis, W. R.; Rich, G. C.; McLaughlin, J. L.; Lee, E. I.; Stanley, B. M.; Burnside, J. W.; Rowe, G. S.; Hatch, R. E.; Square, T. E.; Skelly, L. J.; O'Brien, M.; Vasile, A.; Heinrichs, R. M. 2005-05-01 Situation awareness and accurate Target Identification (TID) are critical requirements for successful battle management. Ground vehicles can be detected, tracked, and in some cases imaged using airborne or space-borne microwave radar. Obscurants such as camouflage net and/or tree canopy foliage can degrade the performance of such radars. Foliage can be penetrated with long wavelength microwave radar, but generally at the expense of imaging resolution. The goals of the DARPA Jigsaw program include the development and demonstration of high-resolution 3-D imaging laser radar (ladar) sensor technology and systems that can be used from airborne platforms to image and identify military ground vehicles that may be hiding under camouflage or foliage such as tree canopy. With DARPA support, MIT Lincoln Laboratory has developed a rugged and compact 3-D imaging ladar system that has successfully demonstrated the feasibility and utility of this application. The sensor system has been integrated into a UH-1 helicopter for winter and summer flight campaigns. The sensor operates day or night and produces high-resolution 3-D spatial images using short laser pulses and a focal plane array of Geiger-mode avalanche photo-diode (APD) detectors with independent digital time-of-flight counting circuits at each pixel. The sensor technology includes Lincoln Laboratory developments of the microchip laser and novel focal plane arrays.
The microchip laser is a passively Q-switched solid-state frequency-doubled Nd:YAG laser transmitting short laser pulses (300 ps FWHM) at 16 kilohertz pulse rate and at 532 nm wavelength. The single photon detection efficiency has been measured to be > 20 % using these 32x32 Silicon Geiger-mode APDs at room temperature. The APD saturates while providing a gain of typically > 10^6. The pulse out of the detector is used to stop a 500 MHz digital clock register integrated within the focal-plane array at each pixel. Using the detector in this binary response mode 2. 3-D Grab! Connors, M. G.; Schofield, I. S. 2012-12-01 Modern technologies in imaging greatly extend the potential to present visual information. With recently developed software tools, the perception of the third dimension can not only dramatically enhance presentation, but also allow spatial data to be better encoded. 3-D images can be taken for many subjects with only one camera, carefully moved to generate a stereo pair. Color anaglyph viewing now can be very effective using computer screens, and active filter technologies can enhance visual effects with ever-decreasing cost. We will present various novel results of 3-D imaging, including those from the auroral observations of the new twinned Athabasca University Geophysical Observatories.; Single camera stereo image for viewing with red/cyan glasses. 3. LADAR Range Image Interpolation Exploiting Pulse Width Expansion DTIC Science & Technology 2012-03-22 33 3.2. The Gaussian beam upon exiting the laser cavity of the LADAR. 35 3.3. Two Gaussian beams with different beam waists ...g(x, y), is normalized so that the double summation of the squared field is equal to one. The variable ωo is the beam waist, which is described next...Beam waist is a characteristic that is essential in describing a Gaussian beam. The beam waist, represented by ωo in Equation (2.3), describes the 4.
Hierarchical searching in model-based LADAR ATR using statistical separability tests DelMarco, Stephen; Sobel, Erik; Douglas, Joel 2006-05-01 In this work we investigate simultaneous object identification improvement and efficient library search for model-based object recognition applications. We develop an algorithm to provide efficient, prioritized, hierarchical searching of the object model database. A common approach to model-based object recognition chooses the object label corresponding to the best match score. However, due to corrupting effects the best match score does not always correspond to the correct object model. To address this problem, we propose a search strategy which exploits information contained in a number of representative elements of the library to drill down to a small class with high probability of containing the object. We first optimally partition the library into a hierarchic taxonomy of disjoint classes. A small number of representative elements are used to characterize each object model class. At each hierarchy level, the observed object is matched against the representative elements of each class to generate score sets. A hypothesis testing problem, using a distribution-free statistical test, is defined on the score sets and used to choose the appropriate class for a prioritized search. We conduct a probabilistic analysis of the computational cost savings, and provide a formula measuring the computational advantage of the proposed approach. We generate numerical results using match scores derived from matching highly-detailed CAD models of civilian ground vehicles used in 3-D LADAR ATR. We present numerical results showing effects on classification performance of significance level and representative element number in the score set hypothesis testing problem. 5. Unoriented 3d TFTs Bhardwaj, Lakshya 2017-05-01 This paper generalizes two facts about oriented 3d TFTs to the unoriented case. 
On one hand, it is known that oriented 3d TFTs having a topological boundary condition admit a state-sum construction known as the Turaev-Viro construction. This is related to the string-net construction of fermionic phases of matter. We show how Turaev-Viro construction can be generalized to unoriented 3d TFTs. On the other hand, it is known that the "fermionic" versions of oriented TFTs, known as Spin-TFTs, can be constructed in terms of "shadow" TFTs which are ordinary oriented TFTs with an anomalous ℤ 2 1-form symmetry. We generalize this correspondence to Pin+-TFTs by showing that they can be constructed in terms of ordinary unoriented TFTs with anomalous ℤ 2 1-form symmetry having a mixed anomaly with time-reversal symmetry. The corresponding Pin+-TFT does not have any anomaly for time-reversal symmetry however and hence it can be unambiguously defined on a non-orientable manifold. In case a Pin+-TFT admits a topological boundary condition, one can combine the above two statements to obtain a Turaev-Viro-like construction of Pin+-TFTs. As an application of these ideas, we construct a large class of Pin+-SPT phases. 6. 3D Audio System NASA Technical Reports Server (NTRS) 1992-01-01 Ames Research Center research into virtual reality led to the development of the Convolvotron, a high speed digital audio processing system that delivers three-dimensional sound over headphones. It consists of a two-card set designed for use with a personal computer. The Convolvotron's primary application is presentation of 3D audio signals over headphones. Four independent sound sources are filtered with large time-varying filters that compensate for motion. The perceived location of the sound remains constant. Possible applications are in air traffic control towers or airplane cockpits, hearing and perception research and virtual reality development. 7. 
Twin Peaks - 3D NASA Technical Reports Server (NTRS) 1997-01-01 The two hills in the distance, approximately one to two kilometers away, have been dubbed the 'Twin Peaks' and are of great interest to Pathfinder scientists as objects of future study. 3D glasses are necessary to identify surface detail. The white areas on the left hill, called the 'Ski Run' by scientists, may have been formed by hydrologic processes. The IMP is a stereo imaging system with color capability provided by 24 selectable filters -- twelve filters per 'eye. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 8. 3D and beyond Fung, Y. C. 1995-05-01 This conference on physiology and function covers a wide range of subjects, including the vasculature and blood flow, the flow of gas, water, and blood in the lung, the neurological structure and function, the modeling, and the motion and mechanics of organs. Many technologies are discussed. I believe that the list would include a robotic photographer, to hold the optical equipment in a precisely controlled way to obtain the images for the user. Why are 3D images needed? They are to achieve certain objectives through measurements of some objects. For example, in order to improve performance in sports or beauty of a person, we measure the form, dimensions, appearance, and movements. 9. 3D Displacements in the 7 December, 2015 M7.2 Murghob, Tajikistan Earthquake, from Optical Imagery, Stereo Topography, and InSAR, and Constraints on the 1911 Sarez Event Elliott, A. J.; Parsons, B.; Elliott, J. R.; Hollingsworth, J. 2016-12-01 Overtopping of the Usoi landslide dam, formed in 1911 during a poorly understood major earthquake in the Pamirs, represents one of the greatest natural hazards in Central Asia. 
On 7 Dec, 2015 a M7.2 earthquake struck the site, however it apparently differed in source location and landslide productivity from the 1911 M7.2 event. We measure the displacement field of the 2015 earthquake using the full gamut of space-based imaging techniques, revealing left-lateral offset along 60 km of the SSW-striking Karakul-Sarez fault (KSF), and numerous coseismic landslides. Sentinel-1 interferograms reveal up to 1.5 m of left-lateral surface displacement along 40 km of the KSF, with an additional 10-15 km of buried, blind rupture at both ends. This matches the extent of the dislocation we determine from pixel-tracking of pre- and post-event Landsat-8 scenes. Both of these far-field deformation maps indicate that the rupture ended northward around a 3-km step in the fault trace, and southward beneath Sarez Lake. Direct comparison of pre- and post-event SPOT6/7 images shows discontinuous new scarps and small stream offsets along 30 km of the KSF from the shore of Sarez Lake northward, corroborating this surface rupture extent. We difference pre- and post-event topography derived from the tristereo SPOT images, and thus identify throughgoing strike-slip rupture as the differential lateral advection of steep ridges. Our detailed height-change maps also reveal numerous landslides that may be attributed to the earthquake. In particular, massive slope failures around the shore of Sarez Lake indicate that overtopping of the Usoi dam by a landslide-induced seiche remains one of the principal secondary seismic hazards in the region. Our remote sensing of the 2015 rupture shows that it occupies the least recently ruptured reach of the KSF. To the north fresh scarps and a clear moletrack evident in pre-event imagery represent the prior event, the extent of which does not overlap the 2015 10. Optical 3D imaging and visualization of concealed objects Berginc, G.; Bellet, J.-B.; Berechet, I.; Berechet, S. 
2016-09-01 This paper gives new insights on optical 3D imagery. In this paper we explore the advantages of laser imagery to form a three-dimensional image of the scene. 3D laser imaging can be used for three-dimensional medical imaging and surveillance because of the ability to identify tumors or concealed objects. We consider the problem of 3D reconstruction based upon 2D angle-dependent laser images. The objective of this new 3D laser imaging is to provide users a complete 3D reconstruction of objects from available 2D data limited in number. The 2D laser data used in this paper come from simulations that are based on the calculation of the laser interactions with the different meshed objects of the scene of interest or from experimental 2D laser images. We show that combining the Radon transform on 2D laser images with the Maximum Intensity Projection can generate 3D views of the considered scene from which we can extract the 3D concealed object in real time. With different original numerical or experimental examples, we investigate the effects of the input contrasts. We show the robustness and the stability of the method. We have developed a new patented method of 3D laser imaging based on three-dimensional reflective tomographic reconstruction algorithms and an associated visualization method. In this paper we present the global 3D reconstruction and visualization procedures. 11. Random subspace ensemble for target recognition of ladar range image Liu, Zheng-Jun; Li, Qi; Wang, Qi 2013-02-01 Laser detection and ranging (ladar) range images have attracted considerable attention in the field of automatic target recognition. Generally, it is difficult to collect a mass of range images for ladar in real applications. However, with small samples, the Hughes effect may occur when the number of features is larger than the size of the training samples. A random subspace ensemble of support vector machine (RSE-SVM) is applied to solve the problem.
Three experiments were performed: (1) the performance comparison among affine moment invariants (AMIs), Zernike moment invariants (ZMIs) and their combined moment invariants (CMIs) based on different size training sets using single SVM; (2) the impact analysis of the different number of features about the RSE-SVM and semi-random subspace ensemble of support vector machine; (3) the performance comparison between the RSE-SVM and the CMIs with SVM ensembles. The experiment's results demonstrate that the RSE-SVM is able to relieve the Hughes effect and perform better than ZMIs with single SVM and CMIs with SVM ensembles. 12. 3D Surgical Simulation PubMed Central Cevidanes, Lucia; Tucker, Scott; Styner, Martin; Kim, Hyungmin; Chapuis, Jonas; Reyes, Mauricio; Proffit, William; Turvey, Timothy; Jaskolka, Michael 2009-01-01 This paper discusses the development of methods for computer-aided jaw surgery. Computer-aided jaw surgery allows us to incorporate the high level of precision necessary for transferring virtual plans into the operating room. We also present a complete computer-aided surgery (CAS) system developed in close collaboration with surgeons. Surgery planning and simulation include construction of 3D surface models from Cone-beam CT (CBCT), dynamic cephalometry, semi-automatic mirroring, interactive cutting of bone and bony segment repositioning. A virtual setup can be used to manufacture positioning splints for intra-operative guidance. The system provides further intra-operative assistance with the help of a computer display showing jaw positions and 3D positioning guides updated in real-time during the surgical procedure. The CAS system aids in dealing with complex cases with benefits for the patient, with surgical practice, and for orthodontic finishing. 
Advanced software tools for diagnosis and treatment planning allow preparation of detailed operative plans, osteotomy repositioning, bone reconstructions, surgical resident training and assessing the difficulties of the surgical procedures prior to the surgery. CAS has the potential to make the elaboration of the surgical plan a more flexible process, increase the level of detail and accuracy of the plan, yield higher operative precision and control, and enhance documentation of cases. Supported by NIDCR DE017727, and DE018962 PMID:20816308 13. Martian terrain - 3D NASA Technical Reports Server (NTRS) 1997-01-01 An area of rocky terrain near the landing site of the Sagan Memorial Station can be seen in this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 14. Martian terrain - 3D NASA Technical Reports Server (NTRS) 1997-01-01 An area of rocky terrain near the landing site of the Sagan Memorial Station can be seen in this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. 
This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 15. Research on positioning mode of LADAR aided navigation system over plain area Lin, Yi; Yan, Lei; Tong, Qingxi 2007-11-01 Laser Radar (LADAR) achieves more applications on aerial aided-navigation in mountainous areas for its good performance. But plain areas encounter terrain elevation's slow variation and occasional unavailability of Digital Feature Analysis Database (DFAD), which as necessary reference. Looking for replaceable map source and extracting common characters for matching, are the fundamental circles of imaging LADAR aided navigation research. In this paper aerial high-resolution remote sensing (RS) images are applied as substitute for DFAD, and the edge factor is chosen out by synthetically analyzing RS images' and imaging LADAR point cloud's characters. Then edge extraction algorithm based on multi-scale wavelet is explored to reflect their common features, and weighted Hausdorff distance method is applied to match for positioning. At last the high-resolution RS images and imaging LADAR data of the same area are assumed for simulation experiment, which testifies the validity of the methods proposed above. 16.
Worldwide Uncertainty Assessments of Ladar and Radar Signal-to-Noise Ratio Performance for Diverse Low Altitude Atmospheric Environments DTIC Science & Technology 2009-05-01 interrogation. Results are presented in the form of worldwide plots of notional signal to noise ratio. The ladar and 95 GHz system types exhibit similar SNR ...signal to noise ratio. The ladar and 95 GHz system types exhibit similar SNR performance for forward oblique clear air operation. 1.557 µm ladar...good to very good SNR performance for both oblique and vertical paths for both fog and stratus conditions. 1.1 HELEEOS Worldwide Seasonal, Diurnal 17. 3D field harmonics SciTech Connect Caspi, S.; Helm, M.; Laslett, L.J. 1991-03-30 We have developed an harmonic representation for the three dimensional field components within the windings of accelerator magnets. The form by which the field is presented is suitable for interfacing with other codes that make use of the 3D field components (particle tracking and stability). The field components can be calculated with high precision and reduced CPU time at any location (r, θ, z) inside the magnet bore. The same conductor geometry which is used to simulate line currents is also used in CAD with modifications more readily available. It is our hope that the format used here for magnetic fields can be used not only as a means of delivering fields but also as a way by which beam dynamics can suggest correction to the conductor geometry. 5 refs., 70 figs. 18. MAP3D: a media processor approach for high-end 3D graphics Darsa, Lucia; Stadnicki, Steven; Basoglu, Chris 1999-12-01 Equator Technologies, Inc. has used a software-first approach to produce several programmable and advanced VLIW processor architectures that have the flexibility to run both traditional systems tasks and an array of media-rich applications.
For example, Equator's MAP1000A is the world's fastest single-chip programmable signal and image processor targeted for digital consumer and office automation markets. The Equator MAP3D is a proposal for the architecture of the next generation of the Equator MAP family. The MAP3D is designed to achieve high-end 3D performance and a variety of customizable special effects by combining special graphics features with high performance floating-point and media processor architecture. As a programmable media processor, it offers the advantages of a completely configurable 3D pipeline--allowing developers to experiment with different algorithms and to tailor their pipeline to achieve the highest performance for a particular application. With the support of Equator's advanced C compiler and toolkit, MAP3D programs can be written in a high-level language. This allows the compiler to successfully find and exploit any parallelism in a programmer's code, thus decreasing the time to market of a given applications. The ability to run an operating system makes it possible to run concurrent applications in the MAP3D chip, such as video decoding while executing the 3D pipelines, so that integration of applications is easily achieved--using real-time decoded imagery for texturing 3D objects, for instance. This novel architecture enables an affordable, integrated solution for high performance 3D graphics. Shepherd, Orr; LePage, Andrew J.; Wijntjes, Geert J.; Zehnpfennig, Theodore F.; Sackos, John T.; Nellums, Robert O. 1999-01-01 Visidyne, Inc., teaming with Sandia National Laboratories, has developed the preliminary design for an innovative scannerless 3-D laser radar capable of acquiring, tracking, and determining the coordinates of small caliber projectiles in flight with sufficient precision, so their origin can be established by back projecting their tracks to their source. 
The design takes advantage of the relatively large effective cross-section of a bullet at optical wavelengths. Key to its implementation is the use of efficient, high-power laser diode arrays for illuminators and an imaging laser receiver using a unique CCD imager design, that acquires the information to establish x, y (angle-angle) and range coordinates for each bullet at very high frame rates. The detection process achieves a high degree of discrimination by using the optical signature of the bullet, solar background mitigation, and track detection. Field measurements and computer simulations have been used to provide the basis for a preliminary design of a robust bullet tracker, the Counter Sniper 3-D Laser Radar. Experimental data showing 3-D test imagery acquired by a lidar with architecture similar to that of the proposed Counter Sniper 3-D Lidar are presented. A proposed Phase II development would yield an innovative, compact, and highly efficient bullet-tracking laser radar. Such a device would meet the needs of not only the military, but also federal, state, and local law enforcement organizations. 20. Prominent rocks - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Many prominent rocks near the Sagan Memorial Station are featured in this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. Wedge is at lower left; Shark, Half-Dome, and Pumpkin are at center. Flat Top, about four inches high, is at lower right. The horizon in the distance is one to two kilometers away. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech).
The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 1. Pluto in 3-D NASA Image and Video Library 2015-10-23 Global stereo mapping of Pluto surface is now possible, as images taken from multiple directions are downlinked from NASA New Horizons spacecraft. Stereo images will eventually provide an accurate topographic map of most of the hemisphere of Pluto seen by New Horizons during the July 14 flyby, which will be key to understanding Pluto's geological history. This example, which requires red/blue stereo glasses for viewing, shows a region 180 miles (300 kilometers) across, centered near longitude 130 E, latitude 20 N (the red square in the global context image). North is to the upper left. The image shows an ancient, heavily cratered region of Pluto, dotted with low hills and cut by deep fractures, which indicate extension of Pluto's crust. Analysis of these stereo images shows that the steep fracture in the upper left of the image is about 1 mile (1.6 kilometers) deep, and the craters in the lower right part of the image are up to 1.3 miles (2.1 km) deep. Smallest visible details are about 0.4 miles (0.6 kilometers) across. You will need 3D glasses to view this image showing an ancient, heavily cratered region of Pluto. http://photojournal.jpl.nasa.gov/catalog/PIA20032 2. Intraoral 3D scanner Kühmstedt, Peter; Bräuer-Burchardt, Christian; Munkelt, Christoph; Heinze, Matthias; Palme, Martin; Schmidt, Ingo; Hintersehr, Josef; Notni, Gunther 2007-09-01 Here a new set-up of a 3D-scanning system for CAD/CAM in dental industry is proposed. The system is designed for direct scanning of the dental preparations within the mouth. 
The measuring process is based on phase correlation technique in combination with fast fringe projection in a stereo arrangement. The novelty in the approach is characterized by the following features: A phase correlation between the phase values of the images of two cameras is used for the co-ordinate calculation. This works contrary to the usage of only phase values (phasogrammetry) or classical triangulation (phase values and camera image co-ordinate values) for the determination of the co-ordinates. The main advantage of the method is that the absolute value of the phase at each point does not directly determine the coordinate. Thus errors in the determination of the co-ordinates are prevented. Furthermore, using the epipolar geometry of the stereo-like arrangement the phase unwrapping problem of fringe analysis can be solved. The endoscope like measurement system contains one projection and two camera channels for illumination and observation of the object, respectively. The new system has a measurement field of nearly 25mm × 15mm. The user can measure two or three teeth at one time. So the system can by used for scanning of single tooth up to bridges preparations. In the paper the first realization of the intraoral scanner is described. 3. 'Diamond' in 3-D NASA Technical Reports Server (NTRS) 2004-01-01 This 3-D, microscopic imager mosaic of a target area on a rock called 'Diamond Jenness' was taken after NASA's Mars Exploration Rover Opportunity ground into the surface with its rock abrasion tool for a second time. Opportunity has bored nearly a dozen holes into the inner walls of 'Endurance Crater.' On sols 177 and 178 (July 23 and July 24, 2004), the rover worked double-duty on Diamond Jenness. Surface debris and the bumpy shape of the rock resulted in a shallow and irregular hole, only about 2 millimeters (0.08 inch) deep. The final depth was not enough to remove all the bumps and leave a neat hole with a smooth floor. 
This extremely shallow depression was then examined by the rover's alpha particle X-ray spectrometer. On Sol 178, Opportunity's 'robotic rodent' dined on Diamond Jenness once again, grinding almost an additional 5 millimeters (about 0.2 inch). The rover then applied its Moessbauer spectrometer to the deepened hole. This double dose of Diamond Jenness enabled the science team to examine the rock at varying layers. Results from those grindings are currently being analyzed. The image mosaic is about 6 centimeters (2.4 inches) across. 4. 'Diamond' in 3-D NASA Technical Reports Server (NTRS) 2004-01-01 This 3-D, microscopic imager mosaic of a target area on a rock called 'Diamond Jenness' was taken after NASA's Mars Exploration Rover Opportunity ground into the surface with its rock abrasion tool for a second time. Opportunity has bored nearly a dozen holes into the inner walls of 'Endurance Crater.' On sols 177 and 178 (July 23 and July 24, 2004), the rover worked double-duty on Diamond Jenness. Surface debris and the bumpy shape of the rock resulted in a shallow and irregular hole, only about 2 millimeters (0.08 inch) deep. The final depth was not enough to remove all the bumps and leave a neat hole with a smooth floor. This extremely shallow depression was then examined by the rover's alpha particle X-ray spectrometer. On Sol 178, Opportunity's 'robotic rodent' dined on Diamond Jenness once again, grinding almost an additional 5 millimeters (about 0.2 inch). The rover then applied its Moessbauer spectrometer to the deepened hole. This double dose of Diamond Jenness enabled the science team to examine the rock at varying layers. Results from those grindings are currently being analyzed. The image mosaic is about 6 centimeters (2.4 inches) across. 5. 
3D Printing and 3D Bioprinting in Pediatrics PubMed Central Vijayavenkataraman, Sanjairaj; Fuh, Jerry Y H; Lu, Wen Feng 2017-01-01 Additive manufacturing, commonly referred to as 3D printing, is a technology that builds three-dimensional structures and components layer by layer. Bioprinting is the use of 3D printing technology to fabricate tissue constructs for regenerative medicine from cell-laden bio-inks. 3D printing and bioprinting have huge potential in revolutionizing the field of tissue engineering and regenerative medicine. This paper reviews the application of 3D printing and bioprinting in the field of pediatrics. PMID:28952542 6. 3D Printing and 3D Bioprinting in Pediatrics. PubMed Vijayavenkataraman, Sanjairaj; Fuh, Jerry Y H; Lu, Wen Feng 2017-07-13 Additive manufacturing, commonly referred to as 3D printing, is a technology that builds three-dimensional structures and components layer by layer. Bioprinting is the use of 3D printing technology to fabricate tissue constructs for regenerative medicine from cell-laden bio-inks. 3D printing and bioprinting have huge potential in revolutionizing the field of tissue engineering and regenerative medicine. This paper reviews the application of 3D printing and bioprinting in the field of pediatrics. Khizhnyak, Anatoliy; Markov, Vladimir; Tomov, Ivan; Murrell, David 2016-05-01 Security measures sometimes require persistent surveillance of government, military and public areas Borders, bridges, sport arenas, airports and others are often surveilled with low-cost cameras. Their low-light performance can be enhanced with laser illuminators; however various operational scenarios may require a low-intensity laser illumination with the object-scattered light intensity lower than the sensitivity of the Ladar image detector. This paper discusses a novel type of high-gain optical image amplifier. The approach enables time-synchronization of the incoming and amplifying signals with accuracy <= 1 ns. 
The technique allows the incoming signal to be amplified without the need to match the input spectrum to the cavity modes. Instead, the incoming signal is accepted within the spectral band of the amplifier. We have gauged experimentally the performance of the amplifier with a 40 dB gain and an angle of view 20 mrad. 8. SWIR HgCdTe photodiodes for LADAR applications Boieriu, Paul; Park, J. H.; Hahn, S.-R.; Wijewarnasuriya, Priyalal; Stan, Barry; Sivananthan, S. 2016-09-01 The detection of infrared radiation is of great importance for many defense and civilian applications. Eyesafe short-wavelength infrared (SWIR) spectral range is particularly interesting due to atmospheric propagation through obscurants. Applications include low-cost, long-range target identification, identification of heavily obscured targets, obstacle avoidance, and high resolution imaging from a variety of platforms including hand-held devices, unmanned air vehicles, or ground vehicles. HgCdTe grown on CdTe/Si by molecular beam epitaxy (MBE) was processed into mini-arrays for 1.55 μm LADAR applications. Low-capacitance photodiodes (<10 pF) were demonstrated at room temperature with frequency responses exceeding 100 MHz. This paper discusses the device architecture and device performance results. 9. Experimental demonstration of a stripmap holographic aperture ladar system. PubMed Stafford, Jason W; Duncan, Bradley D; Dierking, Matthew P 2010-04-20 By synthesizing large effective apertures through the translation of a smaller imaging sensor and the subsequent proper phasing and correlation of detected signals in postprocessing, holographic aperture ladar (HAL) systems seek to increase the resolution of remotely imaged targets. The stripmap HAL process was demonstrated in the laboratory, for the first time to our knowledge. Our results show that the stripmap HAL transformation can precisely account for off-axis transmitter induced phase migrations. 
This in turn allows multiple pupil plane field segments, sequentially collected across a synthetic aperture, to be coherently mosaiced together. As a direct consequence, we have been able to confirm the capability of the HAL method to potentially provide substantial increases in longitudinal cross-range resolution. The measurement and sampling of complex pupil plane field segments, as well as target related issues arising from short laboratory ranges, have also been addressed. 10. Laser development for optimal helicopter obstacle warning system LADAR performance Yaniv, A.; Krupkin, V.; Abitbol, A.; Stern, J.; Lurie, E.; German, A.; Solomonovich, S.; Lubashitz, B.; Harel, Y.; Engart, S.; Shimoni, Y.; Hezy, S.; Biltz, S.; Kaminetsky, E.; Goldberg, A.; Chocron, J.; Zuntz, N.; Zajdman, A. 2005-04-01 Low lying obstacles present immediate danger to both military and civilian helicopters performing low-altitude flight missions. A LADAR obstacle detection system is the natural solution for enhancing helicopter safety and improving the pilot situation awareness. Elop is currently developing an advanced Surveillance and Warning Obstacle Ranging and Display (SWORD) system for the Israeli Air Force. Several key factors and new concepts have contributed to system optimization. These include an adaptive FOV, data memorization, autonomous obstacle detection and warning algorithms and the use of an agile laser transmitter. In the present work we describe the laser design and performance and discuss some of the experimental results. Our eye-safe laser is characterized by its pulse energy, repetition rate and pulse length agility. By dynamically controlling these parameters, we are able to locally optimize the system's obstacle detection range and scan density in accordance with the helicopter's instantaneous maneuver. 11. Thermal infrared exploitation for 3D face reconstruction Abayowa, Bernard O. 
2009-05-01 Despite the advances in face recognition research, current face recognition systems are still not accurate or robust enough to be deployed in uncontrolled environments. The existence of a pose and illumination invariant face recognition system is still lacking. This research exploits the relationship between thermal infrared and visible imagery, to estimate a 3D face with visible texture from infrared imagery. The relationship between visible and thermal infrared texture is learned using kernel canonical correlation analysis (KCCA), and then a 3D modeler is used to estimate the geometric structure from predicted visual imagery. This research will find its application in uncontrolled environments where illumination and pose invariant identification or tracking is required at long range such as urban search and rescue (Amber alert, missing dementia patient), and manhunt scenarios. 12. 3D volumetric radar using 94-GHz millimeter waves Takács, Barnabás 2006-05-01 This article describes a novel approach to the real-time visualization of 3D imagery obtained from a 3D millimeter wave scanning radar. The MMW radar system employs a spinning antenna to generate a fan-shaped scanning pattern of the entire scene. The beams formed this way provide all weather 3D distance measurements (range/azimuth display) of objects as they appear on the ground. The beam width of the antenna and its side lobes are optimized to produce the best possible resolution even at distances of up to 15 Kms. To create a full 3D data set the fan-pattern is tilted up and down with the help of a controlled stepper motor. For our experiments we collected data at 0.1 degree increments while using both bi-static and mono-static antennas in our arrangement. The data collected formed a stack of range-azimuth images in the shape of a cone. This information is displayed using our high-end 3D visualization engine capable of displaying high-resolution volumetric models with 30 frames per second. 
The resulting 3D scenes can then be viewed from any angle and subsequently processed to integrate, fuse or match them against real-life sensor imagery or 3D model data stored in a synthetic database. 13. Visualization of 3D Geological Models on Google Earth Choi, Y.; Um, J.; Park, M. 2013-05-01 14. Imagery Integration Team NASA Technical Reports Server (NTRS) Calhoun, Tracy; Melendrez, Dave 2014-01-01 -of-a-kind imagery assets and skill sets, such as ground-based fixed and tracking cameras, crew-in the-loop imaging applications, and the integration of custom or commercial-off-the-shelf sensors onboard spacecraft. For spaceflight applications, the Integration 2 Team leverages modeling, analytical, and scientific resources along with decades of experience and lessons learned to assist the customer in optimizing engineering imagery acquisition and management schemes for any phase of flight - launch, ascent, on-orbit, descent, and landing. The Integration 2 Team guides the customer in using NASA's world-class imagery analysis teams, which specialize in overcoming inherent challenges associated with spaceflight imagery sets. Precision motion tracking, two-dimensional (2D) and three-dimensional (3D) photogrammetry, image stabilization, 3D modeling of imagery data, lighting assessment, and vehicle fiducial marking assessments are available. During a mission or test, the Integration 2 Team provides oversight of imagery operations to verify fulfillment of imagery requirements. The team oversees the collection, screening, and analysis of imagery to build a set of imagery findings. It integrates and corroborates the imagery findings with other mission data sets, generating executive summaries to support time-critical mission decisions. NASA Technical Reports Server (NTRS) 2000-01-01 16. 
Conformal 3D visualization for virtual colonoscopy Haker, Steven; Angenent, Sigurd; Tannenbaum, Allen R.; Kikinis, Ron 2000-04-01 In this paper, we propose a new 3D visualization technique for virtual colonoscopy. Such visualization methods could have a major impact since they have the potential for non-invasively determining the presence of polyps and other pathologies. We moreover demonstrate a method which presents a surface scan of the entire colon as a cine, and affords the viewer the opportunity to examine each point on the surface without distortion. We use the theory of conformal mappings from differential geometry in order to derive an explicit method for flattening surfaces obtained from 3D colon computerized tomography (CT) imagery. Indeed, we describe a general finite element method based on a discretization of the Laplace- Beltrami operator for flattening a surface onto the plane in an angle preserving manner. We also provide simple formulas which may be used in a real time cine to correct for distortion. We apply our method to 3D colon CT data provided to us by the Surgical Planning Laboratory of Brigham and Women's Hospital. We show how the conformal nature of the flattening function provides a flattened representation of the colon which is similar in appearance to the original. Finally, we indicate a few frames of a distortion correcting cine which can be used to examine the entire colon surface. 17. A 3D Cloud-Construction Algorithm for the EarthCARE Satellite Mission NASA Technical Reports Server (NTRS) Barker, H. W.; Jerg, M. P.; Wehr, T.; Kato, S.; Donovan, D. P.; Hogan, R. J. 2011-01-01 This article presents and assesses an algorithm that constructs 3D distributions of cloud from passive satellite imagery and collocated 2D nadir profiles of cloud properties inferred synergistically from lidar, cloud radar and imager data. 18. A 3D Cloud-Construction Algorithm for the EarthCARE Satellite Mission NASA Technical Reports Server (NTRS) Barker, H. 
W.; Jerg, M. P.; Wehr, T.; Kato, S.; Donovan, D. P.; Hogan, R. J. 2011-01-01 This article presents and assesses an algorithm that constructs 3D distributions of cloud from passive satellite imagery and collocated 2D nadir profiles of cloud properties inferred synergistically from lidar, cloud radar and imager data. 19. 3D Spectroscopy in Astronomy Mediavilla, Evencio; Arribas, Santiago; Roth, Martin; Cepa-Nogué, Jordi; Sánchez, Francisco 2011-09-01 Preface; Acknowledgements; 1. Introductory review and technical approaches Martin M. Roth; 2. Observational procedures and data reduction James E. H. Turner; 3. 3D Spectroscopy instrumentation M. A. Bershady; 4. Analysis of 3D data Pierre Ferruit; 5. Science motivation for IFS and galactic studies F. Eisenhauer; 6. Extragalactic studies and future IFS science Luis Colina; 7. Tutorials: how to handle 3D spectroscopy data Sebastian F. Sánchez, Begona García-Lorenzo and Arlette Pécontal-Rousset. 20. Spherical 3D isotropic wavelets Lanusse, F.; Rassat, A.; Starck, J.-L. 2012-04-01 Context. Future cosmological surveys will provide 3D large scale structure maps with large sky coverage, for which a 3D spherical Fourier-Bessel (SFB) analysis in spherical coordinates is natural. Wavelets are particularly well-suited to the analysis and denoising of cosmological data, but a spherical 3D isotropic wavelet transform does not currently exist to analyse spherical 3D data. Aims: The aim of this paper is to present a new formalism for a spherical 3D isotropic wavelet, i.e. one based on the SFB decomposition of a 3D field and accompany the formalism with a public code to perform wavelet transforms. Methods: We describe a new 3D isotropic spherical wavelet decomposition based on the undecimated wavelet transform (UWT) described in Starck et al. (2006). We also present a new fast discrete spherical Fourier-Bessel transform (DSFBT) based on both a discrete Bessel transform and the HEALPIX angular pixelisation scheme. 
We test the 3D wavelet transform and as a toy-application, apply a denoising algorithm in wavelet space to the Virgo large box cosmological simulations and find we can successfully remove noise without much loss to the large scale structure. Results: We have described a new spherical 3D isotropic wavelet transform, ideally suited to analyse and denoise future 3D spherical cosmological surveys, which uses a novel DSFBT. We illustrate its potential use for denoising using a toy model. All the algorithms presented in this paper are available for download as a public code called MRS3D at http://jstarck.free.fr/mrs3d.html 1. 3D Elevation Program—Virtual USA in 3D USGS Publications Warehouse Lukas, Vicki; Stoker, J.M. 2016-04-14 The U.S. Geological Survey (USGS) 3D Elevation Program (3DEP) uses a laser system called ‘lidar’ (light detection and ranging) to create a virtual reality map of the Nation that is very accurate. 3D maps have many uses with new uses being discovered all the time. 2. Case study: The Avengers 3D: cinematic techniques and digitally created 3D Clark, Graham D. 2013-03-01 Marvel's THE AVENGERS was the third film Stereo D collaborated on with Marvel; it was a summation of our artistic development of what Digitally Created 3D and Stereo D's artists and toolsets affords Marvel's filmmakers; the ability to shape stereographic space to support the film and story, in a way that balances human perception and live photography. We took our artistic lead from the cinematic intentions of Marvel, the Director Joss Whedon, and Director of Photography Seamus McGarvey. In the digital creation of a 3D film from a 2D image capture, recommendations to the filmmakers cinematic techniques are offered by Stereo D at each step from pre-production onwards, through set, into post. 
As the footage arrives at our facility we respond in depth to the cinematic qualities of the imagery in context of the edit and story, with the guidance of the Directors and Studio, creating stereoscopic imagery. Our involvement in The Avengers was early in production, after reading the script we had the opportunity and honor to meet and work with the Director Joss Whedon, and DP Seamus McGarvey on set, and into post. We presented what is obvious to such great filmmakers in the ways of cinematic techniques as they related to the standard depth cues and story points we would use to evaluate depth for their film. Our hope was any cinematic habits that supported better 3D would be emphasized. In searching for a 3D statement for the studio and filmmakers we arrived at a stereographic style that allowed for comfort and maximum visual engagement to the viewer. 3. Ground target detection based on discrete cosine transform and Rényi entropy for imaging ladar Xu, Yuannan; Chen, Weili; Li, Junwei; Dong, Yanbing 2016-01-01 The discrete cosine transform (DCT) due to its excellent properties that the images can be represented in spatial/spatial-frequency domains, has been applied in sequence data analysis and image fusion. For intensity and range images of ladar, through the DCT using one dimension window, the statistical property of Rényi entropy for images is studied. We also analyzed the change of Rényi entropy's statistical property in the ladar intensity and range images when the man-made objects appear. From this foundation, a novel method for generating saliency map based on DCT and Rényi entropy is proposed. After that, ground target detection is completed when the saliency map is segmented using a simple and convenient threshold method. For the ladar intensity and range images, experimental results show the proposed method can effectively detect the military vehicles from complex earth background with low false alarm. 4. 
World Wind 3D Earth Viewing NASA Technical Reports Server (NTRS) Hogan, Patrick; Maxwell, Christopher; Kim, Randolph; Gaskins, Tom 2007-01-01 World Wind allows users to zoom from satellite altitude down to any place on Earth, leveraging high-resolution LandSat imagery and SRTM (Shuttle Radar Topography Mission) elevation data to experience Earth in visually rich 3D. In addition to Earth, World Wind can also visualize other planets, and there are already comprehensive data sets for Mars and the Earth's moon, which are as easily accessible as those of Earth. There have been more than 20 million downloads to date, and the software is being used heavily by the Department of Defense due to the code s ability to be extended and the evolution of the code courtesy of NASA and the user community. Primary features include the dynamic access to public domain imagery and its ease of use. All one needs to control World Wind is a two-button mouse. Additional guides and features can be accessed through a simplified menu. A JAVA version will be available soon. Navigation is automated with single clicks of a mouse, or by typing in any location to automatically zoom in to see it. The World Wind install package contains the necessary requirements such as the .NET runtime and managed DirectX library. World Wind can display combinations of data from a variety of sources, including Blue Marble, LandSat 7, SRTM, NASA Scientific Visualization Studio, GLOBE, and much more. A thorough list of features, the user manual, a key chart, and screen shots are available at http://worldwind.arc.nasa.gov. 5. Acquisition algorithm for direct-detection ladars with Geiger-mode avalanche photodiodes. PubMed Milstein, Adam B; Jiang, Leaf A; Luu, Jane X; Hines, Eric L; Schultz, Kenneth I 2008-01-10 An optimal algorithm for detecting a target using a ladar system employing Geiger-mode avalanche photodiodes (GAPDs) is presented. 
The algorithm applies to any scenario where a ranging direct detection ladar is used to determine the presence of a target against a sky background within a specified range window. A complete statistical model of the detection process for GAPDs is presented, including GAPDs that are inactive for a fixed period of time each time they fire. The model is used to develop a constant false alarm rate detection algorithm that minimizes acquisition time. Numerical performance predictions, simulation results, and experimental results are presented. Zhang, Shuan; Liu, Hongjun; Huang, Nan; Wang, Zhaolu; Han, Jing 2017-07-01 The phase-sensitive amplification (PSA) with an injected squeezed vacuum field is theoretically investigated in quantum-enhanced laser detection and ranging (LADAR) receiver. The theoretical model of the amplified process is derived to investigate the quantum fluctuations in detail. A new method of mitigating the unflat gain of nonideal PSA is proposed by adjusting the squeezed angle of the squeezed vacuum field. The simulation results indicate that signal-noise ratio (SNR) of system can be efficiently improved and close to the ideal case by this method. This research will provide an important potential in the applications of quantum-enhanced LADAR receiver. 7. The Enhanced-model Ladar Wind Sensor and Its Application in Planetary Wind Velocity Measurements NASA Technical Reports Server (NTRS) Soreide, D. C.; Mcgann, R. L.; Erwin, L. L.; Morris, D. J. 1993-01-01 For several years we have been developing an optical air-speed sensor that has a clear application as a meteorological wind-speed sensor for the Mars landers. This sensor has been developed for aircraft use to replace the familiar, pressure-based Pitot probe. Our approach utilizes a new concept in the laser-based optical measurement of air velocity (the Enhanced-Mode Ladar), which allows us to make velocity measurements with significantly lower laser power than conventional methods. 
The application of the Enhanced-Mode Ladar to measuring wind speeds in the martian atmosphere is discussed. 8. Perception of 3D spatial relations for 3D displays Rosen, Paul; Pizlo, Zygmunt; Hoffmann, Christoph; Popescu, Voicu S. 2004-05-01 We test perception of 3D spatial relations in 3D images rendered by a 3D display (Perspecta from Actuality Systems) and compare it to that of a high-resolution flat panel display. 3D images provide the observer with such depth cues as motion parallax and binocular disparity. Our 3D display is a device that renders a 3D image by displaying, in rapid succession, radial slices through the scene on a rotating screen. The image is contained in a glass globe and can be viewed from virtually any direction. In the psychophysical experiment several families of 3D objects are used as stimuli: primitive shapes (cylinders and cuboids), and complex objects (multi-story buildings, cars, and pieces of furniture). Each object has at least one plane of symmetry. On each trial an object or its "distorted" version is shown at an arbitrary orientation. The distortion is produced by stretching an object in a random direction by 40%. This distortion must eliminate the symmetry of an object. The subject's task is to decide whether or not the presented object is distorted under several viewing conditions (monocular/binocular, with/without motion parallax, and near/far). The subject's performance is measured by the discriminability d', which is a conventional dependent variable in signal detection experiments. 9. LLNL-Earth3D SciTech Connect 2013-10-01 Earth3D is a computer code designed to allow fast calculation of seismic rays and travel times through a 3D model of the Earth. LLNL is using this for earthquake location and global tomography efforts and such codes are of great interest to the Earth Science community. 10. 
3D World Building System ScienceCinema None 2016-07-12 This video provides an overview of the Sandia National Laboratories developed 3-D World Model Building capability that provides users with an immersive, texture rich 3-D model of their environment in minutes using a laptop and color and depth camera. 11. Market study: 3-D eyetracker NASA Technical Reports Server (NTRS) 1977-01-01 A market study of a proposed version of a 3-D eyetracker for initial use at NASA's Ames Research Center was made. The commercialization potential of a simplified, less expensive 3-D eyetracker was ascertained. Primary focus on present and potential users of eyetrackers, as well as present and potential manufacturers has provided an effective means of analyzing the prospects for commercialization. 12. 3D Buckligami: Digital Matter van Hecke, Martin; de Reus, Koen; Florijn, Bastiaan; Coulais, Corentin 2014-03-01 We present a class of elastic structures which exhibit collective buckling in 3D, and create these by a 3D printing/moulding technique. Our structures consist of a cubic lattice of anisotropic unit cells, and we show that their mechanical properties are programmable via the orientation of these unit cells. 13. 3D World Building System SciTech Connect 2013-10-30 This video provides an overview of the Sandia National Laboratories developed 3-D World Model Building capability that provides users with an immersive, texture rich 3-D model of their environment in minutes using a laptop and color and depth camera. 14. Euro3D Science Conference Walsh, J. R. 2004-02-01 The Euro3D RTN is an EU funded Research Training Network to foster the exploitation of 3D spectroscopy in Europe. 3D spectroscopy is a general term for spectroscopy of an area of the sky and derives its name from its two spatial + one spectral dimensions. 
There are an increasing number of instruments which use integral field devices to achieve spectroscopy of an area of the sky, either using lens arrays, optical fibres or image slicers, to pack spectra of multiple pixels on the sky ('spaxels') onto a 2D detector. On account of the large volume of data and the special methods required to reduce and analyse 3D data, there are only a few centres of expertise and these are mostly involved with instrument developments. There is a perceived lack of expertise in 3D spectroscopy spread through the astronomical community and its use in the armoury of the observational astronomer is viewed as being highly specialised. For precisely this reason the Euro3D RTN was proposed to train young researchers in this area and develop user tools to widen the experience with this particular type of data in Europe. The Euro3D RTN is coordinated by Martin M. Roth (Astrophysikalisches Institut Potsdam) and has been running since July 2002. The first Euro3D science conference was held in Cambridge, UK from 22 to 23 May 2003. The main emphasis of the conference was, in keeping with the RTN, to expose the work of the young post-docs who are funded by the RTN. In addition the team members from the eleven European institutes involved in Euro3D also presented instrumental and observational developments. The conference was organized by Andy Bunker and held at the Institute of Astronomy. There were over thirty participants and 26 talks covered the whole range of application of 3D techniques. The science ranged from Galactic planetary nebulae and globular clusters to kinematics of nearby galaxies out to objects at high redshift. Several talks were devoted to reporting recent observations with newly 15. 3D printing in dentistry. PubMed Dawood, A; Marti Marti, B; Sauret-Jackson, V; Darwood, A 2015-12-01 3D printing has been hailed as a disruptive technology which will change manufacturing. 
Used in aerospace, defence, art and design, 3D printing is becoming a subject of great interest in surgery. The technology has a particular resonance with dentistry, and with advances in 3D imaging and modelling technologies such as cone beam computed tomography and intraoral scanning, and with the relatively long history of the use of CAD CAM technologies in dentistry, it will become of increasing importance. Uses of 3D printing include the production of drill guides for dental implants, the production of physical models for prosthodontics, orthodontics and surgery, the manufacture of dental, craniomaxillofacial and orthopaedic implants, and the fabrication of copings and frameworks for implant and dental restorations. This paper reviews the types of 3D printing technologies available and their various applications in dentistry and in maxillofacial surgery. 16. 3D vision system assessment Pezzaniti, J. Larry; Edmondson, Richard; Vaden, Justin; Hyatt, Bryan; Chenault, David B.; Kingston, David; Geulen, Vanilynmae; Newell, Scott; Pettijohn, Brad 2009-02-01 In this paper, we report on the development of a 3D vision system consisting of a flat panel stereoscopic display and auto-converging stereo camera and an assessment of the system's use for robotic driving, manipulation, and surveillance operations. The 3D vision system was integrated onto a Talon Robot and Operator Control Unit (OCU) such that direct comparisons of the performance of a number of test subjects using 2D and 3D vision systems were possible. A number of representative scenarios were developed to determine which tasks benefited most from the added depth perception and to understand when the 3D vision system hindered understanding of the scene. Two tests were conducted at Fort Leonard Wood, MO with noncommissioned officers ranked Staff Sergeant and Sergeant First Class. 
The scenarios; the test planning, approach and protocols; the data analysis; and the resulting performance assessment of the 3D vision system are reported. 17. PLOT3D user's manual NASA Technical Reports Server (NTRS) Walatka, Pamela P.; Buning, Pieter G.; Pierce, Larry; Elson, Patricia A. 1990-01-01 PLOT3D is a computer graphics program designed to visualize the grids and solutions of computational fluid dynamics. Seventy-four functions are available. Versions are available for many systems. PLOT3D can handle multiple grids with a million or more grid points, and can produce varieties of model renderings, such as wireframe or flat shaded. Output from PLOT3D can be used in animation programs. The first part of this manual is a tutorial that takes the reader, keystroke by keystroke, through a PLOT3D session. The second part of the manual contains reference chapters, including the helpfile, data file formats, advice on changing PLOT3D, and sample command files. 18. Optical antenna of telescope for synthetic aperture ladar Liu, Liren 2008-08-01 For synthetic aperture ladar (SAL) imaging, there are difficulties in the space domain because the size of optical antenna of telescope is up to six orders of magnitude larger than the wavelength. In this paper, we suggest a defocused and spatial phase masked telescope for reception to compensate the diffraction aberration from the target to match the directivity of heterodyne detection, a defocused and phase masked transmission telescope to send out a wavefront with an additional and controllable spatial quadratic phase to the phase history, and a circulated duplex to compensate the aberration for reception and to produce spatial phase bias for transmission concurrently in the same telescope. On this basis, the point target radar equation in a full space and time treatment is achieved. 
Correspondingly, the complete collection equations of 2-D data acquired in the range and azimuth directions for 2-D SAL imaging of the strip-map mode and the spotlight mode are developed. Then the imaging azimuth and range resolutions are redefined in terms of the idea of optical imaging by a lens, and the requirement for azimuth sampling is given. The paper systematically presents all the details. 19. Imaging signal-to-noise ratio of synthetic aperture ladar Liu, Liren 2015-09-01 On the basis of the Poisson photocurrent statistics in the photon-limited heterodyne detection, in this paper, the signal-to-noise ratios in the receiver in the time domain and on the focused 1-D image and 2-D image in the space domain are derived for both the down-looking and side-looking synthetic aperture imaging ladars using PIN or APD photodiodes. The major shot noises in the down-looking SAIL and the side-looking SAIL are, respectively, from the dark current of photodiode and the local beam current. It is found that the ratio of 1-D image SNR to receiver SNR is proportional to the number of resolution elements in the cross direction of travel and the ratio of 2-D image SNR to 1-D image SNR is proportional to the number of resolution elements in the travel direction. And the sensitivity, the effect of Fourier transform of sampled signal, and the influence of time response of detection circuit are discussed, too. The study will help to correctly design a SAIL system. 20. Antenna aperture and imaging resolution of synthetic aperture imaging ladar Liu, Liren 2009-08-01 In this paper, the azimuth imaging resolutions of synthetic aperture imaging ladar (SAIL) using the antenna telescopes with a circular aperture for reception and a circular plane or a Gaussian beam for transmitting and with a rectangular aperture for reception and a rectangular plane or an elliptic Gaussian beam for transmitting are investigated. The analytic expressions of impulse response for imaging are achieved.
The ideal azimuth spot of resolution and its degradation due to the target deviation from the footprint center, the mismatch from the quadratic phase matched filtering, the finite sampling rate and width are discussed. And the range resolution is also studied. Mathematical criteria are all given. As a conclusion, the telescope of rectangular aperture can provide a rectangular footprint more suitable for the SAIL scanning format, and an optimal design of aperture is thus possible for both a high resolution and a wide scan strip. Moreover, an explanation to the resulted azimuth resolution from our laboratory-scaled SAIL is given to verify the developed theory. 1. Experimental demonstration of tri-aperture Differential Synthetic Aperture Ladar Zhao, Zhilong; Huang, Jianyu; Wu, Shudong; Wang, Kunpeng; Bai, Tao; Dai, Ze; Kong, Xinyi; Wu, Jin 2017-04-01 A tri-aperture Differential Synthetic Aperture Ladar (DSAL) is demonstrated in laboratory, which is configured by using one common aperture to transmit the illuminating laser and another two along-track receiving apertures to collect back-scattered laser signal for optical heterodyne detection. The image formation theory on this tri-aperture DSAL shows that there are two possible methods to reconstruct the azimuth Phase History Data (PHD) for aperture synthesis by following standard DSAL principle, either method resulting in a different matched filter as well as an azimuth image resolution. The experimental setup of the tri-aperture DSAL adopts a frequency chirped laser of about 40 mW in 1550 nm wavelength range as the illuminating source and an optical isolator composed of a polarizing beam-splitter and a quarter wave plate to virtually line the three apertures in the along-track direction. Various DSAL images up to target distance of 12.9 m are demonstrated using both PHD reconstructing methods. 2. PLOT3D/AMES, APOLLO UNIX VERSION USING GMR3D (WITH TURB3D) NASA Technical Reports Server (NTRS) Buning, P. 
1994-01-01 PLOT3D is an interactive graphics program designed to help scientists visualize computational fluid dynamics (CFD) grids and solutions. Today, supercomputers and CFD algorithms can provide scientists with simulations of such highly complex phenomena that obtaining an understanding of the simulations has become a major problem. Tools which help the scientist visualize the simulations can be of tremendous aid. PLOT3D/AMES offers more functions and features, and has been adapted for more types of computers than any other CFD graphics program. Version 3.6b+ is supported for five computers and graphic libraries. Using PLOT3D, CFD physicists can view their computational models from any angle, observing the physics of problems and the quality of solutions. As an aid in designing aircraft, for example, PLOT3D's interactive computer graphics can show vortices, temperature, reverse flow, pressure, and dozens of other characteristics of air flow during flight. As critical areas become obvious, they can easily be studied more closely using a finer grid. PLOT3D is part of a computational fluid dynamics software cycle. First, a program such as 3DGRAPE (ARC-12620) helps the scientist generate computational grids to model an object and its surrounding space. Once the grids have been designed and parameters such as the angle of attack, Mach number, and Reynolds number have been specified, a "flow-solver" program such as INS3D (ARC-11794 or COS-10019) solves the system of equations governing fluid flow, usually on a supercomputer. Grids sometimes have as many as two million points, and the "flow-solver" produces a solution file which contains density, x- y- and z-momentum, and stagnation energy for each grid point. With such a solution file and a grid file containing up to 50 grids as input, PLOT3D can calculate and graphically display any one of 74 functions, including shock waves, surface pressure, velocity vectors, and particle traces. 
PLOT3D's 74 functions are organized into 3. PLOT3D/AMES, APOLLO UNIX VERSION USING GMR3D (WITHOUT TURB3D) NASA Technical Reports Server (NTRS) Buning, P. 1994-01-01 PLOT3D is an interactive graphics program designed to help scientists visualize computational fluid dynamics (CFD) grids and solutions. Today, supercomputers and CFD algorithms can provide scientists with simulations of such highly complex phenomena that obtaining an understanding of the simulations has become a major problem. Tools which help the scientist visualize the simulations can be of tremendous aid. PLOT3D/AMES offers more functions and features, and has been adapted for more types of computers than any other CFD graphics program. Version 3.6b+ is supported for five computers and graphic libraries. Using PLOT3D, CFD physicists can view their computational models from any angle, observing the physics of problems and the quality of solutions. As an aid in designing aircraft, for example, PLOT3D's interactive computer graphics can show vortices, temperature, reverse flow, pressure, and dozens of other characteristics of air flow during flight. As critical areas become obvious, they can easily be studied more closely using a finer grid. PLOT3D is part of a computational fluid dynamics software cycle. First, a program such as 3DGRAPE (ARC-12620) helps the scientist generate computational grids to model an object and its surrounding space. Once the grids have been designed and parameters such as the angle of attack, Mach number, and Reynolds number have been specified, a "flow-solver" program such as INS3D (ARC-11794 or COS-10019) solves the system of equations governing fluid flow, usually on a supercomputer. Grids sometimes have as many as two million points, and the "flow-solver" produces a solution file which contains density, x- y- and z-momentum, and stagnation energy for each grid point. 
With such a solution file and a grid file containing up to 50 grids as input, PLOT3D can calculate and graphically display any one of 74 functions, including shock waves, surface pressure, velocity vectors, and particle traces. PLOT3D's 74 functions are organized into 4. PLOT3D/AMES, APOLLO UNIX VERSION USING GMR3D (WITHOUT TURB3D) NASA Technical Reports Server (NTRS) Buning, P. 1994-01-01 PLOT3D is an interactive graphics program designed to help scientists visualize computational fluid dynamics (CFD) grids and solutions. Today, supercomputers and CFD algorithms can provide scientists with simulations of such highly complex phenomena that obtaining an understanding of the simulations has become a major problem. Tools which help the scientist visualize the simulations can be of tremendous aid. PLOT3D/AMES offers more functions and features, and has been adapted for more types of computers than any other CFD graphics program. Version 3.6b+ is supported for five computers and graphic libraries. Using PLOT3D, CFD physicists can view their computational models from any angle, observing the physics of problems and the quality of solutions. As an aid in designing aircraft, for example, PLOT3D's interactive computer graphics can show vortices, temperature, reverse flow, pressure, and dozens of other characteristics of air flow during flight. As critical areas become obvious, they can easily be studied more closely using a finer grid. PLOT3D is part of a computational fluid dynamics software cycle. First, a program such as 3DGRAPE (ARC-12620) helps the scientist generate computational grids to model an object and its surrounding space. Once the grids have been designed and parameters such as the angle of attack, Mach number, and Reynolds number have been specified, a "flow-solver" program such as INS3D (ARC-11794 or COS-10019) solves the system of equations governing fluid flow, usually on a supercomputer. 
Grids sometimes have as many as two million points, and the "flow-solver" produces a solution file which contains density, x- y- and z-momentum, and stagnation energy for each grid point. With such a solution file and a grid file containing up to 50 grids as input, PLOT3D can calculate and graphically display any one of 74 functions, including shock waves, surface pressure, velocity vectors, and particle traces. PLOT3D's 74 functions are organized into 5. PLOT3D/AMES, APOLLO UNIX VERSION USING GMR3D (WITH TURB3D) NASA Technical Reports Server (NTRS) Buning, P. 1994-01-01 PLOT3D is an interactive graphics program designed to help scientists visualize computational fluid dynamics (CFD) grids and solutions. Today, supercomputers and CFD algorithms can provide scientists with simulations of such highly complex phenomena that obtaining an understanding of the simulations has become a major problem. Tools which help the scientist visualize the simulations can be of tremendous aid. PLOT3D/AMES offers more functions and features, and has been adapted for more types of computers than any other CFD graphics program. Version 3.6b+ is supported for five computers and graphic libraries. Using PLOT3D, CFD physicists can view their computational models from any angle, observing the physics of problems and the quality of solutions. As an aid in designing aircraft, for example, PLOT3D's interactive computer graphics can show vortices, temperature, reverse flow, pressure, and dozens of other characteristics of air flow during flight. As critical areas become obvious, they can easily be studied more closely using a finer grid. PLOT3D is part of a computational fluid dynamics software cycle. First, a program such as 3DGRAPE (ARC-12620) helps the scientist generate computational grids to model an object and its surrounding space. 
Once the grids have been designed and parameters such as the angle of attack, Mach number, and Reynolds number have been specified, a "flow-solver" program such as INS3D (ARC-11794 or COS-10019) solves the system of equations governing fluid flow, usually on a supercomputer. Grids sometimes have as many as two million points, and the "flow-solver" produces a solution file which contains density, x- y- and z-momentum, and stagnation energy for each grid point. With such a solution file and a grid file containing up to 50 grids as input, PLOT3D can calculate and graphically display any one of 74 functions, including shock waves, surface pressure, velocity vectors, and particle traces. PLOT3D's 74 functions are organized into 6. 3D Geomodeling of the Venezuelan Andes Monod, B.; Dhont, D.; Hervouet, Y.; Backé, G.; Klarica, S.; Choy, J. E. 2010-12-01 The crustal structure of the Venezuelan Andes is investigated thanks to a geomodel. The method integrates surface structural data, remote sensing imagery, crustal scale balanced cross-sections, earthquake locations and focal mechanism solutions to reconstruct fault surfaces at the scale of the mountain belt into a 3D environment. The model proves to be essential for understanding the basic processes of both the orogenic float and the tectonic escape involved in the Plio-Quaternary evolution of the orogen. The reconstruction of the Bocono and Valera faults reveals the 3D shape of the Trujillo block whose geometry can be compared to a boat bow floating over a mid-crustal detachment horizon emerging at the Bocono-Valera triple junction. Motion of the Trujillo block is accompanied by a generalized extension in the upper crust accommodated by normal faults with listric geometries such as for the Motatan, Momboy and Tuñame faults. Extension may be related to the lateral spreading of the upper crust, suggesting that gravity forces play an important role in the escape process. 7. 
Unassisted 3D camera calibration Atanassov, Kalin; Ramachandra, Vikas; Nash, James; Goma, Sergio R. 2012-03-01 With the rapid growth of 3D technology, 3D image capture has become a critical part of the 3D feature set on mobile phones. 3D image quality is affected by the scene geometry as well as on-the-device processing. An automatic 3D system usually assumes known camera poses accomplished by factory calibration using a special chart. In real life settings, pose parameters estimated by factory calibration can be negatively impacted by movements of the lens barrel due to shaking, focusing, or camera drop. If any of these factors displaces the optical axes of either or both cameras, vertical disparity might exceed the maximum tolerable margin and the 3D user may experience eye strain or headaches. To make 3D capture more practical, one needs to consider unassisted (on arbitrary scenes) calibration. In this paper, we propose an algorithm that relies on detection and matching of keypoints between left and right images. Frames containing erroneous matches, along with frames with insufficiently rich keypoint constellations, are detected and discarded. Roll, pitch, yaw, and scale differences between left and right frames are then estimated. The algorithm performance is evaluated in terms of the remaining vertical disparity as compared to the maximum tolerable vertical disparity. 8. Bioprinting of 3D hydrogels. PubMed Stanton, M M; Samitier, J; Sánchez, S 2015-08-07 Three-dimensional (3D) bioprinting has recently emerged as an extension of 3D material printing, by using biocompatible or cellular components to build structures in an additive, layer-by-layer methodology for encapsulation and culture of cells. These 3D systems allow for cell culture in a suspension for formation of highly organized tissue or controlled spatial orientation of cell environments.
The in vitro 3D cellular environments simulate the complexity of an in vivo environment and natural extracellular matrices (ECM). This paper will focus on bioprinting utilizing hydrogels as 3D scaffolds. Hydrogels are advantageous for cell culture as they are highly permeable to cell culture media, nutrients, and waste products generated during metabolic cell processes. They have the ability to be fabricated in customized shapes with various material properties with dimensions at the micron scale. 3D hydrogels are a reliable method for biocompatible 3D printing and have applications in tissue engineering, drug screening, and organ on a chip models. 9. 3D Scan Systems Integration DTIC Science & Technology 2007-11-02 AGENCY USE ONLY (Leave Blank) 2. REPORT DATE 5 Feb 98 4. TITLE AND SUBTITLE 3D Scan Systems Integration REPORT TYPE AND DATES COVERED...2-89) Prescribed by ANSI Std. Z39-1 298-102 [ EDO QUALITY W3PECTEDI DLA-ARN Final Report for US Defense Logistics Agency on DDFG-T2/P3: 3D...SCAN SYSTEMS INTEGRATION Contract Number SPO100-95-D-1014 Contractor Ohio University Delivery Order # 0001 Delivery Order Title 3D Scan Systems 10. 3D sensor algorithms for spacecraft pose determination Trenkle, John M.; Tchoryk, Peter, Jr.; Ritter, Greg A.; Pavlich, Jane C.; Hickerson, Aaron S. 2006-05-01 Researchers at the Michigan Aerospace Corporation have developed accurate and robust 3-D algorithms for pose determination (position and orientation) of satellites as part of an on-going effort supporting autonomous rendezvous, docking and space situational awareness activities. 3-D range data from a LAser Detection And Ranging (LADAR) sensor is the expected input; however, the approach is unique in that the algorithms are designed to be sensor independent. Parameterized inputs allow the algorithms to be readily adapted to any sensor of opportunity. 
The cornerstone of our approach is the ability to simulate realistic range data that may be tailored to the specifications of any sensor. We were able to modify an open-source raytracing package to produce point cloud information from which high-fidelity simulated range images are generated. The assumptions made in our experimentation are as follows: 1) we have access to a CAD model of the target including information about the surface scattering and reflection characteristics of the components; 2) the satellite of interest may appear at any 3-D attitude; 3) the target is not necessarily rigid, but does have a limited number of configurations; and, 4) the target is not obscured in any way and is the only object in the field of view of the sensor. Our pose estimation approach then involves rendering a large number of exemplars (100k to 5M), extracting 2-D (silhouette- and projection-based) and 3-D (surface-based) features, and then training ensembles of decision trees to predict: a) the 4-D regions on a unit hypersphere into which the unit quaternion that represents the vehicle [Q X, Q Y, Q Z, Q W] is pointing, and, b) the components of that unit quaternion. Results have been quite promising and the tools and simulation environment developed for this application may also be applied to non-cooperative spacecraft operations, Autonomous Hazard Detection and Avoidance (AHDA) for landing craft, terrain mapping, vehicle 11. The 3D laser radar vision processor system NASA Technical Reports Server (NTRS) Sebok, T. M. 1990-01-01 Loral Defense Systems (LDS) developed a 3D Laser Radar Vision Processor system capable of detecting, classifying, and identifying small mobile targets as well as larger fixed targets using three dimensional laser radar imagery for use with a robotic type system. 
This processor system is designed to interface with the NASA Johnson Space Center in-house Extra Vehicular Activity (EVA) Retriever robot program and provide to it needed information so it can fetch and grasp targets in a space-type scenario. 12. ASI/MET - 3-D NASA Image and Video Library 1997-07-13 The Atmospheric Structure Instrument/Meteorology Package ASI/MET is the mast and windsocks at the center of this stereo image from NASA Mars Pathfinder. 3D glasses are necessary to identify surface detail. 13. 3D Models of Immunotherapy Cancer.gov This collaborative grant is developing 3D models of both mouse and human biology to investigate aspects of therapeutic vaccination in order to answer key questions relevant to human cancer immunotherapy. 14. 3D polymer scaffold arrays. PubMed Simon, Carl G; Yang, Yanyin; Dorsey, Shauna M; Ramalingam, Murugan; Chatterjee, Kaushik 2011-01-01 We have developed a combinatorial platform for fabricating tissue scaffold arrays that can be used for screening cell-material interactions. Traditional research involves preparing samples one at a time for characterization and testing. Combinatorial and high-throughput (CHT) methods lower the cost of research by reducing the amount of time and material required for experiments by combining many samples into miniaturized specimens. In order to help accelerate biomaterials research, many new CHT methods have been developed for screening cell-material interactions where materials are presented to cells as a 2D film or surface. However, biomaterials are frequently used to fabricate 3D scaffolds, cells exist in vivo in a 3D environment and cells cultured in a 3D environment in vitro typically behave more physiologically than those cultured on a 2D surface. Thus, we have developed a platform for fabricating tissue scaffold libraries where biomaterials can be presented to cells in a 3D format. 15. Accepting the T3D SciTech Connect Rich, D.O.; Pope, S.C.; DeLapp, J.G. 
1994-10-01 In April, a 128 PE Cray T3D was installed at Los Alamos National Laboratory's Advanced Computing Laboratory as part of the DOE's High-Performance Parallel Processor Program (H4P). In conjunction with CRI, the authors implemented a 30 day acceptance test. The test was constructed in part to help them understand the strengths and weaknesses of the T3D. In this paper, they briefly describe the H4P and its goals. They discuss the design and implementation of the T3D acceptance test and detail issues that arose during the test. They conclude with a set of system requirements that must be addressed as the T3D system evolves. 16. [Tridimensional (3D) endoscopic ultrasonography]. PubMed Varas Lorenzo, M J; Muñoz Agel, F; Abad Belando, R 2007-01-01 A review and update on 3D endoscopic ultrasonography is included regarding all of this technique's aspects, technical details, and current indications. Images from our own clinical experience are presented. 17. Heterodyne 3D ghost imaging Yang, Xu; Zhang, Yong; Yang, Chenghua; Xu, Lu; Wang, Qiang; Zhao, Yuan 2016-06-01 Conventional three dimensional (3D) ghost imaging measures range of target based on pulse flight time measurement method. Due to the limit of data acquisition system sampling rate, range resolution of the conventional 3D ghost imaging is usually low. In order to remove the effect of sampling rate on range resolution of 3D ghost imaging, a heterodyne 3D ghost imaging (HGI) system is presented in this study. The source of HGI is a continuous wave laser instead of pulse laser. Temporal correlation and spatial correlation of light are both utilized to obtain the range image of target. Through theory analysis and numerical simulations, it is demonstrated that HGI can obtain high range resolution image with low sampling rate. 18.
Combinatorial 3D Mechanical Metamaterials Coulais, Corentin; Teomy, Eial; de Reus, Koen; Shokef, Yair; van Hecke, Martin 2015-03-01 We present a class of elastic structures which exhibit 3D-folding motion. Our structures consist of cubic lattices of anisotropic unit cells that can be tiled in a complex combinatorial fashion. We design and 3d-print this complex ordered mechanism, in which we combine elastic hinges and defects to tailor the mechanics of the material. Finally, we use this large design space to encode smart functionalities such as surface patterning and multistability. 19. Linear Mode Photon Counting LADAR Camera Development for the Ultra-Sensitive Detector Program Jack, M.; Bailey, S.; Edwards, J.; Burkholder, R.; Liu, K.; Asbrock, J.; Randall, V.; Chapman, G.; Riker, J. 20. Range-Doppler imaging of moving target with chirped AM ladar Liu, Chun-bo; Lu, Fang; Zhao, Yanjie; Han, Xiang'e. 2011-06-01 As the result of the synthetic aperture radar technique applied to laser band, SAIL (synthetic aperture imaging ladar) can provide range-Doppler image of targets with much higher resolution than its counterpart in microwave band. However, the complicated structure of coherent heterodyne detection and the significant impacts of laser atmospheric effect, target depolarization, phase error arising from platform vibration on heterodyne detection efficiency degrade its performance. In this paper, an APD and semiconductor laser based range-Doppler imaging ladar is presented. The ladar combines the (inverse) synthetic aperture technique and direct detection and can obtain the high-resolution image at a relatively low cost and complexity. In the meanwhile, owing to the poor coherence of semiconductor laser, the impact of atmospheric effect on laser pattern can be mitigated to some extent.
Firstly, the system diagram is presented and the components are briefly introduced; Secondly, the operation principle and performance are formulated in detail; and then the parameter determination of the chirped AM waveform is analyzed considering the extraction of the range and velocity and the fine resolution. Finally, the 1-D range and 2-D range-Doppler imaging procedure are numerically simulated based on the given target model, which shows that the proposed imaging ladar is effective and feasible. 1. Research progress on a focal plane array ladar system using chirped amplitude modulation Stann, Barry L.; Abou-Auf, Ahmed; Aliberti, Keith; Dammann, John; Giza, Mark; Dang, Gerard; Ovrebo, Greg; Redman, Brian; Ruff, William; Simon, Deborah 2003-08-01 The Army Research Laboratory is researching a focal plane array (FPA) ladar architecture that is applicable for smart munitions, reconnaissance, face recognition, robotic navigation, etc. Here we report on progress and test results attained over the past year related to the construction of a 32x32 pixel FPA ladar laboratory breadboard. The near-term objective of this effort is to evaluate and demonstrate an FPA ladar using chirped amplitude modulation; knowledge gained will then be used to build a field testable version with a larger array format. The ladar architecture achieves ranging based on a frequency modulation/continuous wave technique implemented by directly amplitude modulating a near-IR diode laser transmitter with a radio frequency (rf) subcarrier that is linearly frequency modulated (chirped amplitude modulation). The diode's output is collected and projected to form an illumination field in the downrange image area. The returned signal is focused onto an array of optoelectronic mixing, metal-semiconductor-metal detectors where it is detected and mixed with a delayed replica of the laser modulation signal that modulates the responsivity of each detector.
The output of each detector is an intermediate frequency (IF) signal resulting from the mixing process whose frequency is proportional to the target range. This IF signal is continuously sampled over a period of the rf modulation. Following this, a signal processor calculates the discrete fast Fourier transform over the IF waveform in each pixel to establish the ranges and amplitudes of all scatterers. Stann, Barry L.; Dammann, John F.; Del Giorno, Mark; DiBerardino, Charles; Giza, Mark M.; Powers, Michael A.; Uzunovic, Nenad 2014-06-01 LADAR is among the pre-eminent sensor modalities for autonomous vehicle navigation. Size, weight, power and cost constraints impose significant practical limitations on perception systems intended for small ground robots. In recent years, the Army Research Laboratory (ARL) developed a LADAR architecture based on a MEMS mirror scanner that fundamentally improves the trade-offs between these limitations and sensor capability. We describe how the characteristics of a highly developed prototype correspond to and satisfy the requirements of autonomous navigation and the experimental scenarios of the ARL Robotics Collaborative Technology Alliance (RCTA) program. In particular, the long maximum and short minimum range capability of the ARL MEMS LADAR makes it remarkably suitable for a wide variety of scenarios from building mapping to the manipulation of objects at close range, including dexterous manipulation with robotic arms. A prototype system was applied to a small (approximately 50 kg) unmanned robotic vehicle as the primary mobility perception sensor. We present the results of a field test where the perception information supplied by the LADAR system successfully accomplished the experimental objectives of an Integrated Research Assessment (IRA). 3. Simulation of laser radar imagery Sheffer, Albert D., Jr.; Thompson, Fred L. 1986-01-01 Software has been developed for the simulation of laser radar range imagery. 
Two versions have been developed: the first is an idealized model which is noise-free and with zero dropout rate; the second includes both pointing and range noise effects and provides for calculation of probability of detection for each pixel, with dropout occurring for probabilities below threshold, and also allows for user control over a number of other parameters such as scanning convention (unidirectional vs bidirectional), scan efficiency, and trajectory update rates. Each version allows for motion of a LADAR sensor across a terrain database on which faceted objects (targets and clutter) have been placed. For each pixel the program calculates the laser exit beam direction, based upon the combined effects of the sensor sweep pattern and the motion and attitude of the sensor platform. The exit beam is traced for intersection with the terrain or an object. Program output consists of the x, y, z-coordinates of the intersection point and the (real-number) range to that point for each pixel. This output can then be converted to a displayable range image. The software is currently implemented on a VAX 11/750 computer operating under VMS. 4. Investigation of synthetic aperture ladar for land surveillance applications Turbide, Simon; Marchese, Linda; Terroux, Marc; Bergeron, Alain 2013-10-01 Long-range land surveillance is a critical need in numerous military and civilian security applications, such as threat detection, terrain mapping and disaster prevention. A key technology for land surveillance, synthetic aperture radar (SAR) continues to provide high resolution radar images in all weather conditions from remote distances. Recently, Interferometric SAR (InSAR) and Differential Interferometric SAR (D-InSAR) have become powerful tools adding high resolution elevation and change detection measurements. 
State of the art SAR systems based on dual-use satellites are capable of providing ground resolutions of one meter; while their airborne counterparts obtain resolutions of 10 cm. DInSAR products based on these systems can produce cm-scale vertical resolution image products. Certain land surveillance applications such as land subsidence monitoring, landslide hazard prediction and tactical target tracking could benefit from improved resolution. The ultimate limitation to the achievable resolution of any imaging system is its wavelength. State-of-the art SAR systems are approaching this limit. The natural extension to improve resolution is to thus decrease the wavelength, i.e. design a synthetic aperture system in a different wavelength regime. One such system offering the potential for vastly improved resolution is Synthetic Aperture Ladar (SAL). This system operates at infrared wavelengths, ten thousand times smaller radar wavelengths. This paper discusses an initial investigation into a concept for an airborne SAL specifically aiming at land surveillance. The system would operate at 1.55 μm and would integrate an optronic processor on-board to allow for immediate transmission of the high resolution images to the end-user on the ground. Estimates of the size and weight, as well as the resolution and processing time are given. 5. LASTRAC.3d: Transition Prediction in 3D Boundary Layers NASA Technical Reports Server (NTRS) Chang, Chau-Lyan 2004-01-01 Langley Stability and Transition Analysis Code (LASTRAC) is a general-purpose, physics-based transition prediction code released by NASA for laminar flow control studies and transition research. This paper describes the LASTRAC extension to general three-dimensional (3D) boundary layers such as finite swept wings, cones, or bodies at an angle of attack. The stability problem is formulated by using a body-fitted nonorthogonal curvilinear coordinate system constructed on the body surface. 
The nonorthogonal coordinate system offers a variety of marching paths and spanwise waveforms. In the extreme case of an infinite swept wing boundary layer, marching with a nonorthogonal coordinate produces identical solutions to those obtained with an orthogonal coordinate system using the earlier release of LASTRAC. Several methods to formulate the 3D parabolized stability equations (PSE) are discussed. A surface-marching procedure akin to that for 3D boundary layer equations may be used to solve the 3D parabolized disturbance equations. On the other hand, the local line-marching PSE method, formulated as an easy extension from its 2D counterpart and capable of handling the spanwise mean flow and disturbance variation, offers an alternative. A linear stability theory or parabolized stability equations based N-factor analysis carried out along the streamline direction with a fixed wavelength and downstream-varying spanwise direction constitutes an efficient engineering approach to study instability wave evolution in a 3D boundary layer. The surface-marching PSE method enables a consistent treatment of the disturbance evolution along both streamwise and spanwise directions but requires more stringent initial conditions. Both PSE methods and the traditional LST approach are implemented in the LASTRAC.3d code. Several test cases for tapered or finite swept wings and cones at an angle of attack are discussed. 6. 3-D threat image projection Yildiz, Yesna O.; Abraham, Douglas Q.; Agaian, Sos; Panetta, Karen 2008-02-01 Automated Explosive Detection Systems utilizing Computed Tomography perform a series X-ray scans of passenger bags being checked in at the airport, and produce various 2-D projection images and 3-D volumetric images of the bag. The determination as to whether the passenger bag contains an explosive and needs to be searched manually is performed through trained Transportation Security Administration screeners following an approved protocol. 
In order to keep the screeners vigilant with regards to screening quality, the Transportation Security Administration has mandated the use of Threat Image Projection on 2-D projection X-ray screening equipment used at all US airports. These algorithms insert visual artificial threats into images of the normal passenger bags in order to test the screeners with regards to their screening efficiency and their screening quality at determining threats. This technology for 2-D X-ray system is proven and is widespread amongst multiple manufacturers of X-ray projection systems. Until now, Threat Image Projection has been unsuccessful at being introduced into 3-D Automated Explosive Detection Systems for numerous reasons. The failure of these prior attempts are mainly due to imaging queues that the screeners pickup on, and therefore make it easy for the screeners to discern the presence of the threat image and thus defeating the intended purpose. This paper presents a novel approach for 3-D Threat Image Projection for 3-D Automated Explosive Detection Systems. The method presented here is a projection based approach where both the threat object and the bag remain in projection sinogram space. Novel approaches have been developed for projection based object segmentation, projection based streak reduction used for threat object isolation along with scan orientation independence and projection based streak generation for an overall realistic 3-D image. The algorithms are prototyped in MatLab and C++ and demonstrate non discernible 3-D threat 7. From 3D view to 3D print Dima, M.; Farisato, G.; Bergomi, M.; Viotto, V.; Magrin, D.; Greggio, D.; Farinato, J.; Marafatto, L.; Ragazzoni, R.; Piazza, D. 2014-08-01 In the last few years 3D printing is getting more and more popular and used in many fields going from manufacturing to industrial design, architecture, medical support and aerospace. 
3D printing is an evolution of bi-dimensional printing, which allows to obtain a solid object from a 3D model, realized with a 3D modelling software. The final product is obtained using an additive process, in which successive layers of material are laid down one over the other. A 3D printer allows to realize, in a simple way, very complex shapes, which would be quite difficult to be produced with dedicated conventional facilities. Thanks to the fact that the 3D printing is obtained superposing one layer to the others, it doesn't need any particular work flow and it is sufficient to simply draw the model and send it to print. Many different kinds of 3D printers exist based on the technology and material used for layer deposition. A common material used by the toner is ABS plastics, which is a light and rigid thermoplastic polymer, whose peculiar mechanical properties make it diffusely used in several fields, like pipes production and cars interiors manufacturing. I used this technology to create a 1:1 scale model of the telescope which is the hardware core of the space small mission CHEOPS (CHaracterising ExOPlanets Satellite) by ESA, which aims to characterize EXOplanets via transits observations. The telescope has a Ritchey-Chrétien configuration with a 30cm aperture and the launch is foreseen in 2017. In this paper, I present the different phases for the realization of such a model, focusing onto pros and cons of this kind of technology. For example, because of the finite printable volume (10×10×12 inches in the x, y and z directions respectively), it has been necessary to split the largest parts of the instrument in smaller components to be then reassembled and post-processed. A further issue is the resolution of the printed material, which is expressed in terms of layers 8. 
YouDash3D: exploring stereoscopic 3D gaming for 3D movie theaters Schild, Jonas; Seele, Sven; Masuch, Maic 2012-03-01 Along with the success of the digitally revived stereoscopic cinema, events beyond 3D movies become attractive for movie theater operators, i.e. interactive 3D games. In this paper, we present a case that explores possible challenges and solutions for interactive 3D games to be played by a movie theater audience. We analyze the setting and showcase current issues related to lighting and interaction. Our second focus is to provide gameplay mechanics that make special use of stereoscopy, especially depth-based game design. Based on these results, we present YouDash3D, a game prototype that explores public stereoscopic gameplay in a reduced kiosk setup. It features live 3D HD video stream of a professional stereo camera rig rendered in a real-time game scene. We use the effect to place the stereoscopic effigies of players into the digital game. The game showcases how stereoscopic vision can provide for a novel depth-based game mechanic. Projected trigger zones and distributed clusters of the audience video allow for easy adaptation to larger audiences and 3D movie theater gaming. NASA Technical Reports Server (NTRS) 2002-01-01 In 1999, Genex submitted a proposal to Stennis Space Center for a volumetric 3-D display technique that would provide multiple users with a 360-degree perspective to simultaneously view and analyze 3-D data. The futuristic capabilities of the VolumeViewer(R) have offered tremendous benefits to commercial users in the fields of medicine and surgery, air traffic control, pilot training and education, computer-aided design/computer-aided manufacturing, and military/battlefield management. The technology has also helped NASA to better analyze and assess the various data collected by its satellite and spacecraft sensors. 
Genex capitalized on its success with Stennis by introducing two separate products to the commercial market that incorporate key elements of the 3-D display technology designed under an SBIR contract. The company Rainbow 3D(R) imaging camera is a novel, three-dimensional surface profile measurement system that can obtain a full-frame 3-D image in less than 1 second. The third product is the 360-degree OmniEye(R) video system. Ideal for intrusion detection, surveillance, and situation management, this unique camera system offers a continuous, panoramic view of a scene in real time. 10. 3D Printed Bionic Nanodevices. PubMed Kong, Yong Lin; Gupta, Maneesh K; Johnson, Blake N; McAlpine, Michael C 2016-06-01 The ability to three-dimensionally interweave biological and functional materials could enable the creation of bionic devices possessing unique and compelling geometries, properties, and functionalities. Indeed, interfacing high performance active devices with biology could impact a variety of fields, including regenerative bioelectronic medicines, smart prosthetics, medical robotics, and human-machine interfaces. Biology, from the molecular scale of DNA and proteins, to the macroscopic scale of tissues and organs, is three-dimensional, often soft and stretchable, and temperature sensitive. This renders most biological platforms incompatible with the fabrication and materials processing methods that have been developed and optimized for functional electronics, which are typically planar, rigid and brittle. A number of strategies have been developed to overcome these dichotomies. One particularly novel approach is the use of extrusion-based multi-material 3D printing, which is an additive manufacturing technology that offers a freeform fabrication strategy. 
This approach addresses the dichotomies presented above by (1) using 3D printing and imaging for customized, hierarchical, and interwoven device architectures; (2) employing nanotechnology as an enabling route for introducing high performance materials, with the potential for exhibiting properties not found in the bulk; and (3) 3D printing a range of soft and nanoscale materials to enable the integration of a diverse palette of high quality functional nanomaterials with biology. Further, 3D printing is a multi-scale platform, allowing for the incorporation of functional nanoscale inks, the printing of microscale features, and ultimately the creation of macroscale devices. This blending of 3D printing, novel nanomaterial properties, and 'living' platforms may enable next-generation bionic systems. In this review, we highlight this synergistic integration of the unique properties of nanomaterials with the 11. 3D Printed Bionic Nanodevices PubMed Central Kong, Yong Lin; Gupta, Maneesh K.; Johnson, Blake N.; McAlpine, Michael C. 2016-01-01 Summary The ability to three-dimensionally interweave biological and functional materials could enable the creation of bionic devices possessing unique and compelling geometries, properties, and functionalities. Indeed, interfacing high performance active devices with biology could impact a variety of fields, including regenerative bioelectronic medicines, smart prosthetics, medical robotics, and human-machine interfaces. Biology, from the molecular scale of DNA and proteins, to the macroscopic scale of tissues and organs, is three-dimensional, often soft and stretchable, and temperature sensitive. This renders most biological platforms incompatible with the fabrication and materials processing methods that have been developed and optimized for functional electronics, which are typically planar, rigid and brittle. A number of strategies have been developed to overcome these dichotomies. 
One particularly novel approach is the use of extrusion-based multi-material 3D printing, which is an additive manufacturing technology that offers a freeform fabrication strategy. This approach addresses the dichotomies presented above by (1) using 3D printing and imaging for customized, hierarchical, and interwoven device architectures; (2) employing nanotechnology as an enabling route for introducing high performance materials, with the potential for exhibiting properties not found in the bulk; and (3) 3D printing a range of soft and nanoscale materials to enable the integration of a diverse palette of high quality functional nanomaterials with biology. Further, 3D printing is a multi-scale platform, allowing for the incorporation of functional nanoscale inks, the printing of microscale features, and ultimately the creation of macroscale devices. This blending of 3D printing, novel nanomaterial properties, and ‘living’ platforms may enable next-generation bionic systems. In this review, we highlight this synergistic integration of the unique properties of nanomaterials with 12. Macrophage podosomes go 3D. PubMed Van Goethem, Emeline; Guiet, Romain; Balor, Stéphanie; Charrière, Guillaume M; Poincloux, Renaud; Labrousse, Arnaud; Maridonneau-Parini, Isabelle; Le Cabec, Véronique 2011-01-01 Macrophage tissue infiltration is a critical step in the immune response against microorganisms and is also associated with disease progression in chronic inflammation and cancer. Macrophages are constitutively equipped with specialized structures called podosomes dedicated to extracellular matrix (ECM) degradation. We recently reported that these structures play a critical role in trans-matrix mesenchymal migration mode, a protease-dependent mechanism. Podosome molecular components and their ECM-degrading activity have been extensively studied in two dimensions (2D), but yet very little is known about their fate in three-dimensional (3D) environments. 
Therefore, localization of podosome markers and proteolytic activity were carefully examined in human macrophages performing mesenchymal migration. Using our gelled collagen I 3D matrix model to obligate human macrophages to perform mesenchymal migration, classical podosome markers including talin, paxillin, vinculin, gelsolin, cortactin were found to accumulate at the tip of F-actin-rich cell protrusions together with β1 integrin and CD44 but not β2 integrin. Macrophage proteolytic activity was observed at podosome-like protrusion sites using confocal fluorescence microscopy and electron microscopy. The formation of migration tunnels by macrophages inside the matrix was accomplished by degradation, engulfment and mechanic compaction of the matrix. In addition, videomicroscopy revealed that 3D F-actin-rich protrusions of migrating macrophages were as dynamic as their 2D counterparts. Overall, the specifications of 3D podosomes resembled those of 2D podosome rosettes rather than those of individual podosomes. This observation was further supported by the aspect of 3D podosomes in fibroblasts expressing Hck, a master regulator of podosome rosettes in macrophages. In conclusion, human macrophage podosomes go 3D and take the shape of spherical podosome rosettes when the cells perform mesenchymal migration. This work 13. 3D Computations and Experiments SciTech Connect Couch, R; Faux, D; Goto, D; Nikkel, D 2004-04-05 This project consists of two activities. Task A, Simulations and Measurements, combines all the material model development and associated numerical work with the materials-oriented experimental activities. The goal of this effort is to provide an improved understanding of dynamic material properties and to provide accurate numerical representations of those properties for use in analysis codes. 
Task B, ALE3D Development, involves general development activities in the ALE3D code with the focus of improving simulation capabilities for problems of mutual interest to DoD and DOE. Emphasis is on problems involving multi-phase flow, blast loading of structures and system safety/vulnerability studies. 14. Petal, terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at the lower area of this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. The metallic object at lower right is part of the lander's low-gain antenna. This image is part of a 3D 'monster Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 15. Petal, terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at the lower area of this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. The metallic object at lower right is part of the lander's low-gain antenna. This image is part of a 3D 'monster Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 16. Method to improve the signal-to-noise ratio of photon-counting chirped amplitude modulation ladar. PubMed Zhang, Zijing; Wu, Long; Zhang, Yong; Zhao, Yuan 2013-01-10 Photon-counting chirped amplitude modulation (PCCAM) ladar employs Geiger mode avalanche photodiode as a detector. After the detector corresponding to the echo signal is reflected from an object or target, the modulation depth (MD) of the detection outputs has some certain loss relative to that of the transmitting signal. 
The signal-to-noise ratio (SNR) of PCCAM ladar is mainly determined by the MD of detection outputs of the echo signal. There is a proper echo signal intensity that can decrease the MD loss and improve the SNR of the ladar receiver. In this paper, an improved PCCAM ladar system is presented, which employs an echo signal intensity optimization strategy with an iris diaphragm under different signal and noise intensities. The improved system is demonstrated with the background noise of a sunny day and the echo signal intensity from 0.1 to 10 counts/ns. The experimental results show that it can effectively improve the SNR of the ladar receiver compared with the typical PCCAM ladar system. © 2013 Optical Society of America 17. 3D Printing: Exploring Capabilities ERIC Educational Resources Information Center Samuels, Kyle; Flowers, Jim 2015-01-01 As 3D printers become more affordable, schools are using them in increasing numbers. They fit well with the emphasis on product design in technology and engineering education, allowing students to create high-fidelity physical models to see and test different iterations in their product designs. They may also help students to "think in three… NASA Image and Video Library 2010-02-23 This anaglyph from images captured by NASA Cassini spacecraft shows a dramatic, 3-D view of one of the deep fractures nicknamed tiger stripes on Saturn moon Enceladus which are located near the moon south pole, spray jets of water ice. 19. 3D Printing: Exploring Capabilities ERIC Educational Resources Information Center Samuels, Kyle; Flowers, Jim 2015-01-01 As 3D printers become more affordable, schools are using them in increasing numbers. They fit well with the emphasis on product design in technology and engineering education, allowing students to create high-fidelity physical models to see and test different iterations in their product designs. They may also help students to "think in three… 20. 
Making Inexpensive 3-D Models ERIC Educational Resources Information Center Manos, Harry 2016-01-01 Visual aids are important to student learning, and they help make the teacher's job easier. Keeping with the "TPT" theme of "The Art, Craft, and Science of Physics Teaching," the purpose of this article is to show how teachers, lacking equipment and funds, can construct a durable 3-D model reference frame and a model gravity… 1. Ganges Chasma in 3-D NASA Image and Video Library 1999-06-25 Ganges Chasma is part of the Valles Marineris trough system that stretches nearly 5,000 kilometers 3,000 miles across the western equatorial region of Mars. This stereo anaglyph is from NASA Mars Global Surveyor. 3D glasses are necessary. 2. Opportunity Stretches Out 3-D NASA Image and Video Library 2004-02-02 This is a three-dimensional stereo anaglyph of an image taken by the front hazard-identification camera onboard NASA Mars Exploration Rover Opportunity, showing the rover arm in its extended position. 3D glasses are necessary to view this image. 3. Making Inexpensive 3-D Models ERIC Educational Resources Information Center Manos, Harry 2016-01-01 Visual aids are important to student learning, and they help make the teacher's job easier. Keeping with the "TPT" theme of "The Art, Craft, and Science of Physics Teaching," the purpose of this article is to show how teachers, lacking equipment and funds, can construct a durable 3-D model reference frame and a model gravity… 4. The World of 3-D. ERIC Educational Resources Information Center Mayshark, Robin K. 1991-01-01 Students explore three-dimensional properties by creating red and green wall decorations related to Christmas. Students examine why images seem to vibrate when red and green pieces are small and close together. Instructions to conduct the activity and construct 3-D glasses are given. (MDH) 5. 
Rosetta Comet in 3-D NASA Image and Video Library 2014-11-21 A 3D image shows what it would look like to fly over the surface of comet 67P/Churyumov-Gerasimenko. The image was generated by data collected by ESA Philae spacecraft during the decent to the spacecraft initial touchdown on the comet Nov. 12, 2014. 6. SNL3dFace SciTech Connect Russ, Trina; Koch, Mark; Koudelka, Melissa; Peters, Ralph; Little, Charles; Boehnen, Chris; Peters, Tanya 2007-07-20 This software distribution contains MATLAB and C++ code to enable identity verification using 3D images that may or may not contain a texture component. The code is organized to support system performance testing and system capability demonstration through the proper configuration of the available user interface. Using specific algorithm parameters the face recognition system has been demonstrated to achieve a 96.6% verification rate (Pd) at 0.001 false alarm rate. The system computes robust facial features of a 3D normalized face using Principal Component Analysis (PCA) and Fisher Linear Discriminant Analysis (FLDA). A 3D normalized face is obtained by alighning each face, represented by a set of XYZ coordinated, to a scaled reference face using the Iterative Closest Point (ICP) algorithm. The scaled reference face is then deformed to the input face using an iterative framework with parameters that control the deformed surface regulation an rate of deformation. A variety of options are available to control the information that is encoded by the PCA. Such options include the XYZ coordinates, the difference of each XYZ coordinates from the reference, the Z coordinate, the intensity/texture values, etc. In addition to PCA/FLDA feature projection this software supports feature matching to obtain similarity matrices for performance analysis. In addition, this software supports visualization of the STL, MRD, 2D normalized, and PCA synthetic representations in a 3D environment. 7. TACO3D. 
3-D Finite Element Heat Transfer Code SciTech Connect Mason, W.E. 1992-03-04 TACO3D is a three-dimensional, finite-element program for heat transfer analysis. An extension of the two-dimensional TACO program, it can perform linear and nonlinear analyses and can be used to solve either transient or steady-state problems. The program accepts time-dependent or temperature-dependent material properties, and materials may be isotropic or orthotropic. A variety of time-dependent and temperature-dependent boundary conditions and loadings are available including temperature, flux, convection, and radiation boundary conditions and internal heat generation. Additional specialized features treat enclosure radiation, bulk nodes, and master/slave internal surface conditions (e.g., contact resistance). Data input via a free-field format is provided. A user subprogram feature allows for any type of functional representation of any independent variable. A profile (bandwidth) minimization option is available. The code is limited to implicit time integration for transient solutions. TACO3D has no general mesh generation capability. Rows of evenly-spaced nodes and rows of sequential elements may be generated, but the program relies on separate mesh generators for complex zoning. TACO3D does not have the ability to calculate view factors internally. Graphical representation of data in the form of time history and spatial plots is provided through links to the POSTACO and GRAPE postprocessor codes. 8. Forensic 3D scene reconstruction Little, Charles Q.; Small, Daniel E.; Peters, Ralph R.; Rigdon, J. B. 2000-05-01 Traditionally law enforcement agencies have relied on basic measurement and imaging tools, such as tape measures and cameras, in recording a crime scene. A disadvantage of these methods is that they are slow and cumbersome. 
The development of a portable system that can rapidly record a crime scene with current camera imaging, 3D geometric surface maps, and contribute quantitative measurements such as accurate relative positioning of crime scene objects, would be an asset to law enforcement agents in collecting and recording significant forensic data. The purpose of this project is to develop a fieldable prototype of a fast, accurate, 3D measurement and imaging system that would support law enforcement agents to quickly document and accurately record a crime scene. 9. 3D Printed Robotic Hand NASA Technical Reports Server (NTRS) Pizarro, Yaritzmar Rosario; Schuler, Jason M.; Lippitt, Thomas C. 2013-01-01 Dexterous robotic hands are changing the way robots and humans interact and use common tools. Unfortunately, the complexity of the joints and actuations drive up the manufacturing cost. Some cutting edge and commercially available rapid prototyping machines now have the ability to print multiple materials and even combine these materials in the same job. A 3D model of a robotic hand was designed using Creo Parametric 2.0. Combining "hard" and "soft" materials, the model was printed on the Object Connex350 3D printer with the purpose of resembling as much as possible the human appearance and mobility of a real hand while needing no assembly. After printing the prototype, strings where installed as actuators to test mobility. Based on printing materials, the manufacturing cost of the hand was \$167, significantly lower than other robotic hands without the actuators since they have more complex assembly processes. 10. Forensic 3D Scene Reconstruction SciTech Connect LITTLE,CHARLES Q.; PETERS,RALPH R.; RIGDON,J. BRIAN; SMALL,DANIEL E. 1999-10-12 Traditionally law enforcement agencies have relied on basic measurement and imaging tools, such as tape measures and cameras, in recording a crime scene. A disadvantage of these methods is that they are slow and cumbersome. 
The development of a portable system that can rapidly record a crime scene with current camera imaging, 3D geometric surface maps, and contribute quantitative measurements such as accurate relative positioning of crime scene objects, would be an asset to law enforcement agents in collecting and recording significant forensic data. The purpose of this project is to develop a feasible prototype of a fast, accurate, 3D measurement and imaging system that would support law enforcement agents to quickly document and accurately record a crime scene. 11. 3-D sprag ratcheting tool NASA Technical Reports Server (NTRS) Wade, Michael O. (Inventor); Poland, Jr., James W. (Inventor) 2003-01-01 A ratcheting device comprising a driver head assembly which includes at least two 3-D sprag elements positioned within a first groove within the driver head assembly such that at least one of the 3-D sprag elements may lockingly engage the driver head assembly and a mating hub assembly to allow for rotation of the hub assembly in one direction with respect to the driver head assembly. This arrangement allows the ratcheting tool to impart torque in either the clockwise or counterclockwise direction without having to first rotate the ratcheting tool in the direction opposite the direction in which the torque is applied. This arrangement also allows the ratcheting tool to impart torque in either the clockwise or counterclockwise direction while in the neutral position. PubMed van Geer, Erik; Molenbroek, Johan; Schreven, Sander; deVoogd-Claessen, Lenneke; Toussaint, Huib 2012-01-01 In competitive swimming, suits have become more important. These suits influence friction, pressure and wave drag. Friction drag is related to the surface properties whereas both pressure and wave drag are greatly influenced by body shape. 
To find a relationship between the body shape and the drag, the anthropometry of several world class female swimmers wearing different suits was accurately defined using a 3D scanner and traditional measuring methods. The 3D scans delivered more detailed information about the body shape. On the same day the swimmers did performance tests in the water with the tested suits. Afterwards the result of the performance tests and the differences found in body shape was analyzed to determine the deformation caused by a swimsuit and its effect on the swimming performance. Although the amount of data is limited because of the few test subjects, there is an indication that the deformation of the body influences the swimming performance. 13. 3D-graphite structure SciTech Connect Belenkov, E. A. Ali-Pasha, V. A. 2011-01-15 The structure of clusters of some new carbon 3D-graphite phases have been calculated using the molecular-mechanics methods. It is established that 3D-graphite polytypes {alpha}{sub 1,1}, {alpha}{sub 1,3}, {alpha}{sub 1,5}, {alpha}{sub 2,1}, {alpha}{sub 2,3}, {alpha}{sub 3,1}, {beta}{sub 1,2}, {beta}{sub 1,4}, {beta}{sub 1,6}, {beta}{sub 2,1}, and {beta}{sub 3,2} consist of sp{sup 2}-hybridized atoms, have hexagonal unit cells, and differ in regards to the structure of layers and order of their alternation. A possible way to experimentally synthesize new carbon phases is proposed: the polymerization and carbonization of hydrocarbon molecules. 14. [Real time 3D echocardiography NASA Technical Reports Server (NTRS) Bauer, F.; Shiota, T.; Thomas, J. D. 2001-01-01 Three-dimensional representation of the heart is an old concern. Usually, 3D reconstruction of the cardiac mass is made by successive acquisition of 2D sections, the spatial localisation and orientation of which require complex guiding systems. More recently, the concept of volumetric acquisition has been introduced. 
A matricial emitter-receiver probe complex with parallel data processing provides instantaneous acquisition of a pyramidal 64 degrees x 64 degrees volume. The image is restituted in real time and is composed of 3 planes (planes B and C) which can be displaced in all spatial directions at any time during acquisition. The flexibility of this system of acquisition allows volume and mass measurement with greater accuracy and reproducibility, limiting inter-observer variability. Free navigation of the planes of investigation allows reconstruction for qualitative and quantitative analysis of valvular heart disease and other pathologies. Although real time 3D echocardiography is ready for clinical usage, some improvements are still necessary to improve its user-friendliness. Then real time 3D echocardiography could be the essential tool for understanding, diagnosis and management of patients. 15. [Real time 3D echocardiography NASA Technical Reports Server (NTRS) Bauer, F.; Shiota, T.; Thomas, J. D. 2001-01-01 Three-dimensional representation of the heart is an old concern. Usually, 3D reconstruction of the cardiac mass is made by successive acquisition of 2D sections, the spatial localisation and orientation of which require complex guiding systems. More recently, the concept of volumetric acquisition has been introduced. A matricial emitter-receiver probe complex with parallel data processing provides instantaneous acquisition of a pyramidal 64 degrees x 64 degrees volume. The image is restituted in real time and is composed of 3 planes (planes B and C) which can be displaced in all spatial directions at any time during acquisition. The flexibility of this system of acquisition allows volume and mass measurement with greater accuracy and reproducibility, limiting inter-observer variability. Free navigation of the planes of investigation allows reconstruction for qualitative and quantitative analysis of valvular heart disease and other pathologies. 
Although real time 3D echocardiography is ready for clinical usage, some improvements are still necessary to improve its conviviality. Then real time 3D echocardiography could be the essential tool for understanding, diagnosis and management of patients. 16. GPU-Accelerated Denoising in 3D (GD3D) SciTech Connect 2013-10-01 The raw computational power GPU Accelerators enables fast denoising of 3D MR images using bilateral filtering, anisotropic diffusion, and non-local means. This software addresses two facets of this promising application: what tuning is necessary to achieve optimal performance on a modern GPU? And what parameters yield the best denoising results in practice? To answer the first question, the software performs an autotuning step to empirically determine optimal memory blocking on the GPU. To answer the second, it performs a sweep of algorithm parameters to determine the combination that best reduces the mean squared error relative to a noiseless reference image. 17. GPU-Accelerated Denoising in 3D (GD3D) SciTech Connect 2013-10-01 The raw computational power GPU Accelerators enables fast denoising of 3D MR images using bilateral filtering, anisotropic diffusion, and non-local means. This software addresses two facets of this promising application: what tuning is necessary to achieve optimal performance on a modern GPU? And what parameters yield the best denoising results in practice? To answer the first question, the software performs an autotuning step to empirically determine optimal memory blocking on the GPU. To answer the second, it performs a sweep of algorithm parameters to determine the combination that best reduces the mean squared error relative to a noiseless reference image. 18. A Nonparametric Approach to Segmentation of Ladar Images DTIC Science & Technology 2012-12-01 Recognition, 26(9):1277–1294, 1993. [72] Papoulis , A. Probability , Random Variables, and Stochastic Processes. McGraw-Hill, Inc., 3rd. edition, 1991. 
[73...into distinct “phases” of imagery. The segmentation method is initialized using nonparametric probability density estimation. The resulting probability ...density is sliced piecewise into probability range bins, and the dominant object regions in each slice are traced and labeled. Plane fitting of each 19. Cramer-Rao lower bound on range error for LADARs with Geiger-mode avalanche photodiodes. PubMed Johnson, Steven E 2010-08-20 The Cramer-Rao lower bound (CRLB) on range error is calculated for laser detection and ranging (LADAR) systems using Geiger-mode avalanche photodiodes (GMAPDs) to detect reflected laser pulses. For the cases considered, the GMAPD range error CRLB is greater than the CRLB for a photon-counting device. It is also shown that the GMAPD range error CRLB is minimized when the mean energy in the received laser pulse is finite. Given typical LADAR system parameters, a Gaussian-envelope received pulse, and a noise detection rate of less than 4 MHz, the GMAPD range error CRLB is minimized when the quantum efficiency times the mean number of received laser pulse photons is between 2.2 and 2.3. 20. Optical imaging process based on two-dimensional Fourier transform for synthetic aperture imaging ladar Sun, Zhiwei; Zhi, Ya'nan; Liu, Liren; Sun, Jianfeng; Zhou, Yu; Hou, Peipei 2013-09-01 The synthetic aperture imaging ladar (SAIL) systems typically generate large amounts of data difficult to compress with digital method. This paper presents an optical SAIL processor based on compensation of quadratic phase of echo in azimuth direction and two dimensional Fourier transform. The optical processor mainly consists of one phase-only liquid crystal spatial modulator(LCSLM) to load the phase data of target echo and one cylindrical lens to compensate the quadratic phase and one spherical lens to fulfill the task of two dimensional Fourier transform. 
We show the imaging processing result of practical target echo obtained by a synthetic aperture imaging ladar demonstrator. The optical processor is compact and lightweight and could provide inherent parallel and the speed-of-light computing capability, it has a promising application future especially in onboard and satellite borne SAIL systems. 1. The laser linewidth effect on the image quality of phase coded synthetic aperture ladar Cai, Guangyu; Hou, Peipei; Ma, Xiaoping; Sun, Jianfeng; Zhang, Ning; Li, Guangyuan; Zhang, Guo; Liu, Liren 2015-12-01 The phase coded (PC) waveform in synthetic aperture ladar (SAL) outperforms linear frequency modulated (LFM) signal in lower side lobe, shorter pulse duration and making the rigid control of the chirp starting point in every pulse unnecessary. Inherited from radar PC waveform and strip map SAL, the backscattered signal of a point target in PC SAL was listed and the two dimensional match filtering algorithm was introduced to focus a point image. As an inherent property of laser, linewidth is always detrimental to coherent ladar imaging. With the widely adopted laser linewidth model, the effect of laser linewidth on SAL image quality was theoretically analyzed and examined via Monte Carlo simulation. The research gives us a clear view of how to select linewidth parameters in the future PC SAL systems. Hollinger, Jim; Vessey, Alyssa; Close, Ryan; Middleton, Seth; Williams, Kathryn; Rupp, Ronald; Nguyen, Son 2016-05-01 Commercial sensor technology has the potential to bring cost-effective sensors to a number of U.S. Army applications. By using sensors built for a widespread of commercial application, such as the automotive market, the Army can decrease costs of future systems while increasing overall capabilities. Additional sensors operating in alternate and orthogonal modalities can also be leveraged to gain a broader spectrum measurement of the environment. 
Leveraging multiple phenomenologies can reduce false alarms and make detection algorithms more robust to varied concealment materials. In this paper, this approach is applied to the detection of roadside hazards partially concealed by light-to-medium vegetation. This paper will present advances in detection algorithms using a ground vehicle-based commercial LADAR system. The benefits of augmenting a LADAR with millimeter-wave automotive radar and results from relevant data sets are also discussed. 3. Precision and accuracy testing of FMCW ladar-based length metrology. PubMed Mateo, Ana Baselga; Barber, Zeb W 2015-07-01 The calibration and traceability of high-resolution frequency modulated continuous wave (FMCW) ladar sources is a requirement for their use in length and volume metrology. We report the calibration of FMCW ladar length measurement systems by use of spectroscopy of molecular frequency references HCN (C-band) or CO (L-band) to calibrate the chirp rate of the FMCW sources. Propagating the stated uncertainties from the molecular calibrations provided by NIST and measurement errors provide an estimated uncertainty of a few ppm for the FMCW system. As a test of this calibration, a displacement measurement interferometer with a laser wavelength close to that of our FMCW system was built to make comparisons of the relative precision and accuracy. The comparisons performed show <10  ppm agreement, which was within the combined estimated uncertainties of the FMCW system and interferometer. 4. Atmospheric aerosol and molecular backscatter imaging effects on direct detection LADAR Youmans, Douglas G. 2015-05-01 Backscatter from atmospheric aerosols and molecular nitrogen and oxygen causes "clutter" noise in direct detection ladar applications operating within the atmosphere. The backscatter clutter is more pronounced in multiple pulse, high PRF ladars where pulse-averaging is used to increase operating range. 
As more and more pulses are added to the wavetrain the backscatter increases. We analyze the imaging of a transmitted Gaussian laser-mode multi-pulse wave-train scattered off of aerosols and molecules at the focal plane including angular-slew rate resulting from optical tracking, angular lead-angle, and bistatic-optics spatial separation. The defocused backscatter images, from those pulses closest to the receiver, are analyzed using a simple geometrical optics approximation. Methods for estimating the aerosol number density versus altitude and the volume backscatter coefficient of the aerosols are also discussed. 5. Bound on range precision for shot-noise limited ladar systems. PubMed Johnson, Steven; Cain, Stephen 2008-10-01 The precision of ladar range measurements is limited by noise. The fundamental source of noise in a laser signal is the random time between photon arrivals. This phenomenon, called shot noise, is modeled as a Poisson random process. Other noise sources in the system are also modeled as Poisson processes. Under the Poisson-noise assumption, the Cramer-Rao lower bound (CRLB) on range measurements is derived. This bound on the variance of any unbiased range estimate is greater than the CRLB derived by assuming Gaussian noise of equal variance. Finally, it is shown that, for a ladar capable of dividing a fixed amount of energy into multiple laser pulses, the range precision is maximized when all energy is transmitted in a single pulse. 6. A 3D Polar Processing Algorithm for Scale Model UHF ISAR Imaging DTIC Science & Technology 2006-05-01 5 in order to allow visualization of the target’s main scattering features. The low level intensity in the imagery is represented by the color green ...imagery, one may observe higher level colors behind the low level green surfaces. Considering the relatively long wavelengths used in the 3D UHF ISAR...Lundberg, P. Follo, P. Frolind, and A. 
Gustavsson , “Performance of VHF-band SAR change detection for wide-area surveillance of concealed ground 7. 3D Soil Images Structure Quantification using Relative Entropy Tarquis, A. M.; Gonzalez-Nieto, P. L.; Bird, N. R. A. 2012-04-01 Soil voids manifest the cumulative effect of local pedogenic processes and ultimately influence soil behavior - especially as it pertains to aeration and hydrophysical properties. Because of the relatively weak attenuation of X-rays by air, compared with liquids or solids, non-disruptive CT scanning has become a very attractive tool for generating three-dimensional imagery of soil voids. One of the main steps involved in this analysis is the thresholding required to transform the original (greyscale) images into the type of binary representation (e.g., pores in white, solids in black) needed for fractal analysis or simulation with Lattice-Boltzmann models (Baveye et al., 2010). The objective of the current work is to apply an innovative approach to quantifying soil voids and pore networks in original X-ray CT imagery using Relative Entropy (Bird et al., 2006; Tarquis et al., 2008). These will be illustrated using typical imagery representing contrasting soil structures. Particular attention will be given to the need to consider the full 3D context of the CT imagery, as well as scaling issues, in the application and interpretation of this index. 8. Magmatic Systems in 3-D Kent, G. M.; Harding, A. J.; Babcock, J. M.; Orcutt, J. A.; Bazin, S.; Singh, S.; Detrick, R. S.; Canales, J. P.; Carbotte, S. M.; Diebold, J. 2002-12-01 Multichannel seismic (MCS) images of crustal magma chambers are ideal targets for advanced visualization techniques. In the mid-ocean ridge environment, reflections originating at the melt-lens are well separated from other reflection boundaries, such as the seafloor, layer 2A and Moho, which enables the effective use of transparency filters. 
3-D visualization of seismic reflectivity falls into two broad categories: volume and surface rendering. Volumetric-based visualization is an extremely powerful approach for the rapid exploration of very dense 3-D datasets. These 3-D datasets are divided into volume elements or voxels, which are individually color coded depending on the assigned datum value; the user can define an opacity filter to reject plotting certain voxels. This transparency allows the user to peer into the data volume, enabling an easy identification of patterns or relationships that might have geologic merit. Multiple image volumes can be co-registered to look at correlations between two different data types (e.g., amplitude variation with offsets studies), in a manner analogous to draping attributes onto a surface. In contrast, surface visualization of seismic reflectivity usually involves producing "fence" diagrams of 2-D seismic profiles that are complemented with seafloor topography, along with point class data, draped lines and vectors (e.g. fault scarps, earthquake locations and plate-motions). The overlying seafloor can be made partially transparent or see-through, enabling 3-D correlations between seafloor structure and seismic reflectivity. Exploration of 3-D datasets requires additional thought when constructing and manipulating these complex objects. As numbers of visual objects grow in a particular scene, there is a tendency to mask overlapping objects; this clutter can be managed through the effective use of total or partial transparency (i.e., alpha-channel). In this way, the co-variation between different datasets can be investigated 9. Developing Spatial Reasoning Through 3D Representations of the Universe Summers, F.; Eisenhamer, B.; McCallister, D. 2013-12-01 Mental models of astronomical objects are often greatly hampered by the flat two-dimensional representation of pictures from telescopes. 
Lacking experience with the true structures in much of the imagery, there is no basis for anything but the default interpretation of a picture postcard. Using astronomical data and scientific visualizations, our professional development session allows teachers and their students to develop their spatial reasoning while forming more accurate and richer mental models. Examples employed in this session include star positions and constellations, morphologies of both normal and interacting galaxies, shapes of planetary nebulae, and three dimensional structures in star forming regions. Participants examine, imagine, predict, and confront the 3D interpretation of well-known 2D imagery using authentic data from NASA, the Hubble Space Telescope, and other scientific sources. The session's cross-disciplinary nature includes science, math, and artistic reasoning while addressing common cosmic misconceptions. Stars of the Orion Constellation seen in 3D explodes the popular misconception that stars in a constellation are all at the same distance. A scientific visualization of two galaxies colliding provides a 3D comparison for Hubble images of interacting galaxies. 10. Fusion of Airborne and Terrestrial Image-Based 3d Modelling for Road Infrastructure Management - Vision and First Experiments Nebiker, S.; Cavegn, S.; Eugster, H.; Laemmer, K.; Markram, J.; Wagner, R. 2012-07-01 In this paper we present the vision and proof of concept of a seamless image-based 3d modelling approach fusing airborne and mobile terrestrial imagery. The proposed fusion relies on dense stereo matching for extracting 3d point clouds which - in combination with the original airborne and terrestrial stereo imagery - create a rich 3d geoinformation and 3d measuring space. For the seamless exploitation of this space we propose using a new virtual globe technology integrating the airborne and terrestrial stereoscopic imagery with the derived 3d point clouds. 
The concept is applied to road and road infrastructure management and evaluated in a highway mapping project combining stereovision based mobile mapping with high-resolution multispectral airborne road corridor mapping using the new Leica RCD30 sensor. 11. Optical image reconstruction using an astigmatic lens for synthetic-aperture imaging ladar Sun, Zhiwei; Hou, Peipei; Zhi, Yanan; Sun, Jianfeng; Zhou, Yu; Xu, Qian; Liu, Liren 2014-11-01 An optical processor for synthetic-aperture imaging ladar (SAIL) utilizing one astigmatic lens is proposed. The processor comprises two structures of transmitting and reflecting. The imaging process is mathematically analyzed using the unified data-collection equation of side-looking and down-looking SAILs. Results show that the astigmatic lens can be replaced with a cylindrical lens on certain conditions. To verify this concept, laboratory experiment is conducted, the imaging result of data collected from one SAIL demonstrator is given. 12. Real Time Coincidence Processing Algorithm for Geiger Mode LADAR using FPGAs DTIC Science & Technology 2017-01-09 processing algorithm implemented on a FPGA . By utilizing FPGAs we are able to achieve a “micro-ladar” system that resides in a design space not...developed embedded FPGA real time processing algorithms that take noisy raw data, streaming at upwards of 1GB/sec, and filters the data to obtain a near- ly...current operating parameters and extrapo- lated for future system upgrades. Simulated ladar data is processed using the FPGA and compared to the Matlab 13. Inverse Synthetic Aperture LADAR for Geosynchronous Space Objects - Signal-to-Noise Analysis DTIC Science & Technology 2011-09-01 Inverse synthetic aperture LADAR for geosynchronous space objects – signal-to-noise analysis Casey J. Pellizzari Air Force Research Laboratory...NM 87117 Rao Gudimetla Air Force Research Laboratory (RDSMA) 535 Lipoa Parkway, Ste. 
200, Kihei HI 96753 ABSTRACT Inverse synthetic ...return signal detected by a coherent ISAL system. Using tomographic techniques common to synthetic aperture radar (SAR), a model is developed for the 14. Dimensionality reduction and information-theoretic divergence between sets of LADAR images Gray, David M.; Príncipe, José C. 2008-04-01 This paper presents a preliminary study of information-theoretic divergence between sets of LADAR image data. This study has been motivated by the hypothesis that despite the huge dimensionality of raw image space, related images actually lie on embedded manifolds within this set of all possible images and can be represented in much lowerdimensional sub-spaces. If these low-dimensional representations can be found, information theoretic properties of the images can be exploited while circumventing many of the problems associated with the so-called "curse of dimensionality." In this study, PCA techniques are used to find a low-dimensional sub-space representation of LADAR image sets. A real LADAR image data set was collected using the AFSTAR sensor and a synthetic image data set was created using the Irma LADAR image modeling program. One unique aspect of this study is the use of an entirely synthetic data set to find a sub-space representation that is reasonably valid for both the synthetic data set and the real data set. After the sub-space representation is found, an information-theoretic density divergence measure (Cauchy- Schwarz divergence) is computed using Parzen window estimation methods to find the divergence between and among the sets of synthetic and real target classes. These divergence measures can then be used to make target classification decisions for sets of images. In practice, this technique could be used to make classification decisions on multiple images collected from a moving sensor platform or from a geographically distributed set of cooperating sensor platforms operating in a target region. 15. 
Space Partitioning for Privacy Enabled 3D City Models Filippovska, Y.; Wichmann, A.; Kada, M. 2016-10-01 Due to recent technological progress, data capturing and processing of highly detailed (3D) data has become extensive. And despite all prospects of potential uses, data that includes personal living spaces and public buildings can also be considered as a serious intrusion into people's privacy and a threat to security. It becomes especially critical if data is visible by the general public. Thus, a compromise is needed between open access to data and privacy requirements which can be very different for each application. As privacy is a complex and versatile topic, the focus of this work particularly lies on the visualization of 3D urban data sets. For the purpose of privacy enabled visualizations of 3D city models, we propose to partition the (living) spaces into privacy regions, each featuring its own level of anonymity. Within each region, the depicted 2D and 3D geometry and imagery is anonymized with cartographic generalization techniques. The underlying spatial partitioning is realized as a 2D map generated as a straight skeleton of the open space between buildings. The resulting privacy cells are then merged according to the privacy requirements associated with each building to form larger regions, their borderlines smoothed, and transition zones established between privacy regions to have a harmonious visual appearance. It is exemplarily demonstrated how the proposed method generates privacy enabled 3D city models. 16. Developing Spatial Reasoning Through 3D Representations of the Universe Summers, Frank; Eisenhamer, B.; McCallister, D. 2014-01-01 Mental models of astronomical objects are often greatly hampered by the flat two-dimensional representation of pictures from telescopes. Lacking experience with the true structures in much of the imagery, there is little basis for anything but the default interpretation of a picture postcard. 
Using astronomical data and scientific visualizations, our team has worked in both formal and informal educational settings to explore and foster development of spatial reasoning while forming more accurate and richer mental models. Employing inquiry-based methods, participants examine, imagine, predict, and confront the 3D interpretation of well-known 2D imagery using data from NASA, the Hubble Space Telescope, and other scientific sources. Examples include star positions and constellations, morphologies of both normal and interacting galaxies, shapes of planetary nebulae, and three dimensional structures in star forming regions. Of particular appeal to educators is the activity's cross-disciplinary nature which includes science, math, and artistic reasoning while addressing common cosmic misconceptions. 17. Probabilistic analysis of linear mode vs. Geiger mode APD FPAs for advanced LADAR enabled interceptors Williams, George M.; Huntington, Andrew S. 2006-05-01 18. Target recognition of log-polar ladar range images using moment invariants Xia, Wenze; Han, Shaokun; Cao, Jie; Yu, Haoyong 2017-01-01 The ladar range image has received considerable attentions in the automatic target recognition field. However, previous research does not cover target recognition using log-polar ladar range images. Therefore, we construct a target recognition system based on log-polar ladar range images in this paper. In this system combined moment invariants and backpropagation neural network are selected as shape descriptor and shape classifier, respectively. In order to fully analyze the effect of log-polar sampling pattern on recognition result, several comparative experiments based on simulated and real range images are carried out. 
Eventually, several important conclusions are drawn: (i) if combined moments are computed directly by log-polar range images, translation, rotation and scaling invariant properties of combined moments will be invalid; (ii) when object is located in the center of field of view, recognition rate of log-polar range images is less sensitive to the changing of field of view; (iii) as object position changes from center to edge of field of view, recognition performance of log-polar range images will decline dramatically; (iv) log-polar range images have a better noise robustness than Cartesian range images. Finally, we give a suggestion that it is better to divide field of view into recognition area and searching area in the real application. 19. A LADAR bare earth extraction technique for diverse topography and complex scenes Neuenschwander, Amy L.; Stevenson, Terry H.; Magruder, Lori A. 2012-06-01 Bare earth extraction is an important component to LADAR data analysis in terms of terrain classification. The challenge in providing accurate digital models is augmented when there is diverse topography within the data set or complex combinations of vegetation and built structures. A successful approach provides a flexible methodology (adaptable for topography and/or environment) that is capable of integrating multiple ladar point cloud data attributes. A newly developed approach (TE-SiP) uses a 2nd and 3rd order spatial derivative for each point in the DEM to determine sets of contiguous regions of similar elevation. Specifically, the derivative of the central point represents the curvature of the terrain at that position. Contiguous sets of high (positive or negative) values define sharp edges such as building edges or cliffs. This method is independent of the slope, such that very steep, but continuous topography still have relatively low curvature values and are preserved in the terrain classification. 
Next, a recursive segmentation method identifies unique features of homogeneity on the surface separated by areas of high curvature. An iterative selection process is used to eliminate regions containing buildings or vegetation from the terrain surface. This technique was tested on a variety of existing LADAR surveys, each with varying levels of topographic complexity. The results shown here include developed and forested regions in the Dominican Republic. NASA Technical Reports Server (NTRS) 2004-01-01 This is a 3-D anaglyph showing a microscopic image taken of an area measuring 3 centimeters (1.2 inches) across on the rock called Adirondack. The image was taken at Gusev Crater on the 33rd day of the Mars Exploration Rover Spirit's journey (Feb. 5, 2004), after the rover used its rock abrasion tool brush to clean the surface of the rock. Dust, which was pushed off to the side during cleaning, can still be seen to the left and in low areas of the rock. NASA Technical Reports Server (NTRS) 2004-01-01 This is a 3-D anaglyph showing a microscopic image taken of an area measuring 3 centimeters (1.2 inches) across on the rock called Adirondack. The image was taken at Gusev Crater on the 33rd day of the Mars Exploration Rover Spirit's journey (Feb. 5, 2004), after the rover used its rock abrasion tool brush to clean the surface of the rock. Dust, which was pushed off to the side during cleaning, can still be seen to the left and in low areas of the rock. 2. Making Inexpensive 3-D Models Manos, Harry 2016-03-01 Visual aids are important to student learning, and they help make the teacher's job easier. Keeping with the TPT theme of "The Art, Craft, and Science of Physics Teaching," the purpose of this article is to show how teachers, lacking equipment and funds, can construct a durable 3-D model reference frame and a model gravity well tailored to specific class lessons. 
Most of the supplies are readily available in the home or at school: rubbing alcohol, a rag, two colors of spray paint, art brushes, and masking tape. The cost of these supplies, if you don't have them, is less than 20. NASA Technical Reports Server (NTRS) 2004-01-01 This 3-D cylindrical-perspective mosaic taken by the navigation camera on the Mars Exploration Rover Spirit on sol 82 shows the view south of the large crater dubbed 'Bonneville.' The rover will travel toward the Columbia Hills, seen here at the upper left. The rock dubbed 'Mazatzal' and the hole the rover drilled in to it can be seen at the lower left. The rover's position is referred to as 'Site 22, Position 32.' This image was geometrically corrected to make the horizon appear flat. NASA Technical Reports Server (NTRS) 2004-01-01 This 3-D cylindrical-perspective mosaic taken by the navigation camera on the Mars Exploration Rover Spirit on sol 82 shows the view south of the large crater dubbed 'Bonneville.' The rover will travel toward the Columbia Hills, seen here at the upper left. The rock dubbed 'Mazatzal' and the hole the rover drilled in to it can be seen at the lower left. The rover's position is referred to as 'Site 22, Position 32.' This image was geometrically corrected to make the horizon appear flat. 5. 3D Printed Shelby Cobra SciTech Connect Love, Lonnie 2015-01-09 ORNL's newly printed 3D Shelby Cobra was showcased at the 2015 NAIAS in Detroit. This "laboratory on wheels" uses the Shelby Cobra design, celebrating the 50th anniversary of this model and honoring the first vehicle to be voted a national monument. The Shelby was printed at the Department of Energy’s Manufacturing Demonstration Facility at ORNL using the BAAM (Big Area Additive Manufacturing) machine and is intended as a “plug-n-play” laboratory on wheels. 
The Shelby will allow research and development of integrated components to be tested and enhanced in real time, improving the use of sustainable, digital manufacturing solutions in the automotive industry. 6. LIME: 3D visualisation and interpretation of virtual geoscience models Buckley, Simon; Ringdal, Kari; Dolva, Benjamin; Naumann, Nicole; Kurz, Tobias 2017-04-01 Three-dimensional and photorealistic acquisition of surface topography, using methods such as laser scanning and photogrammetry, has become widespread across the geosciences over the last decade. With recent innovations in photogrammetric processing software, robust and automated data capture hardware, and novel sensor platforms, including unmanned aerial vehicles, obtaining 3D representations of exposed topography has never been easier. In addition to 3D datasets, fusion of surface geometry with imaging sensors, such as multi/hyperspectral, thermal and ground-based InSAR, and geophysical methods, create novel and highly visual datasets that provide a fundamental spatial framework to address open geoscience research questions. Although data capture and processing routines are becoming well-established and widely reported in the scientific literature, challenges remain related to the analysis, co-visualisation and presentation of 3D photorealistic models, especially for new users (e.g. students and scientists new to geomatics methods). Interpretation and measurement is essential for quantitative analysis of 3D datasets, and qualitative methods are valuable for presentation purposes, for planning and in education. Motivated by this background, the current contribution presents LIME, a lightweight and high performance 3D software for interpreting and co-visualising 3D models and related image data in geoscience applications. 
The software focuses on novel data integration and visualisation of 3D topography with image sources such as hyperspectral imagery, logs and interpretation panels, geophysical datasets and georeferenced maps and images. High quality visual output can be generated for dissemination purposes, to aid researchers with communication of their research results. The background of the software is described and case studies from outcrop geology, in hyperspectral mineral mapping and geophysical-geospatial data integration are used to showcase the novel 7. 3D Printed Bionic Ears PubMed Central Mannoor, Manu S.; Jiang, Ziwen; James, Teena; Kong, Yong Lin; Malatesta, Karen A.; Soboyejo, Winston O.; Verma, Naveen; Gracias, David H.; McAlpine, Michael C. 2013-01-01 The ability to three-dimensionally interweave biological tissue with functional electronics could enable the creation of bionic organs possessing enhanced functionalities over their human counterparts. Conventional electronic devices are inherently two-dimensional, preventing seamless multidimensional integration with synthetic biology, as the processes and materials are very different. Here, we present a novel strategy for overcoming these difficulties via additive manufacturing of biological cells with structural and nanoparticle derived electronic elements. As a proof of concept, we generated a bionic ear via 3D printing of a cell-seeded hydrogel matrix in the precise anatomic geometry of a human ear, along with an intertwined conducting polymer consisting of infused silver nanoparticles. This allowed for in vitro culturing of cartilage tissue around an inductive coil antenna in the ear, which subsequently enables readout of inductively-coupled signals from cochlea-shaped electrodes. The printed ear exhibits enhanced auditory sensing for radio frequency reception, and complementary left and right ears can listen to stereo audio music. 
Overall, our approach suggests a means to intricately merge biologic and nanoelectronic functionalities via 3D printing. PMID:23635097 8. 3D Printable Graphene Composite PubMed Central Wei, Xiaojun; Li, Dong; Jiang, Wei; Gu, Zheming; Wang, Xiaojuan; Zhang, Zengxing; Sun, Zhengzong 2015-01-01 In human being’s history, both the Iron Age and Silicon Age thrived after a matured massive processing technology was developed. Graphene is the most recent superior material which could potentially initialize another new material Age. However, while being exploited to its full extent, conventional processing methods fail to provide a link to today’s personalization tide. New technology should be ushered in. Three-dimensional (3D) printing fills the missing linkage between graphene materials and the digital mainstream. Their alliance could generate additional stream to push the graphene revolution into a new phase. Here we demonstrate for the first time, a graphene composite, with a graphene loading up to 5.6 wt%, can be 3D printable into computer-designed models. The composite’s linear thermal coefficient is below 75 ppm·°C−1 from room temperature to its glass transition temperature (Tg), which is crucial to build minute thermal stress during the printing process. PMID:26153673 9. 3D Printable Graphene Composite Wei, Xiaojun; Li, Dong; Jiang, Wei; Gu, Zheming; Wang, Xiaojuan; Zhang, Zengxing; Sun, Zhengzong 2015-07-01 In human being’s history, both the Iron Age and Silicon Age thrived after a matured massive processing technology was developed. Graphene is the most recent superior material which could potentially initialize another new material Age. However, while being exploited to its full extent, conventional processing methods fail to provide a link to today’s personalization tide. New technology should be ushered in. Three-dimensional (3D) printing fills the missing linkage between graphene materials and the digital mainstream. 
Their alliance could generate additional stream to push the graphene revolution into a new phase. Here we demonstrate for the first time, a graphene composite, with a graphene loading up to 5.6 wt%, can be 3D printable into computer-designed models. The composite’s linear thermal coefficient is below 75 ppm·°C−1 from room temperature to its glass transition temperature (Tg), which is crucial to build minute thermal stress during the printing process. 10. 3D medical thermography device 2015-05-01 In this paper, a novel handheld 3D medical thermography system is introduced. The proposed system consists of a thermal-infrared camera, a color camera and a depth camera rigidly attached in close proximity and mounted on an ergonomic handle. As a practitioner holding the device smoothly moves it around the human body parts, the proposed system generates and builds up a precise 3D thermogram model by incorporating information from each new measurement in real-time. The data is acquired in motion, thus it provides multiple points of view. When processed, these multiple points of view are adaptively combined by taking into account the reliability of each individual measurement which can vary due to a variety of factors such as angle of incidence, distance between the device and the subject and environmental sensor data or other factors influencing a confidence of the thermal-infrared data when captured. Finally, several case studies are presented to support the usability and performance of the proposed system. 11. 3D acoustic atmospheric tomography Rogers, Kevin; Finn, Anthony 2014-10-01 This paper presents a method for tomographically reconstructing spatially varying 3D atmospheric temperature profiles and wind velocity fields. Measurements of the acoustic signature measured onboard a small Unmanned Aerial Vehicle (UAV) are compared to ground-based observations of the same signals. 
The frequency-shifted signal variations are then used to estimate the acoustic propagation delay between the UAV and the ground microphones, which are also affected by atmospheric temperature and wind speed vectors along each sound ray path. The wind and temperature profiles are modelled as the weighted sum of Radial Basis Functions (RBFs), which also allow local meteorological measurements made at the UAV and ground receivers to supplement any acoustic observations. Tomography is used to provide a full 3D reconstruction/visualisation of the observed atmosphere. The technique offers observational mobility under direct user control and the capacity to monitor hazardous atmospheric environments, otherwise not justifiable on the basis of cost or risk. This paper summarises the tomographic technique and reports on the results of simulations and initial field trials. The technique has practical applications for atmospheric research, sound propagation studies, boundary layer meteorology, air pollution measurements, analysis of wind shear, and wind farm surveys. 12. 3D printed bionic ears. PubMed Mannoor, Manu S; Jiang, Ziwen; James, Teena; Kong, Yong Lin; Malatesta, Karen A; Soboyejo, Winston O; Verma, Naveen; Gracias, David H; McAlpine, Michael C 2013-06-12 The ability to three-dimensionally interweave biological tissue with functional electronics could enable the creation of bionic organs possessing enhanced functionalities over their human counterparts. Conventional electronic devices are inherently two-dimensional, preventing seamless multidimensional integration with synthetic biology, as the processes and materials are very different. Here, we present a novel strategy for overcoming these difficulties via additive manufacturing of biological cells with structural and nanoparticle derived electronic elements. 
As a proof of concept, we generated a bionic ear via 3D printing of a cell-seeded hydrogel matrix in the anatomic geometry of a human ear, along with an intertwined conducting polymer consisting of infused silver nanoparticles. This allowed for in vitro culturing of cartilage tissue around an inductive coil antenna in the ear, which subsequently enables readout of inductively-coupled signals from cochlea-shaped electrodes. The printed ear exhibits enhanced auditory sensing for radio frequency reception, and complementary left and right ears can listen to stereo audio music. Overall, our approach suggests a means to intricately merge biologic and nanoelectronic functionalities via 3D printing. 13. 3D structured illumination microscopy Dougherty, William M.; Goodwin, Paul C. 2011-03-01 Three-dimensional structured illumination microscopy achieves double the lateral and axial resolution of wide-field microscopy, using conventional fluorescent dyes, proteins and sample preparation techniques. A three-dimensional interference-fringe pattern excites the fluorescence, filling in the "missing cone" of the wide field optical transfer function, thereby enabling axial (z) discrimination. The pattern acts as a spatial carrier frequency that mixes with the higher spatial frequency components of the image, which usually succumb to the diffraction limit. The fluorescence image encodes the high frequency content as a down-mixed, moiré-like pattern. A series of images is required, wherein the 3D pattern is shifted and rotated, providing down-mixed data for a system of linear equations. Super-resolution is obtained by solving these equations. The speed with which the image series can be obtained can be a problem for the microscopy of living cells. Challenges include pattern-switching speeds, optical efficiency, wavefront quality and fringe contrast, fringe pitch optimization, and polarization issues. 
We will review some recent developments in 3D-SIM hardware with the goal of super-resolved z-stacks of motile cells. 14. Martian terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at lower left in this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 15. Martian terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at the lower area of this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). 
The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 16. Martian terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at lower left in this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 17. Martian terrain & airbags - 3D NASA Technical Reports Server (NTRS) 1997-01-01 Portions of the lander's deflated airbags and a petal are at the lower area of this image, taken in stereo by the Imager for Mars Pathfinder (IMP) on Sol 3. 3D glasses are necessary to identify surface detail. This image is part of a 3D 'monster' panorama of the area surrounding the landing site. Mars Pathfinder is the second in NASA's Discovery program of low-cost spacecraft with highly focused science goals. 
The Jet Propulsion Laboratory, Pasadena, CA, developed and manages the Mars Pathfinder mission for NASA's Office of Space Science, Washington, D.C. JPL is an operating division of the California Institute of Technology (Caltech). The Imager for Mars Pathfinder (IMP) was developed by the University of Arizona Lunar and Planetary Laboratory under contract to JPL. Peter Smith is the Principal Investigator. Click below to see the left and right views individually. [figure removed for brevity, see original site] Left [figure removed for brevity, see original site] Right 18. LOTT RANCH 3D PROJECT SciTech Connect Larry Lawrence; Bruce Miller 2004-09-01 The Lott Ranch 3D seismic prospect located in Garza County, Texas is a project initiated in September of 1991 by the J.M. Huber Corp., a petroleum exploration and production company. By today's standards the 126 square mile project does not seem monumental, however at the time it was conceived it was the most intensive land 3D project ever attempted. Acquisition began in September of 1991 utilizing GEO-SEISMIC, INC., a seismic data contractor. The field parameters were selected by J.M. Huber, and were of a radical design. The recording instruments used were GeoCor IV amplifiers designed by Geosystems Inc., which record the data in signed bit format. It would not have been practical, if not impossible, to have processed the entire raw volume with the tools available at that time. The end result was a dataset that was thought to have little utility due to difficulties in processing the field data. In 1997, Yates Energy Corp. located in Roswell, New Mexico, formed a partnership to further develop the project. Through discussions and meetings with Pinnacle Seismic, it was determined that the original Lott Ranch 3D volume could be vastly improved upon reprocessing. Pinnacle Seismic had shown the viability of improving field-summed signed bit data on smaller 2D and 3D projects. Yates contracted Pinnacle Seismic Ltd. 
to perform the reprocessing. This project was initiated with high resolution being a priority. Much of the potential resolution was lost through the initial summing of the field data. Modern computers that are now being utilized have tremendous speed and storage capacities that were cost prohibitive when this data was initially processed. Software updates and capabilities offer a variety of quality control and statics resolution, which are pertinent to the Lott Ranch project. The reprocessing effort was very successful. The resulting processed data-set was then interpreted using modern PC-based interpretation and mapping software. Production data, log data 19. Ideal Positions: 3D Sonography, Medical Visuality, Popular Culture. PubMed Seiber, Tim 2016-03-01 As digital technologies are integrated into medical environments, they continue to transform the experience of contemporary health care. Importantly, medicine is increasingly visual. In the history of sonography, visibility has played an important role in accessing fetal bodies for diagnostic and entertainment purposes. With the advent of three-dimensional (3D) rendering, sonography presents the fetus visually as already a child. The aesthetics of this process and the resulting imagery, made possible in digital networks, discloses important changes in the relationship between technology and biology, reproductive health and political debates, and biotechnology and culture. 20. 3-D HYDRODYNAMIC MODELING IN A GEOSPATIAL FRAMEWORK SciTech Connect Bollinger, J; Alfred Garrett, A; Larry Koffman, L; David Hayes, D 2006-08-24 3-D hydrodynamic models are used by the Savannah River National Laboratory (SRNL) to simulate the transport of thermal and radionuclide discharges in coastal estuary systems. Development of such models requires accurate bathymetry, coastline, and boundary condition data in conjunction with the ability to rapidly discretize model domains and interpolate the required geospatial data onto the domain. 
To facilitate rapid and accurate hydrodynamic model development, SRNL has developed a pre- and post-processor application in a geospatial framework to automate the creation of models using existing data. This automated capability allows development of very detailed models to maximize exploitation of available surface water radionuclide sample data and thermal imagery. 1. 3D endoscopic imaging using structured illumination technique (Conference Presentation) Le, Hanh N. D.; Nguyen, Hieu; Wang, Zhaoyang; Kang, Jin U. 2017-02-01 Surgeons have been increasingly relying on minimally invasive surgical guidance techniques not only to reduce surgical trauma but also to achieve accurate and objective surgical risk evaluations. A typical minimally invasive surgical guidance system provides visual assistance in two-dimensional anatomy and pathology of internal organ within a limited field of view. In this work, we propose and implement a structure illumination endoscope to provide a simple, inexpensive 3D endoscopic imaging to conduct high resolution 3D imagery for use in surgical guidance system. The system is calibrated and validated for quantitative depth measurement in both calibrated target and human subject. The system exhibits a depth of field of 20 mm, depth resolution of 0.2mm and a relative accuracy of 0.1%. The demonstrated setup affirms the feasibility of using the structured illumination endoscope for depth quantization and assisting medical diagnostic assessments 2. High-definition 3D display for training applications Pezzaniti, J. Larry; Edmondson, Richard; Vaden, Justin; Hyatt, Brian; Morris, James; Chenault, David; Tchon, Joe; Barnidge, Tracy 2010-04-01 In this paper, we report on the development of a high definition stereoscopic liquid crystal display for use in training applications. The display technology provides full spatial and temporal resolution on a liquid crystal display panel consisting of 1920×1200 pixels at 60 frames per second. 
Display content can include mixed 2D and 3D data. Source data can be 3D video from cameras, computer generated imagery, or fused data from a variety of sensor modalities. Discussion of the use of this display technology in military and medical industries will be included. Examples of use in simulation and training for robot tele-operation, helicopter landing, surgical procedures, and vehicle repair, as well as for DoD mission rehearsal will be presented. 3. 3D Printed Shelby Cobra ScienceCinema Love, Lonnie 2016-11-02 ORNL's newly printed 3D Shelby Cobra was showcased at the 2015 NAIAS in Detroit. This "laboratory on wheels" uses the Shelby Cobra design, celebrating the 50th anniversary of this model and honoring the first vehicle to be voted a national monument. The Shelby was printed at the Department of Energy’s Manufacturing Demonstration Facility at ORNL using the BAAM (Big Area Additive Manufacturing) machine and is intended as a “plug-n-play” laboratory on wheels. The Shelby will allow research and development of integrated components to be tested and enhanced in real time, improving the use of sustainable, digital manufacturing solutions in the automotive industry. 4. Quasi 3D dispersion experiment Bakucz, P. 2003-04-01 This paper studies the problem of tracer dispersion in a coloured fluid flowing through a two-phase 3D rough channel-system in a 40 cm*40 cm plexi-container filled by homogen glass fractions and colourless fluid. The unstable interface between the driving coloured fluid and the colourless fluid develops viscous fingers with a fractal structure at high capillary number. Five two-dimensional fractal fronts have been observed at the same time using four cameras along the vertical side-walls and using one camera located above the plexi-container. In possession of five fronts the spatial concentration contours are determined using statistical models. The concentration contours are self-affine fractal curves with a fractal dimension D=2.19. 
This result is valid for disperison at high Péclet numbers. 5. ShowMe3D SciTech Connect Sinclair, Michael B 2012-01-05 ShowMe3D is a data visualization graphical user interface specifically designed for use with hyperspectral image obtained from the Hyperspectral Confocal Microscope. The program allows the user to select and display any single image from a three dimensional hyperspectral image stack. By moving a slider control, the user can easily move between images of the stack. The user can zoom into any region of the image. The user can select any pixel or region from the displayed image and display the fluorescence spectrum associated with that pixel or region. The user can define up to 3 spectral filters to apply to the hyperspectral image and view the image as it would appear from a filter-based confocal microscope. The user can also obtain statistics such as intensity average and variance from selected regions. 6. Supernova Remnant in 3-D NASA Technical Reports Server (NTRS) 2009-01-01 wavelengths. Since the amount of the wavelength shift is related to the speed of motion, one can determine how fast the debris are moving in either direction. Because Cas A is the result of an explosion, the stellar debris is expanding radially outwards from the explosion center. Using simple geometry, the scientists were able to construct a 3-D model using all of this information. A program called 3-D Slicer modified for astronomical use by the Astronomical Medicine Project at Harvard University in Cambridge, Mass. was used to display and manipulate the 3-D model. Commercial software was then used to create the 3-D fly-through. The blue filaments defining the blast wave were not mapped using the Doppler effect because they emit a different kind of light synchrotron radiation that does not emit light at discrete wavelengths, but rather in a broad continuum. The blue filaments are only a representation of the actual filaments observed at the blast wave. 
This visualization shows that there are two main components to this supernova remnant: a spherical component in the outer parts of the remnant and a flattened (disk-like) component in the inner region. The spherical component consists of the outer layer of the star that exploded, probably made of helium and carbon. These layers drove a spherical blast wave into the diffuse gas surrounding the star. The flattened component that astronomers were unable to map into 3-D prior to these Spitzer observations consists of the inner layers of the star. It is made from various heavier elements, not all shown in the visualization, such as oxygen, neon, silicon, sulphur, argon and iron. High-velocity plumes, or jets, of this material are shooting out from the explosion in the plane of the disk-like component mentioned above. Plumes of silicon appear in the northeast and southwest, while those of iron are seen in the southeast and north. These jets were already known and Doppler velocity measurements have been made for these 7. Supernova Remnant in 3-D NASA Technical Reports Server (NTRS) 2009-01-01 wavelengths. Since the amount of the wavelength shift is related to the speed of motion, one can determine how fast the debris are moving in either direction. Because Cas A is the result of an explosion, the stellar debris is expanding radially outwards from the explosion center. Using simple geometry, the scientists were able to construct a 3-D model using all of this information. A program called 3-D Slicer modified for astronomical use by the Astronomical Medicine Project at Harvard University in Cambridge, Mass. was used to display and manipulate the 3-D model. Commercial software was then used to create the 3-D fly-through. The blue filaments defining the blast wave were not mapped using the Doppler effect because they emit a different kind of light synchrotron radiation that does not emit light at discrete wavelengths, but rather in a broad continuum. 
The blue filaments are only a representation of the actual filaments observed at the blast wave. This visualization shows that there are two main components to this supernova remnant: a spherical component in the outer parts of the remnant and a flattened (disk-like) component in the inner region. The spherical component consists of the outer layer of the star that exploded, probably made of helium and carbon. These layers drove a spherical blast wave into the diffuse gas surrounding the star. The flattened component that astronomers were unable to map into 3-D prior to these Spitzer observations consists of the inner layers of the star. It is made from various heavier elements, not all shown in the visualization, such as oxygen, neon, silicon, sulphur, argon and iron. High-velocity plumes, or jets, of this material are shooting out from the explosion in the plane of the disk-like component mentioned above. Plumes of silicon appear in the northeast and southwest, while those of iron are seen in the southeast and north. These jets were already known and Doppler velocity measurements have been made for these 8. 3D Printing of Graphene Aerogels. PubMed Zhang, Qiangqiang; Zhang, Feng; Medarametla, Sai Pradeep; Li, Hui; Zhou, Chi; Lin, Dong 2016-04-06 3D printing of a graphene aerogel with true 3D overhang structures is highlighted. The aerogel is fabricated by combining drop-on-demand 3D printing and freeze casting. The water-based GO ink is ejected and freeze-cast into designed 3D structures. The lightweight (<10 mg cm(-3) ) 3D printed graphene aerogel presents superelastic and high electrical conduction. 9. Oblique Photogrammetry Supporting 3d Urban Reconstruction of Complex Scenarios Toschi, I.; Ramos, M. M.; Nocerino, E.; Menna, F.; Remondino, F.; Moe, K.; Poli, D.; Legat, K.; Fassi, F. 
2017-05-01 Accurate 3D city models represent an important source of geospatial information to support various "smart city" applications, such as space management, energy assessment, 3D cartography, noise and pollution mapping as well as disaster management. Even though remarkable progress has been made in recent years, there are still many open issues, especially when it comes to the 3D modelling of complex urban scenarios like historical and densely-built city centres featuring narrow streets and non-conventional building shapes. Most approaches introduce strong building priors/constraints on symmetry and roof typology that penalize urban environments having high variations of roof shapes. Furthermore, although oblique photogrammetry is rapidly maturing, the use of slanted views for façade reconstruction is not completely included in the reconstruction pipeline of state-of-the-art software. This paper aims to investigate state-of-the-art methods for 3D building modelling in complex urban scenarios with the support of oblique airborne images. A reconstruction approach based on roof primitives fitting is tested. Oblique imagery is then exploited to support the manual editing of the generated building models. At the same time, mobile mapping data are collected at cm resolution and then integrated with the aerial ones. All approaches are tested on the historical city centre of Bergamo (Italy). 10. 3D Structure of Tillage Soils González-Torre, Iván; Losada, Juan Carlos; Falconer, Ruth; Hapca, Simona; Tarquis, Ana M. 2015-04-01 Soil structure may be defined as the spatial arrangement of soil particles, aggregates and pores. The geometry of each one of these elements, as well as their spatial arrangement, has a great influence on the transport of fluids and solutes through the soil. Fractal/Multifractal methods have been increasingly applied to quantify soil structure thanks to the advances in computer technology (Tarquis et al., 2003). 
There is no doubt that computed tomography (CT) has provided an alternative for observing intact soil structure. These CT techniques reduce the physical impact to sampling, providing three-dimensional (3D) information and allowing rapid scanning to study sample dynamics in near real-time (Houston et al., 2013a). However, several authors have dedicated attention to the appropriate pore-solid CT threshold (Elliot and Heck, 2007; Houston et al., 2013b) and the better method to estimate the multifractal parameters (Grau et al., 2006; Tarquis et al., 2009). The aim of the present study is to evaluate the effect of the algorithm applied in the multifractal method (box counting and box gliding) and the cube size on the calculation of generalized fractal dimensions (Dq) in grey images without applying any threshold. To this end, soil samples were extracted from different areas plowed with three tools (moldboard, chissel and plow). Soil samples for each of the tillage treatment were packed into polypropylene cylinders of 8 cm diameter and 10 cm high. These were imaged using an mSIMCT at 155keV and 25 mA. An aluminium filter (0.25 mm) was applied to reduce beam hardening and later several corrections where applied during reconstruction. References Elliot, T.R. and Heck, R.J. 2007. A comparison of 2D and 3D thresholding of CT imagery. Can. J. Soil Sci., 87(4), 405-412. Grau, J, Médez, V.; Tarquis, A.M., Saa, A. and Díaz, M.C.. 2006. Comparison of gliding box and box-counting methods in soil image analysis. Geoderma, 134, 349-359. González-Torres, Iván. Theory and 11. Improvement of the signal-to-noise ratio in static-mode down-looking synthetic aperture imaging ladar Lu, Zhiyong; Sun, Jianfeng; Zhang, Ning; Zhou, Yu; Cai, Guangyu; Liu, Liren 2015-09-01 The static-mode down-looking synthetic aperture imaging ladar (SAIL) can keep the target and carrying-platform still during the collection process. 
Improvement of the signal-to-noise ratio in static-mode down-looking SAIL is investigated. The signal-to-noise ratio is improved by increasing scanning time and sampling rate in static-mode down-looking SAIL. In the experiment, the targets are reconstructed in different scanning time and different sampling rate. As the increasing of the scanning time and sampling rate, the reconstructed images become clearer. These techniques have a great potential for applications in extensive synthetic aperture imaging ladar fields. 12. 3-D visualization and animation technologies in anatomical imaging PubMed Central McGhee, John 2010-01-01 This paper explores a 3-D computer artist’s approach to the creation of three-dimensional computer-generated imagery (CGI) derived from clinical scan data. Interpretation of scientific imagery, such as magnetic resonance imaging (MRI), is restricted to the eye of the trained medical practitioner in a clinical or scientific context. In the research work described here, MRI data are visualized and interpreted by a 3-D computer artist using the tools of the digital animator to navigate image complexity and widen interaction. In this process, the artefact moves across disciplines; it is no longer tethered to its diagnostic origins. It becomes an object that has visual attributes such as light, texture and composition, and a visual aesthetic of its own. The introduction of these visual attributes provides a platform for improved accessibility by a lay audience. The paper argues that this more artisan approach to clinical data visualization has a potential real-world application as a communicative tool for clinicians and patients during consultation. PMID:20002229 13. 3-D visualization and animation technologies in anatomical imaging. PubMed McGhee, John 2010-02-01 This paper explores a 3-D computer artist's approach to the creation of three-dimensional computer-generated imagery (CGI) derived from clinical scan data. 
Interpretation of scientific imagery, such as magnetic resonance imaging (MRI), is restricted to the eye of the trained medical practitioner in a clinical or scientific context. In the research work described here, MRI data are visualized and interpreted by a 3-D computer artist using the tools of the digital animator to navigate image complexity and widen interaction. In this process, the artefact moves across disciplines; it is no longer tethered to its diagnostic origins. It becomes an object that has visual attributes such as light, texture and composition, and a visual aesthetic of its own. The introduction of these visual attributes provides a platform for improved accessibility by a lay audience. The paper argues that this more artisan approach to clinical data visualization has a potential real-world application as a communicative tool for clinicians and patients during consultation. 14. 3D ultrafast laser scanner Mahjoubfar, A.; Goda, K.; Wang, C.; Fard, A.; Adam, J.; Gossett, D. R.; Ayazi, A.; Sollier, E.; Malik, O.; Chen, E.; Liu, Y.; Brown, R.; Sarkhosh, N.; Di Carlo, D.; Jalali, B. 2013-03-01 Laser scanners are essential for scientific research, manufacturing, defense, and medical practice. Unfortunately, often times the speed of conventional laser scanners (e.g., galvanometric mirrors and acousto-optic deflectors) falls short for many applications, resulting in motion blur and failure to capture fast transient information. Here, we present a novel type of laser scanner that offers roughly three orders of magnitude higher scan rates than conventional methods. Our laser scanner, which we refer to as the hybrid dispersion laser scanner, performs inertia-free laser scanning by dispersing a train of broadband pulses both temporally and spatially. More specifically, each broadband pulse is temporally processed by time stretch dispersive Fourier transform and further dispersed into space by one or more diffractive elements such as prisms and gratings. 
As a proof-of-principle demonstration, we perform 1D line scans at a record high scan rate of 91 MHz and 2D raster scans and 3D volumetric scans at an unprecedented scan rate of 105 kHz. The method holds promise for a broad range of scientific, industrial, and biomedical applications. To show the utility of our method, we demonstrate imaging, nanometer-resolved surface vibrometry, and high-precision flow cytometry with real-time throughput that conventional laser scanners cannot offer due to their low scan rates. 15. 3D multiplexed immunoplasmonics microscopy Bergeron, Éric; Patskovsky, Sergiy; Rioux, David; Meunier, Michel 2016-07-01 Selective labelling, identification and spatial distribution of cell surface biomarkers can provide important clinical information, such as distinction between healthy and diseased cells, evolution of a disease and selection of the optimal patient-specific treatment. Immunofluorescence is the gold standard for efficient detection of biomarkers expressed by cells. However, antibodies (Abs) conjugated to fluorescent dyes remain limited by their photobleaching, high sensitivity to the environment, low light intensity, and wide absorption and emission spectra. Immunoplasmonics is a novel microscopy method based on the visualization of Abs-functionalized plasmonic nanoparticles (fNPs) targeting cell surface biomarkers. Tunable fNPs should provide higher multiplexing capacity than immunofluorescence since NPs are photostable over time, strongly scatter light at their plasmon peak wavelengths and can be easily functionalized. In this article, we experimentally demonstrate accurate multiplexed detection based on the immunoplasmonics approach. First, we achieve the selective labelling of three targeted cell surface biomarkers (cluster of differentiation 44 (CD44), epidermal growth factor receptor (EGFR) and voltage-gated K+ channel subunit KV1.1) on human cancer CD44+ EGFR+ KV1.1+ MDA-MB-231 cells and reference CD44- EGFR- KV1.1+ 661W cells. 
The labelling efficiency with three stable specific immunoplasmonics labels (functionalized silver nanospheres (CD44-AgNSs), gold (Au) NSs (EGFR-AuNSs) and Au nanorods (KV1.1-AuNRs)) detected by reflected light microscopy (RLM) is similar to the one with immunofluorescence. Second, we introduce an improved method for 3D localization and spectral identification of fNPs based on fast z-scanning by RLM with three spectral filters corresponding to the plasmon peak wavelengths of the immunoplasmonics labels in the cellular environment (500 nm for 80 nm AgNSs, 580 nm for 100 nm AuNSs and 700 nm for 40 nm × 92 nm AuNRs). Third, the developed 16. 3D Kitaev spin liquids Hermanns, Maria The Kitaev honeycomb model has become one of the archetypal spin models exhibiting topological phases of matter, where the magnetic moments fractionalize into Majorana fermions interacting with a Z2 gauge field. In this talk, we discuss generalizations of this model to three-dimensional lattice structures. Our main focus is the metallic state that the emergent Majorana fermions form. In particular, we discuss the relation of the nature of this Majorana metal to the details of the underlying lattice structure. Besides (almost) conventional metals with a Majorana Fermi surface, one also finds various realizations of Dirac semi-metals, where the gapless modes form Fermi lines or even Weyl nodes. We introduce a general classification of these gapless quantum spin liquids using projective symmetry analysis. Furthermore, we briefly outline why these Majorana metals in 3D Kitaev systems provide an even richer variety of Dirac and Weyl phases than possible for electronic matter and comment on possible experimental signatures. Work done in collaboration with Kevin O'Brien and Simon Trebst. 17. Crowdsourcing Based 3d Modeling Somogyi, A.; Barsi, A.; Molnar, B.; Lovas, T. 2016-06-01 Web-based photo albums that support organizing and viewing the users' images are widely used. 
These services provide a convenient solution for storing, editing and sharing images. In many cases, the users attach geotags to the images in order to enable using them e.g. in location based applications on social networks. Our paper discusses a procedure that collects open access images from a site frequently visited by tourists. Geotagged pictures showing the image of a sight or tourist attraction are selected and processed in photogrammetric processing software that produces the 3D model of the captured object. For the particular investigation we selected three attractions in Budapest. To assess the geometrical accuracy, we used laser scanner and DSLR as well as smart phone photography to derive reference values to enable verifying the spatial model obtained from the web-album images. The investigation shows how detailed and accurate models could be derived applying photogrammetric processing software, simply by using images of the community, without visiting the site. 18. NIF Ignition Target 3D Point Design SciTech Connect Jones, O; Marinak, M; Milovich, J; Callahan, D 2008-11-05 We have developed an input file for running 3D NIF hohlraums that is optimized such that it can be run in 1-2 days on parallel computers. We have incorporated increasing levels of automation into the 3D input file: (1) Configuration controlled input files; (2) Common file for 2D and 3D, different types of capsules (symcap, etc.); and (3) Can obtain target dimensions, laser pulse, and diagnostics settings automatically from NIF Campaign Management Tool. Using 3D Hydra calculations to investigate different problems: (1) Intrinsic 3D asymmetry; (2) Tolerance to nonideal 3D effects (e.g. laser power balance, pointing errors); and (3) Synthetic diagnostics. 19. 3D reconstruction of a tree stem using video images and pulse distances Treesearch N. E. 
Clark 2002-01-01 This paper demonstrates how a 3D tree stem model can be reconstructed using video imagery combined with laser pulse distance measurements. Perspective projection is used to place the data collected with the portable video laser-rangefinding device into a real world coordinate system. This hybrid methodology uses a relatively small number of range measurements (compared... 20. 3-D Cavern Enlargement Analyses SciTech Connect EHGARTNER, BRIAN L.; SOBOLIK, STEVEN R. 2002-03-01 Three-dimensional finite element analyses simulate the mechanical response of enlarging existing caverns at the Strategic Petroleum Reserve (SPR). The caverns are located in Gulf Coast salt domes and are enlarged by leaching during oil drawdowns as fresh water is injected to displace the crude oil from the caverns. The current criteria adopted by the SPR limits cavern usage to 5 drawdowns (leaches). As a base case, 5 leaches were modeled over a 25 year period to roughly double the volume of a 19 cavern field. Thirteen additional leaches where then simulated until caverns approached coalescence. The cavern field approximated the geometries and geologic properties found at the West Hackberry site. This enabled comparisons are data collected over nearly 20 years to analysis predictions. The analyses closely predicted the measured surface subsidence and cavern closure rates as inferred from historic well head pressures. This provided the necessary assurance that the model displacements, strains, and stresses are accurate. However, the cavern field has not yet experienced the large scale drawdowns being simulated. Should they occur in the future, code predictions should be validated with actual field behavior at that time. The simulations were performed using JAS3D, a three dimensional finite element analysis code for nonlinear quasi-static solids. 
The results examine the impacts of leaching and cavern workovers, where internal cavern pressures are reduced, on surface subsidence, well integrity, and cavern stability. The results suggest that the current limit of 5 oil drawdowns may be extended with some mitigative action required on the wells and later on to surface structure due to subsidence strains. The predicted stress state in the salt shows damage to start occurring after 15 drawdowns with significant failure occurring at the 16th drawdown, well beyond the current limit of 5 drawdowns. 1. America's National Parks 3d (4) Atmospheric Science Data Center 2017-04-11 article title:  America's National Parks Viewed in 3D by NASA's MISR (Anaglyph 4)   ... four new anaglyphs that showcase 33 of our nation's national parks, monuments, historical sites and recreation areas in glorious 3D.   ... 2. America's National Parks 3d (3) Atmospheric Science Data Center 2016-12-30 article title:  America's National Parks Viewed in 3D by NASA's MISR (Anaglyph 3)   ... four new anaglyphs that showcase 33 of our nation's national parks, monuments, historical sites and recreation areas in glorious 3D.   ... 3. America's National Parks 3d (2) Atmospheric Science Data Center 2016-12-30 article title:  America's National Parks Viewed in 3D by NASA's MISR (Anaglyph 2)   ... four new anaglyphs that showcase 33 of our nation's national parks, monuments, historical sites and recreation areas in glorious 3D.   ... 4. America's National Parks 3d (1) Atmospheric Science Data Center 2016-12-30 article title:  America's National Parks Viewed in 3D by NASA's MISR (Anaglyph 1)   ... four new anaglyphs that showcase 33 of our nation's national parks, monuments, historical sites and recreation areas in glorious 3D.   ... 5. 3D ultrasound in fetal spina bifida. 
PubMed Schramm, T; Gloning, K-P; Minderer, S; Tutschek, B 2008-12-01 3D ultrasound can be used to study the fetal spine, but skeletal mode can be inconclusive for the diagnosis of fetal spina bifida. We illustrate a diagnostic approach using 2D and 3D ultrasound and indicate possible pitfalls. 6. An interactive multiview 3D display system Zhang, Zhaoxing; Geng, Zheng; Zhang, Mei; Dong, Hui 2013-03-01 The progresses in 3D display systems and user interaction technologies will help more effective 3D visualization of 3D information. They yield a realistic representation of 3D objects and simplifies our understanding to the complexity of 3D objects and spatial relationship among them. In this paper, we describe an autostereoscopic multiview 3D display system with capability of real-time user interaction. Design principle of this autostereoscopic multiview 3D display system is presented, together with the details of its hardware/software architecture. A prototype is built and tested based upon multi-projectors and horizontal optical anisotropic display structure. Experimental results illustrate the effectiveness of this novel 3D display and user interaction system. 7. [3D emulation of epicardium dynamic mapping]. PubMed Lu, Jun; Yang, Cui-Wei; Fang, Zu-Xiang 2005-03-01 In order to realize epicardium dynamic mapping of the whole atria, 3-D graphics are drawn with OpenGL. Some source codes are introduced in the paper to explain how to produce, read, and manipulate 3-D model data. 8. LADAR performance simulations with a high spectral resolution atmospheric transmittance and radiance model: LEEDR Roth, Benjamin D.; Fiorino, Steven T. 2012-06-01 In this study of atmospheric effects on Geiger Mode laser ranging and detection (LADAR), the parameter space is explored primarily using the Air Force Institute of Technology Center for Directed Energy's (AFIT/CDE) Laser Environmental Effects Definition and Reference (LEEDR) code. 
The expected performance of LADAR systems is assessed at operationally representative wavelengths of 1.064, 1.56 and 2.039 μm at a number of locations worldwide. Signal attenuation and background noise are characterized using LEEDR. These results are compared to standard atmosphere and Fast Atmospheric Signature Code (FASCODE) assessments. Scenarios evaluated are based on air-toground engagements including both down looking oblique and vertical geometries in which anticipated clear air aerosols are expected to occur. Engagement geometry variations are considered to determine optimum employment techniques to exploit or defeat the environmental conditions. Results, presented primarily in the form of worldwide plots of notional signal to noise ratios, show a significant climate dependence, but large variances between climatological and standard atmosphere assessments. An overall average absolute mean difference ratio of 1.03 is found when climatological signal-to-noise ratios at 40 locations are compared to their equivalent standard atmosphere assessment. Atmospheric transmission is shown to not always correlate with signal-to-noise ratios between different atmosphere profiles. Allowing aerosols to swell with relative humidity proves to be significant especially for up looking geometries reducing the signal-to-noise ratio several orders of magnitude. Turbulence blurring effects that impact tracking and imaging show that the LADAR system has little capability at a 50km range yet the turbulence has little impact at a 3km range. 9. 3-D Extensions for Trustworthy Systems DTIC Science & Technology 2011-01-01 modifications to the floor planning stage of the 3-D design flow that are necessary to support our design approach. We strongly recommend that the 3-D EDA ...and we outline problems, challenges, attacks, solutions, and topics for future research. 15. SUBJECT TERMS 16. 
SECURITY CLASSIFICATION OF: 17...Requirements for automated 3-D IC design tools for the physical layout of components. Since fully automated Electronic Design Automation ( EDA ) for 3-D 10. Microfabricating 3D Structures by Laser Origami DTIC Science & Technology 2011-11-09 10.1117/2.1201111.003952 Microfabricating 3D structures by laser origami Alberto Piqué, Scott Mathews, Andrew Birnbaum, and Nicholas Charipar A new...folding known as origami allows the transformation of flat patterns into 3D shapes. A similar approach can be used to generate 3D structures com...materials Figure 1. (A–C) Schematic illustrating the steps in the laser origami process and (D) a resulting folded out-of-plane 3D structure. that can 11. Laser Based 3D Volumetric Display System DTIC Science & Technology 1993-03-01 Literature, Costa Mesa, CA July 1983. 3. "A Real Time Autostereoscopic Multiplanar 3D Display System", Rodney Don Williams, Felix Garcia, Jr., Texas...8217 .- NUMBERS LASER BASED 3D VOLUMETRIC DISPLAY SYSTEM PR: CD13 0. AUTHOR(S) PE: N/AWIU: DN303151 P. Soltan, J. Trias, W. Robinson, W. Dahlke 7...laser generated 3D volumetric images on a rotating double helix, (where the 3D displays are computer controlled for group viewing with the naked eye 12. 3-D Object Recognition from Point Cloud Data Smith, W.; Walker, A. S.; Zhang, B. 2011-09-01 The market for real-time 3-D mapping includes not only traditional geospatial applications but also navigation of unmanned autonomous vehicles (UAVs). Massively parallel processes such as graphics processing unit (GPU) computing make real-time 3-D object recognition and mapping achievable. Geospatial technologies such as digital photogrammetry and GIS offer advanced capabilities to produce 2-D and 3-D static maps using UAV data. The goal is to develop real-time UAV navigation through increased automation. 
It is challenging for a computer to identify a 3-D object such as a car, a tree or a house, yet automatic 3-D object recognition is essential to increasing the productivity of geospatial data such as 3-D city site models. In the past three decades, researchers have used radiometric properties to identify objects in digital imagery with limited success, because these properties vary considerably from image to image. Consequently, our team has developed software that recognizes certain types of 3-D objects within 3-D point clouds. Although our software is developed for modeling, simulation and visualization, it has the potential to be valuable in robotics and UAV applications. The locations and shapes of 3-D objects such as buildings and trees are easily recognizable by a human from a brief glance at a representation of a point cloud such as terrain-shaded relief. The algorithms to extract these objects have been developed and require only the point cloud and minimal human inputs such as a set of limits on building size and a request to turn on a squaring option. The algorithms use both digital surface model (DSM) and digital elevation model (DEM), so software has also been developed to derive the latter from the former. The process continues through the following steps: identify and group 3-D object points into regions; separate buildings and houses from trees; trace region boundaries; regularize and simplify boundary polygons; construct complex roofs. Several case 13. The simulation of space-time speckle and impact based on synthetic aperture imaging ladar Xu, Qian; Liu, Liren; Zhou, Yu; Sun, Jianfeng; Wu, Yapeng 2012-10-01 In synthetic aperture imaging ladar (SAIL), spatially and temporally varied speckles are resulted from the linear wavelength chirped laser signal. The random phase and amplitude of space-time speckle is imported to heterodyne beat signal by antenna aperture integration. The numerical evolution for such an effect is presented. 
Our research indicates the random phase and amplitude is closely related to the ratio of antenna aperture and speckle scale. According to computer simulation results, the scale design of optical antenna aperture to reduce the image degradation is proposed. 14. Measurement of polarization parameters of the targets in synthetic aperture imaging LADAR Xu, Qian; Sun, Jianfeng; Lu, Wei; Hou, Peipei; Ma, Xiaoping; Lu, Zhiyong; Sun, Zhiwei; Liu, Liren 2015-09-01 In Synthetic aperture imaging ladar (SAIL), the polarization state change of the backscattered light will affect the imaging. Polarization state of the reflected field is always determined by the interaction of the light and the materials on the target plane. The Stokes parameters, which can provide the information on both light intensity and polarization state, are the ideal quantities for characterizing the above features. In this paper, a measurement system of the polarization characteristic for the SAIL target materials is designed. The measurement results are expected to be useful in target identification and recognition. 15. Image quality analysis and improvement of Ladar reflective tomography for space object recognition Wang, Jin-cheng; Zhou, Shi-wei; Shi, Liang; Hu, Yi-Hua; Wang, Yong 2016-01-01 Some problems in the application of Ladar reflective tomography for space object recognition are studied in this work. An analytic target model is adopted to investigate the image reconstruction properties with limited relative angle range, which are useful to verify the target shape from the incomplete image, analyze the shadowing effect of the target and design the satellite payloads against recognition via reflective tomography approach. We proposed an iterative maximum likelihood method basing on Bayesian theory, which can effectively compress the pulse width and greatly improve the image resolution of incoherent LRT system without loss of signal to noise ratio. 16. 
Teaching Geography with 3-D Visualization Technology ERIC Educational Resources Information Center Anthamatten, Peter; Ziegler, Susy S. 2006-01-01 Technology that helps students view images in three dimensions (3-D) can support a broad range of learning styles. "Geo-Wall systems" are visualization tools that allow scientists, teachers, and students to project stereographic images and view them in 3-D. We developed and presented 3-D visualization exercises in several undergraduate courses.… 17. Expanding Geometry Understanding with 3D Printing ERIC Educational Resources Information Center Cochran, Jill A.; Cochran, Zane; Laney, Kendra; Dean, Mandi 2016-01-01 With the rise of personal desktop 3D printing, a wide spectrum of educational opportunities has become available for educators to leverage this technology in their classrooms. Until recently, the ability to create physical 3D models was well beyond the scope, skill, and budget of many schools. However, since desktop 3D printers have become readily… 18. Imaging a Sustainable Future in 3D Schuhr, W.; Lee, J. D.; Kanngieser, E. 2012-07-01 It is the intention of this paper, to contribute to a sustainable future by providing objective object information based on 3D photography as well as promoting 3D photography not only for scientists, but also for amateurs. Due to the presentation of this article by CIPA Task Group 3 on "3D Photographs in Cultural Heritage", the presented samples are masterpieces of historic as well as of current 3D photography concentrating on cultural heritage. In addition to a report on exemplarily access to international archives of 3D photographs, samples for new 3D photographs taken with modern 3D cameras, as well as by means of a ground based high resolution XLITE staff camera and also 3D photographs taken from a captive balloon and the use of civil drone platforms are dealt with. 
To advise on optimum suited 3D methodology, as well as to catch new trends in 3D, an updated synoptic overview of the 3D visualization technology, even claiming completeness, has been carried out as a result of a systematic survey. In this respect, e.g., today's lasered crystals might be "early bird" products in 3D, which, due to lack in resolution, contrast and color, remember to the stage of the invention of photography. 19. Expanding Geometry Understanding with 3D Printing ERIC Educational Resources Information Center Cochran, Jill A.; Cochran, Zane; Laney, Kendra; Dean, Mandi 2016-01-01 With the rise of personal desktop 3D printing, a wide spectrum of educational opportunities has become available for educators to leverage this technology in their classrooms. Until recently, the ability to create physical 3D models was well beyond the scope, skill, and budget of many schools. However, since desktop 3D printers have become readily… 20. Teaching Geography with 3-D Visualization Technology ERIC Educational Resources Information Center Anthamatten, Peter; Ziegler, Susy S. 2006-01-01 Technology that helps students view images in three dimensions (3-D) can support a broad range of learning styles. "Geo-Wall systems" are visualization tools that allow scientists, teachers, and students to project stereographic images and view them in 3-D. We developed and presented 3-D visualization exercises in several undergraduate courses.… 1. 3D Printing and Its Urologic Applications PubMed Central Soliman, Youssef; Feibus, Allison H; Baum, Neil 2015-01-01 3D printing is the development of 3D objects via an additive process in which successive layers of material are applied under computer control. This article discusses 3D printing, with an emphasis on its historical context and its potential use in the field of urology. PMID:26028997 2. 
Beowulf 3D: a case study Engle, Rob 2008-02-01 This paper discusses the creative and technical challenges encountered during the production of "Beowulf 3D," director Robert Zemeckis' adaptation of the Old English epic poem and the first film to be simultaneously released in IMAX 3D and digital 3D formats. 3. 3D Flow Visualization Using Texture Advection NASA Technical Reports Server (NTRS) Kao, David; Zhang, Bing; Kim, Kwansik; Pang, Alex; Moran, Pat (Technical Monitor) 2001-01-01 Texture advection is an effective tool for animating and investigating 2D flows. In this paper, we discuss how this technique can be extended to 3D flows. In particular, we examine the use of 3D and 4D textures on 3D synthetic and computational fluid dynamics flow fields. 4. Quantitative 3-D imaging topogrammetry for telemedicine applications NASA Technical Reports Server (NTRS) Altschuler, Bruce R. 1994-01-01 The technology to reliably transmit high-resolution visual imagery over short to medium distances in real time has led to the serious considerations of the use of telemedicine, telepresence, and telerobotics in the delivery of health care. These concepts may involve, and evolve toward: consultation from remote expert teaching centers; diagnosis; triage; real-time remote advice to the surgeon; and real-time remote surgical instrument manipulation (telerobotics with virtual reality). Further extrapolation leads to teledesign and telereplication of spare surgical parts through quantitative teleimaging of 3-D surfaces tied to CAD/CAM devices and an artificially intelligent archival data base of 'normal' shapes. The ability to generate 'topogrames' or 3-D surface numerical tables of coordinate values capable of creating computer-generated virtual holographic-like displays, machine part replication, and statistical diagnostic shape assessment is critical to the progression of telemedicine. 
Any virtual reality simulation will remain in 'video-game' realm until realistic dimensional and spatial relational inputs from real measurements in vivo during surgeries are added to an ever-growing statistical data archive. The challenges of managing and interpreting this 3-D data base, which would include radiographic and surface quantitative data, are considerable. As technology drives toward dynamic and continuous 3-D surface measurements, presenting millions of X, Y, Z data points per second of flexing, stretching, moving human organs, the knowledge base and interpretive capabilities of 'brilliant robots' to work as a surgeon's tireless assistants becomes imaginable. The brilliant robot would 'see' what the surgeon sees--and more, for the robot could quantify its 3-D sensing and would 'see' in a wider spectral range than humans, and could zoom its 'eyes' from the macro world to long-distance microscopy. Unerring robot hands could rapidly perform machine-aided suturing with 5. Quantitative 3-D imaging topogrammetry for telemedicine applications NASA Technical Reports Server (NTRS) Altschuler, Bruce R. 1994-01-01 The technology to reliably transmit high-resolution visual imagery over short to medium distances in real time has led to the serious considerations of the use of telemedicine, telepresence, and telerobotics in the delivery of health care. These concepts may involve, and evolve toward: consultation from remote expert teaching centers; diagnosis; triage; real-time remote advice to the surgeon; and real-time remote surgical instrument manipulation (telerobotics with virtual reality). Further extrapolation leads to teledesign and telereplication of spare surgical parts through quantitative teleimaging of 3-D surfaces tied to CAD/CAM devices and an artificially intelligent archival data base of 'normal' shapes. 
The ability to generate 'topogrames' or 3-D surface numerical tables of coordinate values capable of creating computer-generated virtual holographic-like displays, machine part replication, and statistical diagnostic shape assessment is critical to the progression of telemedicine. Any virtual reality simulation will remain in 'video-game' realm until realistic dimensional and spatial relational inputs from real measurements in vivo during surgeries are added to an ever-growing statistical data archive. The challenges of managing and interpreting this 3-D data base, which would include radiographic and surface quantitative data, are considerable. As technology drives toward dynamic and continuous 3-D surface measurements, presenting millions of X, Y, Z data points per second of flexing, stretching, moving human organs, the knowledge base and interpretive capabilities of 'brilliant robots' to work as a surgeon's tireless assistants becomes imaginable. The brilliant robot would 'see' what the surgeon sees--and more, for the robot could quantify its 3-D sensing and would 'see' in a wider spectral range than humans, and could zoom its 'eyes' from the macro world to long-distance microscopy. Unerring robot hands could rapidly perform machine-aided suturing with 6. 3-D Perspective Kamchatka Peninsula Russia NASA Technical Reports Server (NTRS) 2000-01-01 This perspective view shows the western side of the volcanically active Kamchatka Peninsula in eastern Russia. The image was generated using the first data collected during the Shuttle Radar Topography Mission (SRTM). In the foreground is the Sea of Okhotsk. Inland from the coast, vegetated floodplains and low relief hills rise toward snow capped peaks. The topographic effects on snow and vegetation distribution are very clear in this near-horizontal view. Forming the skyline is the Sredinnyy Khrebet, the volcanic mountain range that makes up the spine of the peninsula. 
High resolution SRTM topographic data will be used by geologists to study how volcanoes form and to understand the hazards posed by future eruptions. This image was generated using topographic data from SRTM and an enhanced true-color image from the Landsat 7 satellite. This image contains about 2,400 meters (7,880 feet) of total relief. The topographic expression was enhanced by adding artificial shading as calculated from the SRTM elevation model. The Landsat data was provided by the United States Geological Survey's Earth Resources Observations Systems (EROS) Data Center, Sioux Falls, South Dakota. SRTM, launched on February 11, 2000, used the same radar instrument that comprised the Spaceborne Imaging Radar-C/X-Band Synthetic Aperture Radar (SIR-C/X-SAR) that flew twice on the Space Shuttle Endeavour in 1994. To collect the 3-D SRTM data, engineers added a 60- meter-long (200-foot) mast, installed additional C-band and X-band antennas, and improved tracking and navigation devices. SRTM collected three dimensional measurements of nearly 80 percent of the Earth's surface. SRTM is a cooperative project between NASA, the National Imagery and Mapping Agency (NIMA) of the U.S. Department of Defense, and the German and Italian space agencies. Size: 33.3 km (20.6 miles) wide x 136 km (84 miles) coast to skyline. Location: 58.3 deg. North lat., 160 deg. East long. Orientation: Easterly view, 2 degrees 7. 3-D Perspective View, Kamchatka Peninsula, Russia NASA Technical Reports Server (NTRS) 2000-01-01 This perspective view shows the western side of the volcanically active Kamchatka Peninsula in eastern Russia. The image was generated using the first data collected during the Shuttle Radar Topography Mission (SRTM). In the foreground is the Sea of Okhotsk. Inland from the coast, vegetated floodplains and low relief hills rise toward snow capped peaks. The topographic effects on snow and vegetation distribution are very clear in this near-horizontal view. 
Forming the skyline is the Sredinnyy Khrebet, the volcanic mountain range that makes up the spine of the peninsula. High resolution SRTM topographic data will be used by geologists to study how volcanoes form and to understand the hazards posed by future eruptions. This image was generated using topographic data from SRTM and an enhanced true-color image from the Landsat 7 satellite. This image contains about 2,400 meters (7,880 feet) of total relief. The topographic expression was enhanced by adding artificial shading as calculated from the SRTM elevation model. The Landsat data was provided by the United States Geological Survey's Earth Resources Observations Systems (EROS) Data Center, Sioux Falls, South Dakota. SRTM, launched on February 11, 2000, used the same radar instrument that comprised the Spaceborne Imaging Radar-C/X-Band Synthetic Aperture Radar(SIR-C/X-SAR) that flew twice on the Space Shuttle Endeavour in 1994. To collect the 3-D SRTM data, engineers added a 60-meter-long (200-foot) mast, installed additional C-band and X-band antennas, and improved tracking and navigation devices. SRTM collected three-dimensional measurements of nearly 80 percent of the Earth's surface. SRTM is a cooperative project between NASA, the National Imagery and Mapping Agency (NIMA) of the U.S. Department of Defense, and the German and Italian space agencies. It is managed by NASA's Jet Propulsion Laboratory, Pasadena, CA, for NASA's Earth Science Enterprise, Washington, D.C. Size: 33.3 km (20.6 miles) wide x 8. Case study: Beauty and the Beast 3D: benefits of 3D viewing for 2D to 3D conversion Handy Turner, Tara 2010-02-01 From the earliest stages of the Beauty and the Beast 3D conversion project, the advantages of accurate desk-side 3D viewing was evident. 
While designing and testing the 2D to 3D conversion process, the engineering team at Walt Disney Animation Studios proposed a 3D viewing configuration that not only allowed artists to "compose" stereoscopic 3D but also improved efficiency by allowing artists to instantly detect which image features were essential to the stereoscopic appeal of a shot and which features had minimal or even negative impact. At a time when few commercial 3D monitors were available and few software packages provided 3D desk-side output, the team designed their own prototype devices and collaborated with vendors to create a "3D composing" workstation. This paper outlines the display technologies explored, final choices made for Beauty and the Beast 3D, wish-lists for future development and a few rules of thumb for composing compelling 2D to 3D conversions. 9. Mini 3D for shallow gas reconnaissance SciTech Connect Vallieres, T. des; Enns, D.; Kuehn, H.; Parron, D.; Lafet, Y.; Van Hulle, D. 1996-12-31 The Mini 3D project was undertaken by TOTAL and ELF with the support of CEPM (Comité d'Études Pétrolières et Marines) to define an economical method of obtaining 3D seismic HR data for shallow gas assessment. An experimental 3D survey was carried out with classical site survey techniques in the North Sea. From these data, 19 simulations were produced to compare different acquisition geometries ranging from dual, 600 m long cables to a single receiver. Results show that short offset, low fold and very simple streamer positioning are sufficient to give a reliable 3D image of gas charged bodies. The 3D data allow a much more accurate risk delineation than 2D HR data. Moreover on financial grounds Mini-3D is comparable in cost to a classical HR 2D survey. In view of these results, such HR 3D should now be the standard for shallow gas surveying. 10. Effective 3-D surface modeling for geographic information systems Yüksek, K.; Alparslan, M.; Mendi, E. 
2016-01-01 In this work, we propose a dynamic, flexible and interactive urban digital terrain platform with spatial data and query processing capabilities of geographic information systems, multimedia database functionality and graphical modeling infrastructure. A new data element, called Geo-Node, which stores image, spatial data and 3-D CAD objects is developed using an efficient data structure. The system effectively handles data transfer of Geo-Nodes between main memory and secondary storage with an optimized directional replacement policy (DRP) based buffer management scheme. Polyhedron structures are used in digital surface modeling and smoothing process is performed by interpolation. The experimental results show that our framework achieves high performance and works effectively with urban scenes independent from the amount of spatial data and image size. The proposed platform may contribute to the development of various applications such as Web GIS systems based on 3-D graphics standards (e.g., X3-D and VRML) and services which integrate multi-dimensional spatial information and satellite/aerial imagery. 11. Digital acquisition system for high-speed 3-D imaging Yafuso, Eiji 1997-11-01 High-speed digital three-dimensional (3-D) imagery is possible using multiple independent charge-coupled device (CCD) cameras with sequentially triggered acquisition and individual field storage capability. The system described here utilizes sixteen independent cameras, providing versatility in configuration and image acquisition. By aligning the cameras in nearly coincident lines-of-sight, a sixteen frame two-dimensional (2-D) sequence can be captured. The delays can be individually adjusted to yield a greater number of acquired frames during the more rapid segments of the event. Additionally, individual integration periods may be adjusted to ensure adequate radiometric response while minimizing image blur. 
An alternative alignment and triggering scheme arranges the cameras into two angularly separated banks of eight cameras each. By simultaneously triggering correlated stereo pairs, an eight-frame sequence of stereo images may be captured. In the first alignment scheme the camera lines-of-sight cannot be made precisely coincident. Thus representation of the data as a monocular sequence introduces the issue of independent camera coordinate registration with the real scene. This issue arises more significantly using the stereo pair method to reconstruct quantitative 3-D spatial information of the event as a function of time. The principal development here will be the derivation and evaluation of a solution transform and its inverse for the digital data which will yield a 3-D spatial mapping as a function of time. 12. Effective 3-D surface modeling for geographic information systems Yüksek, K.; Alparslan, M.; Mendi, E. 2013-11-01 In this work, we propose a dynamic, flexible and interactive urban digital terrain platform (DTP) with spatial data and query processing capabilities of Geographic Information Systems (GIS), multimedia database functionality and graphical modeling infrastructure. A new data element, called Geo-Node, which stores image, spatial data and 3-D CAD objects is developed using an efficient data structure. The system effectively handles data transfer of Geo-Nodes between main memory and secondary storage with an optimized Directional Replacement Policy (DRP) based buffer management scheme. Polyhedron structures are used in Digital Surface Modeling (DSM) and smoothing process is performed by interpolation. The experimental results show that our framework achieves high performance and works effectively with urban scenes independent from the amount of spatial data and image size. The proposed platform may contribute to the development of various applications such as Web GIS systems based on 3-D graphics standards (e.g. 
X3-D and VRML) and services which integrate multi-dimensional spatial information and satellite/aerial imagery. 13. Simulation of a Geiger-mode imaging LADAR system for performance assessment. PubMed Kim, Seongjoon; Lee, Impyeong; Kwon, Yong Joon 2013-07-03 As LADAR systems applications gradually become more diverse, new types of systems are being developed. When developing new systems, simulation studies are an essential prerequisite. A simulator enables performance predictions and optimal system parameters at the design level, as well as providing sample data for developing and validating application algorithms. The purpose of the study is to propose a method for simulating a Geiger-mode imaging LADAR system. We develop simulation software to assess system performance and generate sample data for the applications. The simulation is based on three aspects of modeling-the geometry, radiometry and detection. The geometric model computes the ranges to the reflection points of the laser pulses. The radiometric model generates the return signals, including the noises. The detection model determines the flight times of the laser pulses based on the nature of the Geiger-mode detector. We generated sample data using the simulator with the system parameters and analyzed the detection performance by comparing the simulated points to the reference points. The proportion of the outliers in the simulated points reached 25.53%, indicating the need for efficient outlier elimination algorithms. In addition, the false alarm rate and dropout rate of the designed system were computed as 1.76% and 1.06%, respectively. 14. Simulation of a Geiger-Mode Imaging LADAR System for Performance Assessment PubMed Central Kim, Seongjoon; Lee, Impyeong; Kwon, Yong Joon 2013-01-01 As LADAR systems applications gradually become more diverse, new types of systems are being developed. When developing new systems, simulation studies are an essential prerequisite. 
A simulator enables performance predictions and optimal system parameters at the design level, as well as providing sample data for developing and validating application algorithms. The purpose of the study is to propose a method for simulating a Geiger-mode imaging LADAR system. We develop simulation software to assess system performance and generate sample data for the applications. The simulation is based on three aspects of modeling—the geometry, radiometry and detection. The geometric model computes the ranges to the reflection points of the laser pulses. The radiometric model generates the return signals, including the noises. The detection model determines the flight times of the laser pulses based on the nature of the Geiger-mode detector. We generated sample data using the simulator with the system parameters and analyzed the detection performance by comparing the simulated points to the reference points. The proportion of the outliers in the simulated points reached 25.53%, indicating the need for efficient outlier elimination algorithms. In addition, the false alarm rate and dropout rate of the designed system were computed as 1.76% and 1.06%, respectively. PMID:23823970 15. High-sensitivity 3 to 5 micron PPLN LADAR wavelength converter system Kingsley, S. A.; Sriram, S.; Powers, P. E. 2005-05-01 Remote sensing systems, such as LIDAR, have benefited greatly from nonlinear sources capable of generating tunable mid-infrared wavelengths (3-5 microns). Much work has focused on improving the energy output of these sources so as to improve the system's range. We present a different approach to improving the range by focusing on improving the receiver of a LADAR system employing nonlinear optical techniques. In this paper, we will present results of a receiver system based on frequency converting mid-infrared wavelengths to the 1.5 μm region using Periodically-Poled Lithium Niobate (PPLN). 
By doing so, optical amplifiers and avalanche photodetectors (APDs) developed for the fiber optics communications industry can be used, thus providing very high detection sensitivity and high speed without the need for cryogenically cooled optical detectors. We will present results of laboratory experiments with 3 μm, 2.5 ns FWHM LADAR pulses that have been converted to 1.5 μm. Detection sensitivities as low as 1.5 x 10^-13 Joules have been demonstrated. The performance of the Peltier-cooled 1.5 μm InGaAs APD quasi photon-counting receiver will be described. 16. High power CO2 coherent ladar haven't quit the stage of military affairs Zhang, Heyong 2015-05-01 The invention of the laser in 1960 created the possibility of using a source of coherent light as a transmitter for a laser radar (ladar). Coherent ladar shares many of the basic features of more common microwave radars. However, it is the extremely short operating wavelength of lasers that introduces new military applications, especially in the area of missile identification, space target tracking, remote rang finding, camouflage discrimination and toxic agent detection. Therefore, the most popular application field such as laser imaging and ranging were focused on CO2 laser in the last few decades. But during the development of solid state and fiber laser, some people said that the CO2 laser will be disappeared and will be replaced by the solid and fiber laser in the field of military and industry. The coherent CO2 laser radar will have the same destiny in the field of military affairs. However, to my opinion, the high power CO2 laser will be the most important laser source for laser radar and countermeasure in the future. Song, Zhi-yuan; Zhu, Shao-lan; Dong, Li-jun; Feng, Li; He, Hao-dong 2011-06-01 The traditional technique of phase laser range finder is mixing high frequency signals with analog circuits and filtering them to obtain the useful signal with low frequency. 
But the analog mixing circuits are susceptible to interference and will bring amplitude attenuation, phase jitter and offset and this way has difficulties in achieving high precision ranging and fast speed ranging at the same time. The method of this paper is based on under-sampling technique with digital synchronous detection and referring to Digital down converter technique of digital IF receiver in radar system. This method not only reduces the complexity of data processing, improves the speed and accuracy of phase detection at the same time, but also reduces requirements for ADC devices and DSP chips in the ladar system by a lower sampling rate. At the same time, the structure of electronic system is global simplified compared with traditional analog ladar system and the anti-jamming is greatly enhanced. So this method has important research value. 18. Multi-Purpose Crew Vehicle Camera Asset Planning: Imagery Previsualization NASA Technical Reports Server (NTRS) Beaulieu, K. 2014-01-01 Using JSC-developed and other industry-standard off-the-shelf 3D modeling, animation, and rendering software packages, the Image Science Analysis Group (ISAG) supports Orion Project imagery planning efforts through dynamic 3D simulation and realistic previsualization of ground-, vehicle-, and air-based camera output. 19. 3-D Technology Approaches for Biological Ecologies Liu, Liyu; Austin, Robert; U. S-China Physical-Oncology Sciences Alliance (PS-OA) Team Constructing three dimensional (3-D) landscapes is an inevitable issue in deep study of biological ecologies, because in whatever scales in nature, all of the ecosystems are composed by complex 3-D environments and biological behaviors. Just imagine if a 3-D technology could help complex ecosystems be built easily and mimic in vivo microenvironment realistically with flexible environmental controls, it will be a fantastic and powerful thrust to assist researchers for explorations. 
For years, we have been utilizing and developing different technologies for constructing 3-D micro landscapes for biophysics studies in vitro. Here, I will review our past efforts, including probing cancer cell invasiveness with 3-D silicon based Tepuis, constructing 3-D microenvironment for cell invasion and metastasis through polydimethylsiloxane (PDMS) soft lithography, as well as explorations of optimized stenting positions for coronary bifurcation disease with 3-D wax printing and the latest home designed 3-D bio-printer. Although 3-D technologies are currently considered not mature enough for arbitrary 3-D micro-ecological models with easy design and fabrication, I hope through my talk, the audiences will be able to sense its significance and predictable breakthroughs in the near future. This work was supported by the State Key Development Program for Basic Research of China (Grant No. 2013CB837200), the National Natural Science Foundation of China (Grant No. 11474345) and the Beijing Natural Science Foundation (Grant No. 7154221). 20. RT3D tutorials for GMS users SciTech Connect Clement, T.P.; Jones, N.L. 1998-02-01 RT3D (Reactive Transport in 3-Dimensions) is a computer code that solves coupled partial differential equations that describe reactive-flow and transport of multiple mobile and/or immobile species in a three dimensional saturated porous media. RT3D was developed from the single-species transport code, MT3D (DoD-1.5, 1997 version). As with MT3D, RT3D also uses the USGS groundwater flow model MODFLOW for computing spatial and temporal variations in groundwater head distribution. This report presents a set of tutorial problems that are designed to illustrate how RT3D simulations can be performed within the Department of Defense Groundwater Modeling System (GMS). GMS serves as a pre- and post-processing interface for RT3D. 
GMS can be used to define all the input files needed by RT3D code, and later the code can be launched from within GMS and run as a separate application. Once the RT3D simulation is completed, the solution can be imported to GMS for graphical post-processing. RT3D v1.0 supports several reaction packages that can be used for simulating different types of reactive contaminants. Each of the tutorials, described below, provides training on a different RT3D reaction package. Each reaction package has different input requirements, and the tutorials are designed to describe these differences. Furthermore, the tutorials illustrate the various options available in GMS for graphical post-processing of RT3D results. Users are strongly encouraged to complete the tutorials before attempting to use RT3D and GMS on a routine basis. 1. 3D change detection - Approaches and applications Qin, Rongjun; Tian, Jiaojiao; Reinartz, Peter 2016-12-01 Due to the unprecedented technology development of sensors, platforms and algorithms for 3D data acquisition and generation, 3D spaceborne, airborne and close-range data, in the form of image based, Light Detection and Ranging (LiDAR) based point clouds, Digital Elevation Models (DEM) and 3D city models, become more accessible than ever before. Change detection (CD) or time-series data analysis in 3D has gained great attention due to its capability of providing volumetric dynamics to facilitate more applications and provide more accurate results. The state-of-the-art CD reviews aim to provide a comprehensive synthesis and to simplify the taxonomy of the traditional remote sensing CD techniques, which mainly sit within the boundary of 2D image/spectrum analysis, largely ignoring the particularities of 3D aspects of the data. 
The inclusion of 3D data for change detection (termed 3D CD), not only provides a source with different modality for analysis, but also transcends the border of traditional top-view 2D pixel/object-based analysis to highly detailed, oblique view or voxel-based geometric analysis. This paper reviews the recent developments and applications of 3D CD using remote sensing and close-range data, in support of both academia and industry researchers who seek for solutions in detecting and analyzing 3D dynamics of various objects of interest. We first describe the general considerations of 3D CD problems in different processing stages and identify CD types based on the information used, being the geometric comparison and geometric-spectral analysis. We then summarize relevant works and practices in urban, environment, ecology and civil applications, etc. Given the broad spectrum of applications and different types of 3D data, we discuss important issues in 3D CD methods. Finally, we present concluding remarks in algorithmic aspects of 3D CD. 2. 3D Printer Coupon removal and stowage NASA Image and Video Library 2014-12-09 iss042e031282 (12/09/2014) ---US Astronaut Barry (Butch) Wilmore holding a 3D coupon works with the new 3D printer aboard the International Space Station. The 3D Printing experiment in zero gravity demonstrates that a 3D printer works normally in space. In general, a 3D printer extrudes streams of heated plastic, metal or other material, building layer on top of layer to create 3 dimensional objects. Testing a 3D printer using relatively low-temperature plastic feedstock on the International Space Station is the first step towards establishing an on-demand machine shop in space, a critical enabling component for deep-space crewed missions and in-space manufacturing. 3. 3D measurement for rapid prototyping Albrecht, Peter; Lilienblum, Tilo; Sommerkorn, Gerd; Michaelis, Bernd 1996-08-01 Optical 3-D measurement is an interesting approach for rapid prototyping. 
On one hand it's necessary to get the 3-D data of an object and on the other hand it's necessary to check the manufactured object (quality checking). Optical 3-D measurement can realize both. Classical 3-D measurement procedures based on photogrammetry cause systematic errors at strongly curved surfaces or steps in surfaces. One possibility to reduce these errors is to calculate the 3-D coordinates from several successively taken images. Thus it's possible to get higher spatial resolution and to reduce the systematic errors at 'problem surfaces.' Another possibility is to process the measurement values by neural networks. A modified associative memory smoothes and corrects the calculated 3-D coordinates using a-priori knowledge about the measurement object. 4. Digital holography and 3-D imaging. PubMed Banerjee, Partha; Barbastathis, George; Kim, Myung; Kukhtarev, Nickolai 2011-03-01 This feature issue on Digital Holography and 3-D Imaging comprises 15 papers on digital holographic techniques and applications, computer-generated holography and encryption techniques, and 3-D display. It is hoped that future work in the area leads to innovative applications of digital holography and 3-D imaging to biology and sensing, and to the development of novel nonlinear dynamic digital holographic techniques. 5. 3D carotid plaque MR Imaging PubMed Central Parker, Dennis L. 2015-01-01 SYNOPSIS There has been significant progress made in 3D carotid plaque magnetic resonance imaging techniques in recent years. 3D plaque imaging clearly represents the future in clinical use. With effective flow suppression techniques, choices of different contrast weighting acquisitions, and time-efficient imaging approaches, 3D plaque imaging offers flexible imaging plane and view angle analysis, large coverage, multi-vascular beds capability, and even can be used in fast screening. PMID:26610656 6. 
Photorefractive Polymers for Updateable 3D Displays DTIC Science & Technology 2010-02-24 Final Performance Report 3. DATES COVERED (From - To) 01-01-2007 to 11-30-2009 4. TITLE AND SUBTITLE Photorefractive Polymers for Updateable 3D ...ABSTRACT During the tenure of this project a large area updateable 3D color display has been developed for the first time using a new co-polymer...photorefractive polymers have been demonstrated. Moreover, a 6 inch × 6 inch sample was fabricated demonstrating the feasibility of making large area 3D 7. Dimensional accuracy of 3D printed vertebra Ogden, Kent; Ordway, Nathaniel; Diallo, Dalanda; Tillapaugh-Fay, Gwen; Aslan, Can 2014-03-01 3D printer applications in the biomedical sciences and medical imaging are expanding and will have an increasing impact on the practice of medicine. Orthopedic and reconstructive surgery has been an obvious area for development of 3D printer applications as the segmentation of bony anatomy to generate printable models is relatively straightforward. There are important issues that should be addressed when using 3D printed models for applications that may affect patient care; in particular the dimensional accuracy of the printed parts needs to be high to avoid poor decisions being made prior to surgery or therapeutic procedures. In this work, the dimensional accuracy of 3D printed vertebral bodies derived from CT data for a cadaver spine is compared with direct measurements on the ex-vivo vertebra and with measurements made on the 3D rendered vertebra using commercial 3D image processing software. The vertebra was printed on a consumer grade 3D printer using an additive print process using PLA (polylactic acid) filament. Measurements were made for 15 different anatomic features of the vertebral body, including vertebral body height, endplate width and depth, pedicle height and width, and spinal canal width and depth, among others. 
It is shown that for the segmentation and printing process used, the results of measurements made on the 3D printed vertebral body are substantially the same as those produced by direct measurement on the vertebra and measurements made on the 3D rendered vertebra. 8. TAURUS. 3-D Finite Element Code Postprocessor SciTech Connect Whirley, R.G. 1984-05-01 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D, DYNA3D, TACO3D, TOPAZ3D, and GEMINI and plots contours, time histories,and deformed shapes. Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 9. TAURUS. 3-D Finite Element Code Postprocessor SciTech Connect Kennedy, T. 1992-03-03 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D, DYNA3D, TACO3D, TOPAZ3D, and GEMINI and plots contours, time histories, and deformed shapes. Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 10. TAURUS. 3-D Finite Element Code Postprocessor SciTech Connect Whirley, R.G. 1993-11-30 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D, DYNA3D, TACO3D, TOPAZ3D, and GEMINI and plots contours, time histories,and deformed shapes. Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. 
TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 11. TAURUS. 3-d Finite Element Code Postprocessor SciTech Connect Whirley, R.G. 1991-05-01 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D (ESTSC 139), DYNA3D (ESTSC 138), TACO3D (ESTSC 287), TOPAZ3D (ESTSC 231), and GEMINI (ESTSC 455) and plots contours, time histories,and deformed shapes. Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 12. TAURUS. 3-d Finite Element Code Postprocessor SciTech Connect Whirley, R.G. 1992-03-03 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D (ESTSC 139), DYNA3D (ESTSC 138), TACO3D (ESTSC 287), TOPAZ3D (ESTSC 231), and GEMINI (ESTSC 455) and plots contours, time histories,and deformed shapes. Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 13. TAURUS. 3-D Finite Element Code Postprocessor SciTech Connect Whirley, R.G. 1992-03-03 TAURUS reads the binary plot files generated by the LLNL three-dimensional finite element analysis codes, NIKE3D, DYNA3D, TACO3D, TOPAZ3D, and GEMINI and plots contours, time histories,and deformed shapes. 
Contours of a large number of quantities may be plotted on meshes consisting of plate, shell, and solid type elements. TAURUS can compute a variety of strain measures, reaction forces along constrained boundaries, and momentum. TAURUS has three phases: initialization, geometry display with contouring, and time history processing. 14. 3D Lunar Terrain Reconstruction from Apollo Images NASA Technical Reports Server (NTRS) Broxton, Michael J.; Nefian, Ara V.; Moratto, Zachary; Kim, Taemin; Lundy, Michael; Segal, Aleksandr V. 2009-01-01 Generating accurate three dimensional planetary models is becoming increasingly important as NASA plans manned missions to return to the Moon in the next decade. This paper describes a 3D surface reconstruction system called the Ames Stereo Pipeline that is designed to produce such models automatically by processing orbital stereo imagery. We discuss two important core aspects of this system: (1) refinement of satellite station positions and pose estimates through least squares bundle adjustment; and (2) a stochastic plane fitting algorithm that generalizes the Lucas-Kanade method for optimal matching between stereo pair images. These techniques allow us to automatically produce seamless, highly accurate digital elevation models from multiple stereo image pairs while significantly reducing the influence of image noise. Our technique is demonstrated on a set of 71 high resolution scanned images from the Apollo 15 mission. 15. FastScript3D - A Companion to Java 3D NASA Technical Reports Server (NTRS) Koenig, Patti 2005-01-01 FastScript3D is a computer program, written in the Java 3D(TM) programming language, that establishes an alternative language that helps users who lack expertise in Java 3D to use Java 3D for constructing three-dimensional (3D)-appearing graphics. The FastScript3D language provides a set of simple, intuitive, one-line text-string commands for creating, controlling, and animating 3D models. 
The first word in a string is the name of a command; the rest of the string contains the data arguments for the command. The commands can also be used as an aid to learning Java 3D. Developers can extend the language by adding custom text-string commands. The commands can define new 3D objects or load representations of 3D objects from files in formats compatible with such other software systems as X3D. The text strings can be easily integrated into other languages. FastScript3D facilitates communication between scripting languages [which enable programming of hyper-text markup language (HTML) documents to interact with users] and Java 3D. The FastScript3D language can be extended and customized on both the scripting side and the Java 3D side. 16. 3D PDF - a means of public access to geological 3D - objects, using the example of GTA3D Slaby, Mark-Fabian; Reimann, Rüdiger 2013-04-01 17. Finding the Enemy: Using 3-D Laser Radar (LADAR) Imaging for Real Time Combat Identification of Ground Targets in an Obscured Environment DTIC Science & Technology 2010-04-01 Algorithms”, p. 197. 23 Ibid, p. 197. 24 http://encyclopedia2.thefreedictionary.com/Bayesian+theory, p. 1. 25 Abdallah, Mahmoud A., Tayib I. Samu , and...Bibliography Abdallah, Mahmoud A., Tayib I. Samu , and William A. Grissom. “Automatic Target Identification Using Neural Networks.” SPIE Vol 18. 3D ultrafast ultrasound imaging in vivo Provost, Jean; Papadacci, Clement; Esteban Arango, Juan; Imbault, Marion; Fink, Mathias; Gennisson, Jean-Luc; Tanter, Mickael; Pernot, Mathieu 2014-10-01 Very high frame rate ultrasound imaging has recently allowed for the extension of the applications of echography to new fields of study such as the functional imaging of the brain, cardiac electrophysiology, and the quantitative imaging of the intrinsic mechanical properties of tumors, to name a few, non-invasively and in real time. 
In this study, we present the first implementation of Ultrafast Ultrasound Imaging in 3D based on the use of either diverging or plane waves emanating from a sparse virtual array located behind the probe. It achieves high contrast and resolution while maintaining imaging rates of thousands of volumes per second. A customized portable ultrasound system was developed to sample 1024 independent channels and to drive a 32 × 32 matrix-array probe. Its ability to track in 3D transient phenomena occurring in the millisecond range within a single ultrafast acquisition was demonstrated for 3D Shear-Wave Imaging, 3D Ultrafast Doppler Imaging, and, finally, 3D Ultrafast combined Tissue and Flow Doppler Imaging. The propagation of shear waves was tracked in a phantom and used to characterize its stiffness. 3D Ultrafast Doppler was used to obtain 3D maps of Pulsed Doppler, Color Doppler, and Power Doppler quantities in a single acquisition and revealed, at thousands of volumes per second, the complex 3D flow patterns occurring in the ventricles of the human heart during an entire cardiac cycle, as well as the 3D in vivo interaction of blood flow and wall motion during the pulse wave in the carotid at the bifurcation. This study demonstrates the potential of 3D Ultrafast Ultrasound Imaging for the 3D mapping of stiffness, tissue motion, and flow in humans in vivo and promises new clinical applications of ultrasound with reduced intra- and inter-observer variability. 19. An aerial 3D printing test mission Hirsch, Michael; McGuire, Thomas; Parsons, Michael; Leake, Skye; Straub, Jeremy 2016-05-01 This paper provides an overview of an aerial 3D printing technology, its development and its testing. This technology is potentially useful in its own right. In addition, this work advances the development of a related in-space 3D printing technology. A series of aerial 3D printing test missions, used to test the aerial printing technology, are discussed.
Through completing these test missions, the design for an in-space 3D printer may be advanced. The current design for the in-space 3D printer involves focusing thermal energy to heat an extrusion head and allow for the extrusion of molten print material. Plastics can be used as well as composites including metal, allowing for the extrusion of conductive material. A variety of experiments will be used to test this initial 3D printer design. High altitude balloons will be used to test the effects of microgravity on 3D printing, as well as parabolic flight tests. Zero pressure balloons can be used to test the effect of long 3D printing missions subjected to low temperatures. Vacuum chambers will be used to test 3D printing in a vacuum environment. The results will be used to adapt a current prototype of an in-space 3D printer. Then, a small scale prototype can be sent into low-Earth orbit as a 3-U cube satellite. With the ability to 3D print in space demonstrated, future missions can launch production hardware through which the sustainability and durability of structures in space will be greatly improved. 20. 3D ultrafast ultrasound imaging in vivo. PubMed Provost, Jean; Papadacci, Clement; Arango, Juan Esteban; Imbault, Marion; Fink, Mathias; Gennisson, Jean-Luc; Tanter, Mickael; Pernot, Mathieu 2014-10-07 Very high frame rate ultrasound imaging has recently allowed for the extension of the applications of echography to new fields of study such as the functional imaging of the brain, cardiac electrophysiology, and the quantitative imaging of the intrinsic mechanical properties of tumors, to name a few, non-invasively and in real time. In this study, we present the first implementation of Ultrafast Ultrasound Imaging in 3D based on the use of either diverging or plane waves emanating from a sparse virtual array located behind the probe. It achieves high contrast and resolution while maintaining imaging rates of thousands of volumes per second. 
A customized portable ultrasound system was developed to sample 1024 independent channels and to drive a 32 × 32 matrix-array probe. Its ability to track in 3D transient phenomena occurring in the millisecond range within a single ultrafast acquisition was demonstrated for 3D Shear-Wave Imaging, 3D Ultrafast Doppler Imaging, and, finally, 3D Ultrafast combined Tissue and Flow Doppler Imaging. The propagation of shear waves was tracked in a phantom and used to characterize its stiffness. 3D Ultrafast Doppler was used to obtain 3D maps of Pulsed Doppler, Color Doppler, and Power Doppler quantities in a single acquisition and revealed, at thousands of volumes per second, the complex 3D flow patterns occurring in the ventricles of the human heart during an entire cardiac cycle, as well as the 3D in vivo interaction of blood flow and wall motion during the pulse wave in the carotid at the bifurcation. This study demonstrates the potential of 3D Ultrafast Ultrasound Imaging for the 3D mapping of stiffness, tissue motion, and flow in humans in vivo and promises new clinical applications of ultrasound with reduced intra- and inter-observer variability. 1. Wow! 3D Content Awakens the Classroom ERIC Educational Resources Information Center Gordon, Dan 2010-01-01 From her first encounter with stereoscopic 3D technology designed for classroom instruction, Megan Timme, principal at Hamilton Park Pacesetter Magnet School in Dallas, sensed it could be transformative. Last spring, when she began pilot-testing 3D content in her third-, fourth- and fifth-grade classrooms, Timme wasn't disappointed. Students… 2. Pathways for Learning from 3D Technology ERIC Educational Resources Information Center Carrier, L. Mark; Rab, Saira S.; Rosen, Larry D.; Vasquez, Ludivina; Cheever, Nancy A. 2012-01-01 The purpose of this study was to find out if 3D stereoscopic presentation of information in a movie format changes a viewer's experience of the movie content.
Four possible pathways from 3D presentation to memory and learning were considered: a direct connection based on cognitive neuroscience research; a connection through "immersion"… 3. Wow! 3D Content Awakens the Classroom ERIC Educational Resources Information Center Gordon, Dan 2010-01-01 From her first encounter with stereoscopic 3D technology designed for classroom instruction, Megan Timme, principal at Hamilton Park Pacesetter Magnet School in Dallas, sensed it could be transformative. Last spring, when she began pilot-testing 3D content in her third-, fourth- and fifth-grade classrooms, Timme wasn't disappointed. Students… 4. Infrastructure for 3D Imaging Test Bed DTIC Science & Technology 2007-05-11 analysis. (c.) Real time detection & analysis of human gait: using a video camera we capture walking human silhouette for pattern modeling and gait ... analysis . Fig. 5 shows the scanning result result that is fed into a Geo-magic software tool for 3D meshing. Fig. 5: 3D scanning result In 5. Berries on the Ground 2 3-D NASA Image and Video Library 2004-02-12 This 3-D anaglyph, from NASA Mars Exploration Rover Spirit, shows a microscopic image taken of soil featuring round, blueberry-shaped rock formations on the crater floor at Meridiani Planum, Mars. 3D glasses are necessary to view this image. 6. 3D Printing of Molecular Models ERIC Educational Resources Information Center 2016-01-01 Physical molecular models have played a valuable role in our understanding of the invisible nano-scale world. We discuss 3D printing and its use in producing models of the molecules of life. Complex biomolecular models, produced from 3D printed parts, can demonstrate characteristics of molecular structure and function, such as viral self-assembly,… 7. 3D Printing. What's the Harm? 
ERIC Educational Resources Information Center Love, Tyler S.; Roy, Ken 2016-01-01 Health concerns from 3D printing were first documented by Stephens, Azimi, Orch, and Ramos (2013), who found that commercially available 3D printers were producing hazardous levels of ultrafine particles (UFPs) and volatile organic compounds (VOCs) when plastic materials were melted through the extruder. UFPs are particles less than 100 nanometers… 8. 3D Printed Block Copolymer Nanostructures ERIC Educational Resources Information Center Scalfani, Vincent F.; Turner, C. Heath; Rupar, Paul A.; Jenkins, Alexander H.; Bara, Jason E. 2015-01-01 The emergence of 3D printing has dramatically advanced the availability of tangible molecular and extended solid models. Interestingly, there are few nanostructure models available both commercially and through other do-it-yourself approaches such as 3D printing. This is unfortunate given the importance of nanotechnology in science today. In this… 9. 3D elastic control for mobile devices. PubMed Hachet, Martin; Pouderoux, Joachim; Guitton, Pascal 2008-01-01 To increase the input space of mobile devices, the authors developed a proof-of-concept 3D elastic controller that easily adapts to mobile devices. This embedded device improves the completion of high-level interaction tasks such as visualization of large documents and navigation in 3D environments. It also opens new directions for tomorrow's mobile applications. 10. Immersive 3D Geovisualization in Higher Education ERIC Educational Resources Information Center Philips, Andrea; Walz, Ariane; Bergner, Andreas; Graeff, Thomas; Heistermann, Maik; Kienzler, Sarah; Korup, Oliver; Lipp, Torsten; Schwanghart, Wolfgang; Zeilinger, Gerold 2015-01-01 In this study, we investigate how immersive 3D geovisualization can be used in higher education. 
Based on MacEachren and Kraak's geovisualization cube, we examine the usage of immersive 3D geovisualization and its usefulness in a research-based learning module on flood risk, called GEOSimulator. Results of a survey among participating students… 11. Stereo 3-D Vision in Teaching Physics ERIC Educational Resources Information Center Zabunov, Svetoslav 2012-01-01 Stereo 3-D vision is a technology used to present images on a flat surface (screen, paper, etc.) and at the same time to create the notion of three-dimensional spatial perception of the viewed scene. A great number of physical processes are much better understood when viewed in stereo 3-D vision compared to standard flat 2-D presentation. The… 12. 3D Printing of Molecular Models ERIC Educational Resources Information Center 2016-01-01 Physical molecular models have played a valuable role in our understanding of the invisible nano-scale world. We discuss 3D printing and its use in producing models of the molecules of life. Complex biomolecular models, produced from 3D printed parts, can demonstrate characteristics of molecular structure and function, such as viral self-assembly,… 13. Computer Assisted Cancer Device - 3D Imaging DTIC Science & Technology 2006-10-01 tomosynthesis images of the breast. iCAD has identified several sources of 3D tomosynthesis data, and has begun adapting its image analysis...collaborative relationships with major manufacturers of tomosynthesis equipment. 21. iCAD believes that tomosynthesis , a 3D breast imaging technique...purported advantages of tomosynthesis relative to conventional mammography include; improved lesion visibility, improved lesion detectability and 14. 3D Printed Block Copolymer Nanostructures ERIC Educational Resources Information Center Scalfani, Vincent F.; Turner, C. Heath; Rupar, Paul A.; Jenkins, Alexander H.; Bara, Jason E. 
2015-01-01 The emergence of 3D printing has dramatically advanced the availability of tangible molecular and extended solid models. Interestingly, there are few nanostructure models available both commercially and through other do-it-yourself approaches such as 3D printing. This is unfortunate given the importance of nanotechnology in science today. In this… 15. Stereo 3-D Vision in Teaching Physics ERIC Educational Resources Information Center Zabunov, Svetoslav 2012-01-01 Stereo 3-D vision is a technology used to present images on a flat surface (screen, paper, etc.) and at the same time to create the notion of three-dimensional spatial perception of the viewed scene. A great number of physical processes are much better understood when viewed in stereo 3-D vision compared to standard flat 2-D presentation. The… 16. Case study of 3D fingerprints applications. PubMed Liu, Feng; Liang, Jinrong; Shen, Linlin; Yang, Meng; Zhang, David; Lai, Zhihui 2017-01-01 Human fingers are 3D objects. More information will be provided if three dimensional (3D) fingerprints are available compared with two dimensional (2D) fingerprints. Thus, this paper firstly collected 3D finger point cloud data by Structured-light Illumination method. Additional features from 3D fingerprint images are then studied and extracted. The applications of these features are finally discussed. A series of experiments are conducted to demonstrate the helpfulness of 3D information to fingerprint recognition. Results show that a quick alignment can be easily implemented under the guidance of 3D finger shape feature even though this feature does not work for fingerprint recognition directly. The newly defined distinctive 3D shape ridge feature can be used for personal authentication with Equal Error Rate (EER) of ~8.3%. Also, it is helpful to remove false core point. 
Furthermore, a promising of EER ~1.3% is realized by combining this feature with 2D features for fingerprint recognition which indicates the prospect of 3D fingerprint recognition. 17. A 3D Geostatistical Mapping Tool SciTech Connect Weiss, W. W.; Stevenson, Graig; Patel, Ketan; Wang, Jun 1999-02-09 This software provides accurate 3D reservoir modeling tools and high quality 3D graphics for PC platforms enabling engineers and geologists to better comprehend reservoirs and consequently improve their decisions. The mapping algorithms are fractals, kriging, sequential guassian simulation, and three nearest neighbor methods. 18. Topology dictionary for 3D video understanding. PubMed Tung, Tony; Matsuyama, Takashi 2012-08-01 This paper presents a novel approach that achieves 3D video understanding. 3D video consists of a stream of 3D models of subjects in motion. The acquisition of long sequences requires large storage space (2 GB for 1 min). Moreover, it is tedious to browse data sets and extract meaningful information. We propose the topology dictionary to encode and describe 3D video content. The model consists of a topology-based shape descriptor dictionary which can be generated from either extracted patterns or training sequences. The model relies on 1) topology description and classification using Reeb graphs, and 2) a Markov motion graph to represent topology change states. We show that the use of Reeb graphs as the high-level topology descriptor is relevant. It allows the dictionary to automatically model complex sequences, whereas other strategies would require prior knowledge on the shape and topology of the captured subjects. Our approach serves to encode 3D video sequences, and can be applied for content-based description and summarization of 3D video sequences. Furthermore, topology class labeling during a learning process enables the system to perform content-based event recognition. Experiments were carried out on various 3D videos. 
We showcase an application for 3D video progressive summarization using the topology dictionary. 19. 3D, or Not to Be? ERIC Educational Resources Information Center Norbury, Keith 2012-01-01 It may be too soon for students to be showing up for class with popcorn and gummy bears, but technology similar to that behind the 3D blockbuster movie "Avatar" is slowly finding its way into college classrooms. 3D classroom projectors are taking students on fantastic voyages inside the human body, to the ruins of ancient Greece--even to faraway… 20. Immersive 3D Geovisualization in Higher Education ERIC Educational Resources Information Center Philips, Andrea; Walz, Ariane; Bergner, Andreas; Graeff, Thomas; Heistermann, Maik; Kienzler, Sarah; Korup, Oliver; Lipp, Torsten; Schwanghart, Wolfgang; Zeilinger, Gerold 2015-01-01 In this study, we investigate how immersive 3D geovisualization can be used in higher education. Based on MacEachren and Kraak's geovisualization cube, we examine the usage of immersive 3D geovisualization and its usefulness in a research-based learning module on flood risk, called GEOSimulator. Results of a survey among participating students… 1. 3D, or Not to Be? ERIC Educational Resources Information Center Norbury, Keith 2012-01-01 It may be too soon for students to be showing up for class with popcorn and gummy bears, but technology similar to that behind the 3D blockbuster movie "Avatar" is slowly finding its way into college classrooms. 3D classroom projectors are taking students on fantastic voyages inside the human body, to the ruins of ancient Greece--even to faraway… 2. 3D Printing. What's the Harm? 
ERIC Educational Resources Information Center Love, Tyler S.; Roy, Ken 2016-01-01 Health concerns from 3D printing were first documented by Stephens, Azimi, Orch, and Ramos (2013), who found that commercially available 3D printers were producing hazardous levels of ultrafine particles (UFPs) and volatile organic compounds (VOCs) when plastic materials were melted through the extruder. UFPs are particles less than 100 nanometers… 3. DOUGLAS XA3D-1 #413 AIRPLANE. NASA Image and Video Library 1955-07-27 DOUGLAS XA3D-1 #413 AIRPLANE MOUNTED IN THE NACA AMES RESEARCH CENTER'S 40X80-FOOT SUBSONIC WIND TUNNEL Testing the boundary layer control of the A3D in the 40 x 80 wind tunnel. Boundary layer control was added to increase the lift of the wing for take off from an aircraft carrier. 4. DOUGLAS XA3D-1 #413 AIRPLANE. NASA Image and Video Library 1955-07-27 DOUGLAS XA3D-1 #413 AIRPLANE MOUNTED IN THE NACA AMES RESEARCH CENTER'S 40X80-FOOT SUBSONIC WIND TUNNEL sweptback wing Testing the wing boundary layer control of the A3D in the 40 x 80 wind tunnel. Boundary layer control was added to increase the lift of the wing for aircraft carrier take off and landing. 5. Static & Dynamic Response of 3D Solids SciTech Connect Lin, Jerry 1996-07-15 NIKE3D is a large deformations 3D finite element code used to obtain the resulting displacements and stresses from multi-body static and dynamic structural thermo-mechanics problems with sliding interfaces. Many nonlinear and temperature dependent constitutive models are available. 6. Integration of real-time 3D image acquisition and multiview 3D display Zhang, Zhaoxing; Geng, Zheng; Li, Tuotuo; Li, Wei; Wang, Jingyi; Liu, Yongchun 2014-03-01 Seamless integration of 3D acquisition and 3D display systems offers enhanced experience in 3D visualization of the real world objects or scenes.
The vivid representation of captured 3D objects displayed on a glasses-free 3D display screen could bring the realistic viewing experience to viewers as if they are viewing real-world scene. Although the technologies in 3D acquisition and 3D display have advanced rapidly in recent years, effort is lacking in studying the seamless integration of these two different aspects of 3D technologies. In this paper, we describe our recent progress on integrating a light-field 3D acquisition system and an autostereoscopic multiview 3D display for real-time light field capture and display. This paper focuses on both the architecture design and the implementation of the hardware and the software of this integrated 3D system. A prototype of the integrated 3D system is built to demonstrate the real-time 3D acquisition and 3D display capability of our proposed system. 7. Quon 3D language for quantum information PubMed Central Liu, Zhengwei; Wozniakowski, Alex; Jaffe, Arthur M. 2017-01-01 We present a 3D topological picture-language for quantum information. Our approach combines charged excitations carried by strings, with topological properties that arise from embedding the strings in the interior of a 3D manifold with boundary. A quon is a composite that acts as a particle. Specifically, a quon is a hemisphere containing a neutral pair of open strings with opposite charge. We interpret multiquons and their transformations in a natural way. We obtain a type of relation, a string–genus “joint relation,” involving both a string and the 3D manifold. We use the joint relation to obtain a topological interpretation of the C∗-Hopf algebra relations, which are widely used in tensor networks. We obtain a 3D representation of the controlled NOT (CNOT) gate that is considerably simpler than earlier work, and a 3D topological protocol for teleportation. PMID:28167790 8. 2D/3D switchable displays Dekker, T.; de Zwart, S. T.; Willemsen, O. H.; Hiddink, M. G. H.; IJzerman, W. L. 
2006-02-01 A prerequisite for a wide market acceptance of 3D displays is the ability to switch between 3D and full resolution 2D. In this paper we present a robust and cost effective concept for an auto-stereoscopic switchable 2D/3D display. The display is based on an LCD panel, equipped with switchable LC-filled lenticular lenses. We will discuss 3D image quality, with the focus on display uniformity. We show that slanting the lenticulars in combination with a good lens design can minimize non-uniformities in our 20" 2D/3D monitors. Furthermore, we introduce fractional viewing systems as a very robust concept to further improve uniformity in the case slanting the lenticulars and optimizing the lens design are not sufficient. We will discuss measurements and numerical simulations of the key optical characteristics of this display. Finally, we discuss 2D image quality, the switching characteristics and the residual lens effect. 9. 6D Interpretation of 3D Gravity Herfray, Yannick; Krasnov, Kirill; Scarinci, Carlos 2017-02-01 We show that 3D gravity, in its pure connection formulation, admits a natural 6D interpretation. The 3D field equations for the connection are equivalent to 6D Hitchin equations for the Chern–Simons 3-form in the total space of the principal bundle over the 3-dimensional base. Turning this construction around one gets an explanation of why the pure connection formulation of 3D gravity exists. More generally, we interpret 3D gravity as the dimensional reduction of the 6D Hitchin theory. To this end, we show that any SU(2) invariant closed 3-form in the total space of the principal SU(2) bundle can be parametrised by a connection together with a 2-form field on the base. The dimensional reduction of the 6D Hitchin theory then gives rise to 3D gravity coupled to a topological 2-form field. 10.
BEAMS3D Neutral Beam Injection Model SciTech Connect Lazerson, Samuel 2014-04-14 With the advent of applied 3D fields in Tokamaks and modern high performance stellarators, a need has arisen to address non-axisymmetric effects on neutral beam heating and fueling. We report on the development of a fully 3D neutral beam injection (NBI) model, BEAMS3D, which addresses this need by coupling 3D equilibria to a guiding center code capable of modeling neutral and charged particle trajectories across the separatrix and into the plasma core. Ionization, neutralization, charge-exchange, viscous velocity reduction, and pitch angle scattering are modeled with the ADAS atomic physics database [1]. Benchmark calculations are presented to validate the collisionless particle orbits, neutral beam injection model, frictional drag, and pitch angle scattering effects. A calculation of neutral beam heating in the NCSX device is performed, highlighting the capability of the code to handle 3D magnetic fields. 11. Fabrication of 3D Silicon Sensors SciTech Connect Kok, A.; Hansen, T.E.; Hansen, T.A.; Lietaer, N.; Summanwar, A.; Kenney, C.; Hasi, J.; Da Via, C.; Parker, S.I.; /Hawaii U. 2012-06-06 Silicon sensors with a three-dimensional (3-D) architecture, in which the n and p electrodes penetrate through the entire substrate, have many advantages over planar silicon sensors including radiation hardness, fast time response, active edge and dual readout capabilities. The fabrication of 3D sensors is however rather complex. In recent years, there have been worldwide activities on 3D fabrication. SINTEF in collaboration with Stanford Nanofabrication Facility have successfully fabricated the original (single sided double column type) 3D detectors in two prototype runs and the third run is now on-going. This paper reports the status of this fabrication work and the resulting yield. The work of other groups such as the development of double sided 3D detectors is also briefly reported. 12.
Biocompatible 3D Matrix with Antimicrobial Properties. PubMed Ion, Alberto; Andronescu, Ecaterina; Rădulescu, Dragoș; Rădulescu, Marius; Iordache, Florin; Vasile, Bogdan Ștefan; Surdu, Adrian Vasile; Albu, Madalina Georgiana; Maniu, Horia; Chifiriuc, Mariana Carmen; Grumezescu, Alexandru Mihai; Holban, Alina Maria 2016-01-20 The aim of this study was to develop, characterize and assess the biological activity of a new regenerative 3D matrix with antimicrobial properties, based on collagen (COLL), hydroxyapatite (HAp), β-cyclodextrin (β-CD) and usnic acid (UA). The prepared 3D matrix was characterized by Scanning Electron Microscopy (SEM), Fourier Transform Infrared Microscopy (FT-IRM), Transmission Electron Microscopy (TEM), and X-ray Diffraction (XRD). In vitro qualitative and quantitative analyses performed on cultured diploid cells demonstrated that the 3D matrix is biocompatible, allowing the normal development and growth of MG-63 osteoblast-like cells and exhibited an antimicrobial effect, especially on the Staphylococcus aureus strain, explained by the particular higher inhibitory activity of usnic acid (UA) against Gram positive bacterial strains. Our data strongly recommend the obtained 3D matrix to be used as a successful alternative for the fabrication of three dimensional (3D) anti-infective regeneration matrix for bone tissue engineering. 13. 3D Ultrafast Ultrasound Imaging In Vivo PubMed Central Provost, Jean; Papadacci, Clement; Arango, Juan Esteban; Imbault, Marion; Gennisson, Jean-Luc; Tanter, Mickael; Pernot, Mathieu 2014-01-01 Very high frame rate ultrasound imaging has recently allowed for the extension of the applications of echography to new fields of study such as the functional imaging of the brain, cardiac electrophysiology, and the quantitative real-time imaging of the intrinsic mechanical properties of tumors, to name a few, non-invasively and in real time. 
In this study, we present the first implementation of Ultrafast Ultrasound Imaging in three dimensions based on the use of either diverging or plane waves emanating from a sparse virtual array located behind the probe. It achieves high contrast and resolution while maintaining imaging rates of thousands of volumes per second. A customized portable ultrasound system was developed to sample 1024 independent channels and to drive a 32×32 matrix-array probe. Its capability to track in 3D transient phenomena occurring in the millisecond range within a single ultrafast acquisition was demonstrated for 3-D Shear-Wave Imaging, 3-D Ultrafast Doppler Imaging and finally 3D Ultrafast combined Tissue and Flow Doppler. The propagation of shear waves was tracked in a phantom and used to characterize its stiffness. 3-D Ultrafast Doppler was used to obtain 3-D maps of Pulsed Doppler, Color Doppler, and Power Doppler quantities in a single acquisition and revealed, for the first time, the complex 3-D flow patterns occurring in the ventricles of the human heart during an entire cardiac cycle, and the 3-D in vivo interaction of blood flow and wall motion during the pulse wave in the carotid at the bifurcation. This study demonstrates the potential of 3-D Ultrafast Ultrasound Imaging for the 3-D real-time mapping of stiffness, tissue motion, and flow in humans in vivo and promises new clinical applications of ultrasound with reduced intra- and inter-observer variability. PMID:25207828 14. Pathways for Learning from 3D Technology PubMed Central Carrier, L. Mark; Rab, Saira S.; Rosen, Larry D.; Vasquez, Ludivina; Cheever, Nancy A. 2016-01-01 The purpose of this study was to find out if 3D stereoscopic presentation of information in a movie format changes a viewer's experience of the movie content. 
Four possible pathways from 3D presentation to memory and learning were considered: a direct connection based on cognitive neuroscience research; a connection through "immersion" in that 3D presentations could provide additional sensorial cues (e.g., depth cues) that lead to a higher sense of being surrounded by the stimulus; a connection through general interest such that 3D presentation increases a viewer’s interest that leads to greater attention paid to the stimulus (e.g., "involvement"); and a connection through discomfort, with the 3D goggles causing discomfort that interferes with involvement and thus with memory. The memories of 396 participants who viewed two-dimensional (2D) or 3D movies at movie theaters in Southern California were tested. Within three days of viewing a movie, participants filled out an online anonymous questionnaire that queried them about their movie content memories, subjective movie-going experiences (including emotional reactions and "presence") and demographic backgrounds. The responses to the questionnaire were subjected to path analyses in which several different links between 3D presentation to memory (and other variables) were explored. The results showed there were no effects of 3D presentation, either directly or indirectly, upon memory. However, the largest effects of 3D presentation were on emotions and immersion, with 3D presentation leading to reduced positive emotions, increased negative emotions and lowered immersion, compared to 2D presentations. PMID:28078331 15. 3D Visualization Development of SIUE Campus Nellutla, Shravya Geographic Information Systems (GIS) has progressed from the traditional map-making to the modern technology where the information can be created, edited, managed and analyzed. Like any other models, maps are simplified representations of real world. Hence visualization plays an essential role in the applications of GIS. 
The use of sophisticated visualization tools and methods, especially three dimensional (3D) modeling, has been rising considerably due to the advancement of technology. There are currently many off-the-shelf technologies available in the market to build 3D GIS models. One of the objectives of this research was to examine the available ArcGIS and its extensions for 3D modeling and visualization and use them to depict a real world scenario. Furthermore, with the advent of the web, a platform for accessing and sharing spatial information on the Internet, it is possible to generate interactive online maps. Integrating Internet capacity with GIS functionality redefines the process of sharing and processing the spatial information. Enabling a 3D map online requires off-the-shelf GIS software, 3D model builders, web server, web applications and client server technologies. Such environments are either complicated or expensive because of the amount of hardware and software involved. Therefore, the second objective of this research was to investigate and develop simpler yet cost-effective 3D modeling approach that uses available ArcGIS suite products and the free 3D computer graphics software for designing 3D world scenes. Both ArcGIS Explorer and ArcGIS Online will be used to demonstrate the way of sharing and distributing 3D geographic information on the Internet. A case study of the development of 3D campus for the Southern Illinois University Edwardsville is demonstrated. 16. The psychology of the 3D experience Janicke, Sophie H.; Ellis, Andrew 2013-03-01 With 3D televisions expected to reach 50% home saturation as early as 2016, understanding the psychological mechanisms underlying the user response to 3D technology is critical for content providers, educators and academics. 
Unfortunately, research examining the effects of 3D technology has not kept pace with the technology's rapid adoption, resulting in large-scale use of a technology about which very little is actually known. Recognizing this need for new research, we conducted a series of studies measuring and comparing many of the variables and processes underlying both 2D and 3D media experiences. In our first study, we found narratives within primetime dramas had the power to shift viewer attitudes in both 2D and 3D settings. However, we found no difference in persuasive power between 2D and 3D content. We contend this lack of effect was the result of poor conversion quality and the unique demands of 3D production. In our second study, we found 3D technology significantly increased enjoyment when viewing sports content, yet offered no added enjoyment when viewing a movie trailer. The enhanced enjoyment of the sports content was shown to be the result of heightened emotional arousal and attention in the 3D condition. We believe the lack of effect found for the movie trailer may be genre-related. In our final study, we found 3D technology significantly enhanced enjoyment of two video games from different genres. The added enjoyment was found to be the result of an increased sense of presence. 17. Auditory Imagery: Empirical Findings ERIC Educational Resources Information Center Hubbard, Timothy L. 2010-01-01 The empirical literature on auditory imagery is reviewed. Data on (a) imagery for auditory features (pitch, timbre, loudness), (b) imagery for complex nonverbal auditory stimuli (musical contour, melody, harmony, tempo, notational audiation, environmental sounds), (c) imagery for verbal stimuli (speech, text, in dreams, interior monologue), (d)… 18. Auditory Imagery: Empirical Findings ERIC Educational Resources Information Center Hubbard, Timothy L. 2010-01-01 The empirical literature on auditory imagery is reviewed. 
Data on (a) imagery for auditory features (pitch, timbre, loudness), (b) imagery for complex nonverbal auditory stimuli (musical contour, melody, harmony, tempo, notational audiation, environmental sounds), (c) imagery for verbal stimuli (speech, text, in dreams, interior monologue), (d)… 19. The 3D Elevation Program: summary for Michigan USGS Publications Warehouse Carswell, William J. 2014-01-01 The National Enhanced Elevation Assessment evaluated multiple elevation data acquisition options to determine the optimal data quality and data replacement cycle relative to cost to meet the identified requirements of the user community. The evaluation demonstrated that lidar acquisition at quality level 2 for the conterminous United States and quality level 5 interferometric synthetic aperture radar (ifsar) data for Alaska with a 6- to 10-year acquisition cycle provided the highest benefit/cost ratios. The 3D Elevation Program (3DEP) initiative selected an 8-year acquisition cycle for the respective quality levels. 3DEP, managed by the U.S. Geological Survey, the Office of Management and Budget Circular A–16 lead agency for terrestrial elevation data, responds to the growing need for high-quality topographic data and a wide range of other 3D representations of the Nation's natural and constructed features. The Michigan Statewide Authoritative Imagery and Lidar (MiSAIL) program provides statewide lidar coordination with local, State, and national groups in support of 3DEP for Michigan. 20. 3D city models completion by fusing lidar and image data Grammatikopoulos, L.; Kalisperakis, I.; Petsa, E.; Stentoumis, C. 2015-05-01 A fundamental step in the generation of visually detailed 3D city models is the acquisition of high fidelity 3D data. Typical approaches employ DSM representations usually derived from Lidar (Light Detection and Ranging) airborne scanning or image based procedures. 
In this contribution, we focus on the fusion of data from both these methods in order to enhance or complete them. Particularly, we combine an existing Lidar and orthomosaic dataset (used as reference), with a new aerial image acquisition (including both vertical and oblique imagery) of higher resolution, which was carried out in the area of Kallithea, in Athens, Greece. In a preliminary step, a digital orthophoto and a DSM is generated from the aerial images in an arbitrary reference system, by employing a Structure from Motion and dense stereo matching framework. The image-to-Lidar registration is performed by 2D feature (SIFT and SURF) extraction and matching among the two orthophotos. The established point correspondences are assigned with 3D coordinates through interpolation on the reference Lidar surface, are then backprojected onto the aerial images, and finally matched with 2D image features located in the vicinity of the backprojected 3D points. Consequently, these points serve as Ground Control Points with appropriate weights for final orientation and calibration of the images through a bundle adjustment solution. By these means, the aerial imagery which is optimally aligned to the reference dataset can be used for the generation of an enhanced and more accurately textured 3D city model. 1. Improvement of range accuracy of photon counting chirped AM ladar using phase postprocessing. PubMed Zhang, Zijing; Zhao, Yuan; Zhang, Yong; Wu, Long; Su, Jianzhong 2013-04-10 The photon counting detection of Geiger mode avalanche photodiode is discrete due to its dead time, therefore the intermediate frequency (IF) spectrum is also discrete after the mixing and fast Fourier transform processing. When the peak of the IF spectrum is in the interval of the discrete IF spectrum, it limits the range accuracy without obtaining the exact position of the desired target in the interval. 
In this paper, the phase postprocessing method is proposed, which extracts not only the frequency of the IF signal, but also the phase of the IF signal that was not exploited before. The theoretical analysis demonstrates significant improvements in the range accuracy of the ladar and the simulation verifies the validity of the method. 2. Target recognition of ladar range images using even-order Zernike moments. PubMed Liu, Zheng-Jun; Li, Qi; Xia, Zhi-Wei; Wang, Qi 2012-11-01 Ladar range images have attracted considerable attention in automatic target recognition fields. In this paper, Zernike moments (ZMs) are applied to classify the target of the range image from an arbitrary azimuth angle. However, ZMs suffer from high computational costs. To improve the performance of target recognition based on small samples, even-order ZMs with serial-parallel backpropagation neural networks (BPNNs) are applied to recognize the target of the range image. It is found that the rotation invariance and classified performance of the even-order ZMs are both better than for odd-order moments and for moments compressed by principal component analysis. The experimental results demonstrate that combining the even-order ZMs with serial-parallel BPNNs can significantly improve the recognition rate for small samples. 3. A synthetic aperture imaging ladar demonstrator with Ø300mm antenna and changeable footprint Zhou, Yu; Zhi, Yanan; Yan, Aimin; Xu, Nan; Wang, Lijuan; Wu, Yapeng; Luan, Zhu; Sun, Jianfeng; Liu, Liren 2010-08-01 A demonstrator of synthetic aperture imaging ladar (SAIL) is constructed with the maximum aperture Ø300mm of antenna telescope. This demonstrator can be set with a rectangular aperture to produce a rectangular footprint suitable for scanning format with a high resolution and a wide strip. Particularly, the demonstrator is designed not only for the farfield application but also for the verifying and testing in the near-field in the laboratory space. 
And a 90 degree optical hybrid is used to mitigate the external phase errors caused by turbulence and vibration along line of sight direction and the internal phase errors caused by local fiber delay line. This paper gives the details of the systematic design, and the progresses of the experiment at a target distance around 130m. 4. Speckle effect in a down-looking synthetic aperture imaging ladar Xu, Qian; Zhou, Yu; Sun, Jianfeng; Lu, Zhiyong; Sun, Zhiwei; Liu, Liren 2014-09-01 Down-looking synthetic aperture imaging ladar(SAIL) has overcome many difficulties in side-looking SAIL. However, it is inevitably impacted by the speckle effect. There is temporally varying speckle effect due to the angular deflecting of two coaxial polarization-orthogonal beams transmitted in the orthogonal direction of travel, and a spatial varying speckle effect in the travel direction. Under the coaxial heterodyne, phase variations caused by speckle effect are compensated, leaving the amplitude variations of speckle field. In this paper, the speckle effect in the down-looking SAIL is analyzed, expressions for two-dimensional data collection contained speckle effect are obtained and the two-dimensional image influenced by speckle effect is simulated. 5. Simulation of synthetic aperture imaging ladar (SAIL) for three-dimensional target model Yi, Ning; Wu, Zhen-Sen 2010-11-01 In conventional imaging laser radar, the resolution of target is constrained by the diffraction-limited, which includes the beamwidth of the laser in the target plane and the telescope's aperture. Synthetic aperture imaging Ladar (SAIL) is an imaging technique which employs aperture synthesis with coherent laser radar, the resolution is determined by the total frequency spread of the source and is independent of range, so can achieve fine resolution in long range. 
Ray tracing is utilized here to obtain two-dimensional scattering properties from three-dimensional geometric model of actual target, and range-doppler algorithm is used for synthetic aperture process in laser image simulation. The results show that the SAIL can support better resolution. 6. Optically rewritable 3D liquid crystal displays. PubMed Sun, J; Srivastava, A K; Zhang, W; Wang, L; Chigrinov, V G; Kwok, H S 2014-11-01 Optically rewritable liquid crystal display (ORWLCD) is a concept based on the optically addressed bi-stable display that does not need any power to hold the image after being uploaded. Recently, the demand for the 3D image display has increased enormously. Several attempts have been made to achieve 3D image on the ORWLCD, but all of them involve high complexity for image processing on both hardware and software levels. In this Letter, we disclose a concept for the 3D-ORWLCD by dividing the given image in three parts with different optic axis. A quarter-wave plate is placed on the top of the ORWLCD to modify the emerging light from different domains of the image in different manner. Thereafter, Polaroid glasses can be used to visualize the 3D image. The 3D image can be refreshed, on the 3D-ORWLCD, in one-step with proper ORWLCD printer and image processing, and therefore, with easy image refreshing and good image quality, such displays can be applied for many applications viz. 3D bi-stable display, security elements, etc. 7. Medical 3D Printing for the Radiologist. 
PubMed Mitsouras, Dimitris; Liacouras, Peter; Imanzadeh, Amir; Giannopoulos, Andreas A; Cai, Tianrun; Kumamaru, Kanako K; George, Elizabeth; Wake, Nicole; Caterson, Edward J; Pomahac, Bohdan; Ho, Vincent B; Grant, Gerald T; Rybicki, Frank J 2015-01-01 While use of advanced visualization in radiology is instrumental in diagnosis and communication with referring clinicians, there is an unmet need to render Digital Imaging and Communications in Medicine (DICOM) images as three-dimensional (3D) printed models capable of providing both tactile feedback and tangible depth information about anatomic and pathologic states. Three-dimensional printed models, already entrenched in the nonmedical sciences, are rapidly being embraced in medicine as well as in the lay community. Incorporating 3D printing from images generated and interpreted by radiologists presents particular challenges, including training, materials and equipment, and guidelines. The overall costs of a 3D printing laboratory must be balanced by the clinical benefits. It is expected that the number of 3D-printed models generated from DICOM images for planning interventions and fabricating implants will grow exponentially. Radiologists should at a minimum be familiar with 3D printing as it relates to their field, including types of 3D printing technologies and materials used to create 3D-printed anatomic models, published applications of models to date, and clinical benefits in radiology. Online supplemental material is available for this article. (©)RSNA, 2015. 8. 3D imaging in forensic odontology. PubMed Evans, Sam; Jones, Carl; Plassmann, Peter 2010-06-16 This paper describes the investigation of a new 3D capture method for acquiring and subsequent forensic analysis of bite mark injuries on human skin. When documenting bite marks with standard 2D cameras errors in photographic technique can occur if best practice is not followed. 
Subsequent forensic analysis of the mark is problematic when a 3D structure is recorded into a 2D space. Although strict guidelines (BAFO) exist, these are time-consuming to follow and, due to their complexity, may produce errors. A 3D image capture and processing system might avoid the problems resulting from the 2D reduction process, simplifying the guidelines and reducing errors. Proposed Solution: a series of experiments are described in this paper to demonstrate that the potential of a 3D system might produce suitable results. The experiments tested precision and accuracy of the traditional 2D and 3D methods. A 3D image capture device minimises the amount of angular distortion, therefore such a system has the potential to create more robust forensic evidence for use in courts. A first set of experiments tested and demonstrated which method of forensic analysis creates the least amount of intra-operator error. A second set tested and demonstrated which method of image capture creates the least amount of inter-operator error and visual distortion. In a third set the effects of angular distortion on 2D and 3D methods of image capture were evaluated. 9. NUBEAM developments and 3d halo modeling Gorelenkova, M. V.; Medley, S. S.; Kaye, S. M. 2012-10-01 Recent developments related to the 3D halo model in NUBEAM code are described. To have a reliable halo neutral source for diagnostic simulation, the TRANSP/NUBEAM code has been enhanced with full implementation of ADAS atomic physic ground state and excited state data for hydrogenic beams and mixed species plasma targets. The ADAS codes and database provide the density and temperature dependence of the atomic data, and the collective nature of the state excitation process. To be able to populate 3D halo output with sufficient statistical resolution, the capability to control the statistics of fast ion CX modeling and for thermal halo launch has been added to NUBEAM. 
The 3D halo neutral model is based on modification and extension of the "beam in box" aligned 3D Cartesian grid that includes the neutral beam itself, 3D fast neutral densities due to CX of partially slowed down fast ions in the beam halo region, 3D thermal neutral densities due to CX deposition and fast neutral recapture source. More details on the 3D halo simulation design will be presented. 10. Medical 3D Printing for the Radiologist PubMed Central Mitsouras, Dimitris; Liacouras, Peter; Imanzadeh, Amir; Giannopoulos, Andreas A.; Cai, Tianrun; Kumamaru, Kanako K.; George, Elizabeth; Wake, Nicole; Caterson, Edward J.; Pomahac, Bohdan; Ho, Vincent B.; Grant, Gerald T. 2015-01-01 While use of advanced visualization in radiology is instrumental in diagnosis and communication with referring clinicians, there is an unmet need to render Digital Imaging and Communications in Medicine (DICOM) images as three-dimensional (3D) printed models capable of providing both tactile feedback and tangible depth information about anatomic and pathologic states. Three-dimensional printed models, already entrenched in the nonmedical sciences, are rapidly being embraced in medicine as well as in the lay community. Incorporating 3D printing from images generated and interpreted by radiologists presents particular challenges, including training, materials and equipment, and guidelines. The overall costs of a 3D printing laboratory must be balanced by the clinical benefits. It is expected that the number of 3D-printed models generated from DICOM images for planning interventions and fabricating implants will grow exponentially. Radiologists should at a minimum be familiar with 3D printing as it relates to their field, including types of 3D printing technologies and materials used to create 3D-printed anatomic models, published applications of models to date, and clinical benefits in radiology. Online supplemental material is available for this article. ©RSNA, 2015 PMID:26562233 11.
3D bioprinting of tissues and organs. PubMed Murphy, Sean V; Atala, Anthony 2014-08-01 Additive manufacturing, otherwise known as three-dimensional (3D) printing, is driving major innovations in many areas, such as engineering, manufacturing, art, education and medicine. Recent advances have enabled 3D printing of biocompatible materials, cells and supporting components into complex 3D functional living tissues. 3D bioprinting is being applied to regenerative medicine to address the need for tissues and organs suitable for transplantation. Compared with non-biological printing, 3D bioprinting involves additional complexities, such as the choice of materials, cell types, growth and differentiation factors, and technical challenges related to the sensitivities of living cells and the construction of tissues. Addressing these complexities requires the integration of technologies from the fields of engineering, biomaterials science, cell biology, physics and medicine. 3D bioprinting has already been used for the generation and transplantation of several tissues, including multilayered skin, bone, vascular grafts, tracheal splints, heart tissue and cartilaginous structures. Other applications include developing high-throughput 3D-bioprinted tissue models for research, drug discovery and toxicology. 12. Consideration of techniques to mitigate the unauthorized 3D printing production of keys Straub, Jeremy; Kerlin, Scott 2016-05-01 The illicit production of 3D printed keys based on remote-sensed imagery is problematic as it allows a would-be intruder to access a secured facility without the attack attempt being as obviously detectable as conventional techniques. This paper considers the problem from multiple perspectives. First, it looks at different attack types and considers the prospective attack from a digital information perspective. Second, based on this, techniques for securing keys are considered. 
Third, the design of keys is considered from the perspective of making them more difficult to duplicate using visible light sensing and 3D printing. Policy and legal considerations are discussed. 13. Extra Dimensions: 3D in PDF Documentation Graf, Norman A. 2012-12-01 14. How We 3D-Print Aerogel SciTech Connect 2015-04-23 A new type of graphene aerogel will make for better energy storage, sensors, nanoelectronics, catalysis and separations. Lawrence Livermore National Laboratory researchers have made graphene aerogel microlattices with an engineered architecture via a 3D printing technique known as direct ink writing. The research appears in the April 22 edition of the journal, Nature Communications. The 3D printed graphene aerogels have high surface area, excellent electrical conductivity, are lightweight, have mechanical stiffness and exhibit supercompressibility (up to 90 percent compressive strain). In addition, the 3D printed graphene aerogel microlattices show an order of magnitude improvement over bulk graphene materials and much better mass transport. 15. FUN3D Manual: 12.4 NASA Technical Reports Server (NTRS) Biedron, Robert T.; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, Bil; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; 2014-01-01 This manual describes the installation and execution of FUN3D version 12.4, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixedelement unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status. 16. 
NASA Technical Reports Server (NTRS) Biedron, Robert T.; Carlson, Jan-Renee; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, Bil; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; Thomas, James L.; Wood, William A. 2016-01-01 This manual describes the installation and execution of FUN3D version 12.9, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixed-element unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status. 17. FUN3D Manual: 13.0 NASA Technical Reports Server (NTRS) Biedron, Robert T.; Carlson, Jan-Renee; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, Bill; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; Thomas, James L.; Wood, William A. 2016-01-01 This manual describes the installation and execution of FUN3D version 13.0, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixed-element unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status. 18. 
FUN3D Manual: 13.1 NASA Technical Reports Server (NTRS) Biedron, Robert T.; Carlson, Jan-Renee; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, Bil; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; Thomas, James L.; Wood, William A. 2017-01-01 This manual describes the installation and execution of FUN3D version 13.1, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixed-element unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status. 19. FUN3D Manual: 12.7 NASA Technical Reports Server (NTRS) Biedron, Robert T.; Carlson, Jan-Renee; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, Bil; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; Thomas, James L.; Wood, William A. 2015-01-01 This manual describes the installation and execution of FUN3D version 12.7, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixed-element unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status. 20. 
FUN3D Manual: 12.6 NASA Technical Reports Server (NTRS) Biedron, Robert T.; Derlaga, Joseph M.; Gnoffo, Peter A.; Hammond, Dana P.; Jones, William T.; Kleb, William L.; Lee-Rausch, Elizabeth M.; Nielsen, Eric J.; Park, Michael A.; Rumsey, Christopher L.; Thomas, James L.; Wood, William A. 2015-01-01 This manual describes the installation and execution of FUN3D version 12.6, including optional dependent packages. FUN3D is a suite of computational fluid dynamics simulation and design tools that uses mixed-element unstructured grids in a large number of formats, including structured multiblock and overset grid systems. A discretely-exact adjoint solver enables efficient gradient-based design and grid adaptation to reduce estimated discretization error. FUN3D is available with and without a reacting, real-gas capability. This generic gas option is available only for those persons that qualify for its beta release status.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.3128661513328552, "perplexity": 4291.876555985643}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-05/segments/1516084890187.52/warc/CC-MAIN-20180121040927-20180121060927-00641.warc.gz"}
http://openstudy.com/updates/5105d7ace4b03186c3f9db19
Here's the question you clicked on: 55 members online • 0 viewing ## anonymous 3 years ago What is the limit for this graph? Delete Cancel Submit • This Question is Closed 1. anonymous • 3 years ago Best Response You've already chosen the best response. 0 ##### 1 Attachment 2. anonymous • 3 years ago Best Response You've already chosen the best response. 0 @phi can u help? @satellite73 ? 3. anonymous • 3 years ago Best Response You've already chosen the best response. 0 @zepdrix 4. anonymous • 3 years ago Best Response You've already chosen the best response. 0 umm can u help @dumbcow 5. anonymous • 3 years ago Best Response You've already chosen the best response. 0 As it approaches what? 6. anonymous • 3 years ago Best Response You've already chosen the best response. 0 idk 7. anonymous • 3 years ago Best Response You've already chosen the best response. 0 Then go find out 8. anonymous • 3 years ago Best Response You've already chosen the best response. 0 the original points where these ##### 1 Attachment 9. anonymous • 3 years ago Best Response You've already chosen the best response. 0 are you given a function? it looks like it has horizontal asymptote around 5 ?? 10. anonymous • 3 years ago Best Response You've already chosen the best response. 0 yea the funtion is lim x-> x^2 + 1/ x -1 11. anonymous • 3 years ago Best Response You've already chosen the best response. 0 $\frac{x^{2} +1}{x-1}$ ? 12. anonymous • 3 years ago Best Response You've already chosen the best response. 0 yes 13. anonymous • 3 years ago Best Response You've already chosen the best response. 0 in that case, some of the points are incorrect x=-2 --> (4+1)/(-2-1) = 5/-3 14. anonymous • 3 years ago Best Response You've already chosen the best response. 0 ohh so it will be -1.6? 15. anonymous • 3 years ago Best Response You've already chosen the best response. 0 right 16. anonymous • 3 years ago Best Response You've already chosen the best response. 0 ok 17. 
anonymous • 3 years ago Best Response You've already chosen the best response. 0 so the graph is wrong? 18. anonymous • 3 years ago Best Response You've already chosen the best response. 0 so there is not limit here? 19. anonymous • 3 years ago Best Response You've already chosen the best response. 0 are u there @dumbcow ? 20. anonymous • 3 years ago Best Response You've already chosen the best response. 0 @Mertsj can u help 21. Mertsj • 3 years ago Best Response You've already chosen the best response. 0 Vertical asymptote at x = 1 22. Not the answer you are looking for? Search for more explanations. • Attachments: Find more explanations on OpenStudy ##### spraguer (Moderator) 5→ View Detailed Profile 23 • Teamwork 19 Teammate • Problem Solving 19 Hero • You have blocked this person. • ✔ You're a fan Checking fan status... Thanks for being so helpful in mathematics. If you are getting quality help, make sure you spread the word about OpenStudy.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 1.0000039339065552, "perplexity": 25356.382504520567}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-36/segments/1471982295966.49/warc/CC-MAIN-20160823195815-00255-ip-10-153-172-175.ec2.internal.warc.gz"}
http://www.ams.org/mathscinet-getitem?mr=35:2283
MathSciNet bibliographic data MR211402 55.40 Wall, C. T. C. Finiteness conditions for ${\rm CW}$ complexes. II. Proc. Roy. Soc. Ser. A 295 1966 129–139. Links to the journal or article are not yet available For users without a MathSciNet license, Relay Station allows linking from MR numbers in online mathematical literature directly to electronic journals and original articles. Subscribers receive the added value of full MathSciNet reviews.
{"extraction_info": {"found_math": true, "script_math_tex": 1, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9535884857177734, "perplexity": 3543.6663480215525}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-50/segments/1480698543170.25/warc/CC-MAIN-20161202170903-00472-ip-10-31-129-80.ec2.internal.warc.gz"}
https://rupress.org/jcb/article/116/4/977/59409/A-normally-masked-nuclear-matrix-antigen-that?searchresult=1
mAbs were generated against HeLa nuclear matrix proteins and one, HIB2, which selectively stained mitotic cells, was selected for further study. Western blot analysis showed H1B2 antibody detected a protein of 240 kD in the nuclear matrix fractions. The H1B2 antigen was completely masked in immunofluorescently stained interphase cells. However, removing chromatin with DNase I digestion and 0.25 M ammonium sulfate extraction exposed the protein epitope. The resulting fluorescence pattern was bright, highly punctate, and entirely nuclear. Further extraction of the nuclear matrix with 2 M NaCl uncovers an underlying, anastomosing network of 9-13 nm core filaments. Most of the H1B2 antigen was retained in the fibrogranular masses enmeshed in the core filament network and not in the filaments themselves. The H1B2 antigen showed remarkable behavior at mitosis. As cells approached prophase the antigen became unmasked to immunofluorescent staining without the removal of chromatin. First appearing as a bright spot, the antibody staining spread through the nucleus finally concentrating in the region around the condensed chromosomes. The antibody also brightly stained the spindle poles and, more weakly, in a punctate pattern in the cytoskeleton around the spindle. As the chromosomes separated at anaphase, H1B2 remained with the separating daughter sets of chromosomes. The H1B2 antigen returned to the reforming nucleus at telophase, but left a bright staining region in the midbody. Immunoelectron microscopy of resinless sections showed that, in the mitotic cell, the H1B2 antibody did not stain chromosomes and centrioles themselves, but decorated a fibrogranular network surrounding and connected to the chromosomes and a fibrogranular structure surrounding the centriole. This content is only available as a PDF.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8063486814498901, "perplexity": 19043.11830458051}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-21/segments/1652662510138.6/warc/CC-MAIN-20220516140911-20220516170911-00170.warc.gz"}
https://publications.mfo.de/handle/mfo/1350/browse?type=author&value=Ingalls%2C+Colin
Now showing items 1-2 of 2 • #### The Magic Square of Reflections and Rotations  [OWP-2018-13] (Mathematisches Forschungsinstitut Oberwolfach, 2018-07-01) We show how Coxeter’s work implies a bijection between complex reflection groups of rank two and real reflection groups in 0(3). We also consider this magic square of reflections and rotations in the framework of Clifford ... • #### A McKay Correspondence for Reflection Groups  [OWP-2018-14] (Mathematisches Forschungsinstitut Oberwolfach, 2018-07-02) We construct a noncommutative desingularization of the discriminant of a finite reflection group $G$ as a quotient of the skew group ring $A=S*G$. If $G$ is generated by order two reflections, then this quotient identifies ...
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8565515279769897, "perplexity": 905.1415472635522}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-29/segments/1593655886706.29/warc/CC-MAIN-20200704201650-20200704231650-00068.warc.gz"}
https://www.transtutors.com/questions/18-4-do-it-the-fastener-division-of-southern-fasteners-manufactures-zippers-and-then-1356494.htm
# 18-4 DO IT! The fastener division of Southern Fasteners manufactures zippers and then sells them... 18-4 DO IT! The fastener division of Southern Fasteners manufactures zippers and then sells them to customers for $8 per unit. Its variable cost is$3 per unit, and its fixed cost per unit is $1.50. Management would like the fastener division to transfer 12,000 of these zippers to another division within the company at a price of$3. The fastener division could avoid \$0.20 per zipper of variable packaging costs by selling internally. Determine the minimum transfer price (a) assuming the fastener division is not oper- ating at full capacity, and (b) assuming the fastener division is operating at full capacity. ✔  The  Navigator EXERCISES Hi, Pleas...
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.4380071461200714, "perplexity": 11060.734180901827}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-30/segments/1531676590051.20/warc/CC-MAIN-20180718041450-20180718061450-00615.warc.gz"}
http://math.tutorcircle.com/number-sense/rounding-fractions-to-the-nearest-whole-number.html
Sales Toll Free No: 1-800-481-2338 # Rounding Fractions to the Nearest Whole Number In this page, we will learn how to round fractions to the nearest whole number. Given below are the rules for converting fractions: Rules for Converting Fractions: 1. Convert the fraction value from an improper fraction to a mixed fraction. 2. Check if the fraction part is less than $\frac{1}{2}$. Then, keep the whole number value the same and it will be the round-off value. 3. Check if the fraction part is greater than or equal to $\frac{1}{2}$. Then, add 1 to the whole number and drop the fraction part. That will be the result value. Given below are some of the examples in rounding fractions to the nearest whole number. Example 1: Find the round-off value of $\frac{7}{2}$. Solution: First, convert it into a mixed fraction, which is $3\frac{1}{2}$. Now, the fraction part is exactly equal to $\frac{1}{2}$, so we round up. So, the result will be 4. Example 2: Find the round-off value of $\frac{8}{3}$. Solution: Converting to a mixed fraction, we get $2\frac{2}{3}$. Here, $\frac{2}{3}$ > $\frac{1}{2}$ (since 4 > 3). So, the result will be 3. Example 3: Find the round-off value of $4\frac{2}{5}$. Solution: $\frac{2}{5}$ < $\frac{1}{2}$ (since 4 < 5). Therefore, the result will be 4. Example 4: Find the round-off value of $25\frac{1}{7}$. Solution: $\frac{1}{7}$ < $\frac{1}{2}$ (since 2 < 7). Therefore, the result will be 25. Example 5: Find the round-off value of $53\frac{2}{3}$. Solution: $\frac{2}{3}$ > $\frac{1}{2}$ (since 4 > 3). Therefore, the result will be 54. Example 6: Find the round-off value of $33\frac{2}{5}$. Solution: $\frac{2}{5}$ < $\frac{1}{2}$ (since 4 < 5). Therefore, the result will be 33.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.4235338568687439, "perplexity": 1194.9289752589132}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-15/segments/1397609538110.1/warc/CC-MAIN-20140416005218-00416-ip-10-147-4-33.ec2.internal.warc.gz"}
http://quant.stackexchange.com/questions/9376/quantitative-finance-mentality-for-success
# Quantitative finance mentality for success [closed] I have a strong statistical background (particularly in Time Series analysis) and previously have spent a lot of time modelling sports, Baseball in particular. After reading "Analysis of Financial Time Series" by Ruey S. Tsay, I have become interested in quantitative finance. I'd like to know the mentality that quants have when it comes to the stock market. What I mean by this is what is the general idea to be successful. I'm not asking exactly HOW to do it but merely what to focus on. For example to be successful with Baseball modelling I focused on predicting the distribution of competing teams' runs. I saw a comment here by Matt Wolf If I have to summarize my take on financial markets then I would say success has everything to do with managing risk in smart ways as well as seizing opportunities in times of market inefficiencies and it has very little to nothing to do with forecasting the future. I understand that risk must be managed in smart ways and clearly to make any sort of expected profit, the only way to do this is to capitalise on market inefficiencies. However I do not get the part to do with forecasting the future. Surely this is necessary! Does it mean that success is not about POINT forecasts of the future but instead distributional forecasts e.g. option pricing seems all about distributional forecasting. What I took away from it was that you shouldn't try to predict stock movements by some ARMA, ARIMA or a more general dependency equation or have I completely missed the point? - ## closed as off-topic by Shane, Bob Jansen♦, BlackMamba, chrisaycockNov 6 '13 at 12:46 This question appears to be off-topic. The users who voted to close gave these specific reasons: • "Questions seeking career advice are off-topic because this site is intended solely for questions about quantitative finance as defined by our scope." 
– Bob Jansen, chrisaycock • "Basic financial questions are off-topic as they are assumed to be common knowledge for those studying or working in the field of quantitative finance." – Shane, BlackMamba If this question can be reworded to fit the rules in the help center, please edit the question. This is a very subjective question. One thing you need to understand is that there are many types of quants and it is not always about predicting the future returns. Many quantitative analysts are involved in market-making, this is where you sell products at a slight cost to customers and try to stay more or less neutral to market moves. When you read about most of models that are discussed based on the non-arbitrage principle and replication, these models are used to find the fair price to quote to the customer. To simplify, it does not matter if the price will realise statistically if you can replicate the price via a hedge as the cost of the product will be the cost of the hedge. Most of the option models used for pricing are about finding a sensible interpolation/extrapolation for maturities/strikes that are not liquid in the market and producing hedges to manage the risk in order to lock the customer's margin. Some other people are involved in proprietary strategies, this is what most of people not working in finance believe that traders are doing: making money out of the market. Even in this topic, people are not always using forecasts, you could be using arbitrages where by using a combination of trades you could be locking money. However it is true that some quants or traders will try to forecast a relationship, it could be anything like a pairs to trade which is statistically mean reverting, a relationship that leads to abnormal positive returns, noticing that the series has momentum, using regime switching to find periods where the momentum exists, ... etc. So even though you could specialise in forecasts, there are other ways to make money out of the market. -
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.22660936415195465, "perplexity": 963.5446330366098}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2015-35/segments/1440645340161.79/warc/CC-MAIN-20150827031540-00186-ip-10-171-96-226.ec2.internal.warc.gz"}
https://hidden-facts.info/relationship-between-and/what-is-the-relationship-between-wind-velocity-and-wave-height.php
# What is the relationship between wind velocity and wave height In fluid dynamics, wind waves, or wind-generated waves, are surface waves that occur on the Given the variability of wave height, the largest individual waves are likely to be somewhat less than twice the reported significant wave height for a . The relationship between the wavelength, period, and velocity of any wave is. Predicting the height of the waves depending on the wind strength. It's time to tell you about forecasting wave height depending on the wind speed. the relationship between the dimensionless parameters of the waves obey universal laws. characterize wind, wave and currents, are taken from a m height meteorological mast, Relation between wave height, wave period and wind speed. They are often found where there is a sudden rise in the sea floor, such as a reef or sandbar. Deceleration of the wave base is sufficient to cause upward acceleration and a significant forward velocity excess of the upper part of the crest. The peak rises and overtakes the forward face, forming a "barrel" or "tube" as it collapses. They tend to form on steep shorelines. These waves can knock swimmers over and drag them back into deeper water. When the shoreline is near vertical, waves do not break, but are reflected. Most of the energy is retained in the wave as it returns to seaward. ### Wind wave - Wikipedia Interference patterns are caused by superposition of the incident and reflected waves, and the superposition may cause localised instability when peaks cross, and these peaks may break due to instability. Airy wave theory Stokes drift in shallow water waves Animation Wind waves are mechanical waves that propagate along the interface between water and air ; the restoring force is provided by gravity, and so they are often referred to as surface gravity waves. As the wind blows, pressure and friction perturb the equilibrium of the water surface and transfer energy from the air to the water, forming waves. 
The initial formation of waves by the wind is described in the theory of Phillips from 1957, and the subsequent growth of the small waves has been modeled by Miles, also in 1957. The wave conditions are: As a result, the surface of the water forms not an exact sine wave, but more a trochoid with the sharper curves upwards—as modeled in trochoidal wave theory. Wind waves are thus a combination of transversal and longitudinal waves. When waves propagate in shallow water, where the depth is less than half the wavelength, the particle trajectories are compressed into ellipses. The empirical relation for the fully formed wave height, which can serve as the upper limit of assessment of wave height for any wind speed, has been derived. Everything got more complicated. In place of the first-generation wave prediction models came second-generation models using the energy spectrum.
The wind rapidly increases within a short period of time and not limited by distance enough distance - then the growth of the wave is determined and limited by elapsed time duration-limited. This occurs very rarely in nature. The wind is blowing in a constant direction at a sufficient distance and for a sufficient time so the wave will be fully formed fully developed wave under these conditions. Note that even in the open ocean waves rarely reach the limit values at wind speeds greater than 50 knots. Empirically, we obtained the following dependence for the case when wave growth is limited by the length of the acceleration. The time waves require under the wind influence at the velocity on the distance to achieve the maximum possible for a given distance heights. The relationship between the significant wave height and the distance The relationship between the period of the wave and the distance The drag coefficient For a fully developed waves Also the transition from the duration of the wind to the length of the acceleration i. Thus, if the duration of action and length of the acceleration of the wind is known, it is necessary to select the most restrictive value. If the wave generation height is limited by the time it is necessary to replace it by an equivalent distance and calculate the wave height based on it. In case of shallow water equations remain valid except for the additional limitations under which the wave period can not exceed the following ratiosThen the order of the wave height prediction for the shallow water is as follows: Assess the wave period for a given distance and wind speed using conventional formula. In the case of shallow water verify the conditions of the period and depth. ## The waves and the wind. Wave height statistical forecasting If they are exceeded take the boundary value. In the case of the wave boundary value, find the distance corresponding to the generation of waves with such period. 
Calculate the height in accordance with the value of the distance. If the wave height exceeds 0. Some more important notes These empirical formulas derived for relatively normal weather conditions, and are not applicable for the assessment of the wave height in the event of, for example, a hurricane. Nomograms contained in the directory is built for the wind speed no higher than These empirical formulas are used for statistical forecasting of wave heights, so the height of these formulas is nothing more than a significant wave height determined by the dispersion of the wave spectrum as follows: This is a more modern definition of significant height of the waves, and the very first definition, which was given to Walter Munk during World War II, was:
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.975754976272583, "perplexity": 558.5117314997462}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-30/segments/1563195527204.71/warc/CC-MAIN-20190721205413-20190721231413-00459.warc.gz"}
http://thenocturnalastrostudent.blogspot.com/2012/12/almost-time.html
## Friday, 28 December 2012 ### Almost Time I will soon be embarking on my 3-year (hopefully) journey that is my PhD. I can't say I'm not nervous, but I must say I'm more excited. I've had quite a long break (almost 7 months) from doing anything really (bar demonstrating in labs and a bit of reading), so I will have to see how easy it is to get back into the groove of work. I start Jan 7th, quite a strange time, but my supervisor has been over in America so I had to wait for him to come back. At least I'm not alone in this: I have a fellow PhD student starting with me. ### AGN I suppose I should tell you about the subject of my PhD. I'm going to be observing AGN (Active Galactic Nuclei) using X-rays - exactly what about them has not yet been confirmed; I'm still talking to my supervisor about that one. AGN are thought to be made up of 5-6 regions, the first of which is a supermassive (billions of times the mass of the Sun) black hole which is accreting matter. This liberates the matter of its gravitational potential, as the virial theorem states that it must radiate half of its energy during its infall. Before the matter can accrete onto the central black hole it must lose its angular momentum. This is done in the second region, an accretion disk, which forms from infalling matter that is flattened into a disk by its rotation around the central black hole. The accretion disk emits as a black body, with the innermost regions being the hottest and emitting in the extreme ultraviolet. Annoyingly, a photon from the extreme ultraviolet part of the EM spectrum has enough energy to ionise hydrogen, and due to the hydrogen-rich nature of our galaxy it is completely absorbed. The third and fifth regions are observed in the spectra of AGN; they are the two line-emitting regions. The BLR (Broad Line Region) is so called as the lines in the spectra are broadened (who said physics is complicated?); the reason for the broadening is thought to be its closer proximity to the black hole.
The NLR (Narrow Line Region) is therefore further out than the BLR, but it is also less dense, as it contains forbidden lines, which are emission lines that are normally collisionally suppressed at higher densities (atoms are not allowed to emit their lines normally as they take too long, allowing other atoms to bump into them); from this knowledge we can set limits on the density of the NLR. Both regions are thought to be clumpy, with the matter arranged in clouds. The awake ones of you will have noticed that I missed out the fourth region: it is known as the dusty torus (ring-doughnut shape), which absorbs radiation from the accretion disk and BLR; this heats it, causing it to emit in the IR (infra-red). It has the effect of screening the accretion disk and BLR from the observer if it falls in the line of sight. This is the basis behind the unified model of AGN: that the differences in AGN types are due to the orientation from which the AGN is observed, as illustrated in this image. There is also another distinction between different types of AGN: radio loud and radio quiet. Radio-loud AGN emit much more in the radio band than their quiet counterparts, hence the name. This is thought to be due to the presence (or lack) of a relativistic jet. Hope you have enjoyed my very brief overview of the structure of AGN. -Micky P.S. This blog is going to become a place for me to keep track of my day-to-day work; hope you guys enjoy it, although it's really more for my benefit.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 2, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7323399782180786, "perplexity": 797.4582121452547}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257646189.21/warc/CC-MAIN-20180319003616-20180319023616-00315.warc.gz"}
https://jira.lsstcorp.org/browse/DM-6349?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
# Replace cameraGeom PAF files XMLWordPrintable #### Details • Type: Story • Status: Done • Resolution: Done • Fix Version/s: None • Component/s: • Labels: • Story Points: 10 • Epic Link: • Sprint: Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10 • Team: Alert Production #### Description PAF files have long been deprecated, but continue to be used for describing the camera geometry. We need to replace the PAF cameraGeom files used for CFHT-MegaCam, DECam, LSSTSim and SDSS, and the scripts used to convert these files to FITS files for reading by the Mappers. They might be replaced by a configuration like YAML, or pure python. #### Activity No builds found. Paul Price created issue - Paul Price made changes - Field Original Value New Value Link This issue is blocked by RFC-192 [ RFC-192 ] Simon Krughoff made changes - Team Alert Production [ 10300 ] Labels SciencePipelines John Parejko made changes - Assignee Simon Krughoff [ krughoff ] John Parejko [ parejkoj ] Hide John Parejko added a comment - Taking this on while I'm blocked by other things. Show John Parejko added a comment - Taking this on while I'm blocked by other things. John Parejko made changes - Epic Link DM-5691 [ 23683 ] John Parejko made changes - Sprint Alert Production F16 - 7 [ 236 ] Story Points 10 Hide John Parejko added a comment - Notes from Simon Krughoff and my chat about this: Cameras that we need to deal with built exclusively in code • monocam • test Don't need to do anything: there is only one source, by definition. (But! check that the serialized files are not committed to the repos.) reads text files and generates FITS (and other) files lsstSim Delete persisted files, and ensure that makeLsstCameraRepository.py is run by scons and that it puts things where the butler expects them. parses PAF suprime hsc sdss Translate PAF to something else, then save that. 
Write a PAF to YAML mapper, serialize that, delete PAF, delete any FITS (etc.) persisted config, ensure scons runs FITS generator. Need to check these cfht decam If these use PAF, do the above. Otherwise, do something else. How to check that we got it right? • Butlerize old and new and do for obj in dir(old): assert getattr(old, obj) == getattr(new, obj) but that may not work since == isn't defined on all our SWIGed things. • Write the persistence formats and checksum/diff them (old vs. new). Show John Parejko added a comment - Notes from Simon Krughoff and my chat about this: Cameras that we need to deal with built exclusively in code monocam test Don't need to do anything: there is only one source, by definition. (But! check that the serialized files are not committed to the repos.) reads text files and generates FITS (and other) files lsstSim Delete persisted files, and ensure that makeLsstCameraRepository.py is run by scons and that it puts things where the butler expects them. parses PAF suprime hsc sdss Translate PAF to something else, then save that. Write a PAF to YAML mapper, serialize that, delete PAF, delete any FITS (etc.) persisted config, ensure scons runs FITS generator. Need to check these cfht decam If these use PAF, do the above. Otherwise, do something else. How to check that we got it right? Butlerize old and new and do for obj in dir(old): assert getattr(old, obj) == getattr(new, obj) but that may not work since == isn't defined on all our SWIGed things. Write the persistence formats and checksum/diff them (old vs. new). Hide Paul Price added a comment - Suprime-Cam and HSC (both in obs_subaru) do not use PAF, but are using the persisted files (camera.py, 1 FITS file per CCD) as the primary source. Show Paul Price added a comment - Suprime-Cam and HSC (both in obs_subaru) do not use PAF, but are using the persisted files (camera.py, 1 FITS file per CCD) as the primary source. 
Simon Krughoff made changes - Sprint Alert Production F16 - 7 [ 236 ] Alert Production F16 - 7, Alert Production F16 - 8 [ 236, 245 ] Simon Krughoff made changes - Rank Ranked higher Hide John Parejko added a comment - Do we need to do the same for the defects files, or can those just remain as FITS files? Show John Parejko added a comment - Do we need to do the same for the defects files, or can those just remain as FITS files? Hide Paul Price added a comment - I think the guiding principle should be that there is only one authoritative source, and anything else is built from that source. In obs_subaru, the defects FITS files and registry are built by scons from text-based descriptions. Show Paul Price added a comment - I think the guiding principle should be that there is only one authoritative source, and anything else is built from that source. In obs_subaru, the defects FITS files and registry are built by scons from text-based descriptions. Hide John Parejko added a comment - - edited Please review the obs_lsstSim commit. Note that you'll need sconsUtils DM-7179 in order to build with scons. The included tests pass, but its probably worth running with some other tests (suggestions are welcome), as there are some changes to the currently-committed camera descriptions, but I don't know if they matter or not. Show John Parejko added a comment - - edited Please review the obs_lsstSim commit. Note that you'll need sconsUtils DM-7179 in order to build with scons. The included tests pass, but its probably worth running with some other tests (suggestions are welcome), as there are some changes to the currently-committed camera descriptions, but I don't know if they matter or not. 
John Parejko made changes - Reviewers Simon Krughoff [ krughoff ] Status To Do [ 10001 ] In Review [ 10004 ] John Parejko made changes - Link This issue relates to DM-7181 [ DM-7181 ] Tim Jenness made changes - Link This issue is blocked by DM-7179 [ DM-7179 ] Hide John Parejko added a comment - Confirming from my notes above: obs_monocam and obs_test do not contain any generated FITS files, so are already in compliance. Show John Parejko added a comment - Confirming from my notes above: obs_monocam and obs_test do not contain any generated FITS files, so are already in compliance. Hide John Parejko added a comment - More details on the other surveys: • obs_sdss needs to generate the description files from the yanny files in etc/ at scons time. • obs_cfht could just use the persisted FITS files, but it's worth checking how feasible it is to scons-generate them from the amp info tables (e.g. what changes need to be made to the tables and/or generating code). • obs_decam should be able to generate the FITS files via makeDecamCameraRepository.py and the included DetectorLayoutFile (chipcenters.txt) and SegmentsFile (segmentfile.txt). • obs_subaru: the Camera.paf and Electronics.paf files do not exist in the repo. Paul Price: are you ok with the persisted FITS files being the sole source in this case, or can we get those two files? I can't see them in the repo history either. Show John Parejko added a comment - More details on the other surveys: obs_sdss needs to generate the description files from the yanny files in etc/ at scons time. obs_cfht could just use the persisted FITS files, but it's worth checking how feasible it is to scons-generate them from the amp info tables (e.g. what changes need to be made to the tables and/or generating code). obs_decam should be able to generate the FITS files via makeDecamCameraRepository.py and the included DetectorLayoutFile (chipcenters.txt) and SegmentsFile (segmentfile.txt). 
obs_subaru: the Camera.paf and Electronics.paf files do not exist in the repo. Paul Price : are you ok with the persisted FITS files being the sole source in this case, or can we get those two files? I can't see them in the repo history either. Hide Paul Price added a comment - I think we're happy with how obs_subaru is for now. The camera is quite stable at the moment, and I don't think we want to write a parser. If there is a generic parser available in the future, then maybe we could generate the FITS files at build time, but it's not necessary right now. Show Paul Price added a comment - I think we're happy with how obs_subaru is for now. The camera is quite stable at the moment, and I don't think we want to write a parser. If there is a generic parser available in the future, then maybe we could generate the FITS files at build time, but it's not necessary right now. Hide John Parejko added a comment - New question: should I unify where the camera description files live? There's several different directory structures. • decam lives in decam/cameraGeom • cfht lives in cfht/megacam • hsc, suprimecam live in hsc/camera and suprimecam/camera respectively • sdss and lsstSim lives in description/camera Show John Parejko added a comment - New question: should I unify where the camera description files live? There's several different directory structures. decam lives in decam/cameraGeom cfht lives in cfht/megacam hsc, suprimecam live in hsc/camera and suprimecam/camera respectively sdss and lsstSim lives in description/camera Hide Paul Price added a comment - To allow multiple cameras within the same obs package, if you're going to standardise (which doesn't appear to be necessary, but may be desirable), then I think it needs to live in a directory named after the camera. 
Show Paul Price added a comment - To allow multiple cameras within the same obs package, if you're going to standardise (which doesn't appear to be necessary, but may be desirable), then I think it needs to live in a directory named after the camera. Hide John Parejko added a comment - ... then I think it needs to live in a directory named after the camera. Yes, that's what I was thinking: use obs_BLAH/CAMERANAME/camera for everything (so obs_subaru doesn't have to change). Just would make it easier to find things. Show John Parejko added a comment - ... then I think it needs to live in a directory named after the camera. Yes, that's what I was thinking: use obs_BLAH/CAMERANAME/camera for everything (so obs_subaru doesn't have to change). Just would make it easier to find things. Simon Krughoff made changes - Epic Link DM-5691 [ 23683 ] DM-7362 [ 26448 ] Hide Simon Krughoff added a comment - John Parejko I looked the obs_lsstSim changes over. They generally seem fine. I am still wondering whether we need to tell people to just grab the calibration frames via globus. I think it's easier to just have people simulate the blank images since there appear to be no defects whatsoever. If there were defects we'd need the bias and flat frames to find them. Anyway, I trust your judgement. Feel free to procede. Show Simon Krughoff added a comment - John Parejko I looked the obs_lsstSim changes over. They generally seem fine. I am still wondering whether we need to tell people to just grab the calibration frames via globus. I think it's easier to just have people simulate the blank images since there appear to be no defects whatsoever. If there were defects we'd need the bias and flat frames to find them. Anyway, I trust your judgement. Feel free to procede. Hide John Parejko added a comment - Simon Krughoff Can you please checkout obs_lsstSim and try out this ticket branch with some higher-level tests? 
I'd like help checking whether I haven't broken anything, and you're well-placed to do so. Show John Parejko added a comment - Simon Krughoff Can you please checkout obs_lsstSim and try out this ticket branch with some higher-level tests? I'd like help checking whether I haven't broken anything, and you're well-placed to do so. Simon Krughoff made changes - Sprint Alert Production F16 - 7, Alert Production F16 - 8 [ 236, 245 ] Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9 [ 236, 245, 247 ] Simon Krughoff made changes - Rank Ranked higher Hide Simon Krughoff added a comment - John Parejko I ran some twinkles data with the new obs_lsstSim and it worked fine. I'd say you are good to move ahead. Show Simon Krughoff added a comment - John Parejko I ran some twinkles data with the new obs_lsstSim and it worked fine. I'd say you are good to move ahead. Simon Krughoff made changes - Sprint Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9 [ 236, 245, 247 ] Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10 [ 236, 245, 247, 284 ] Simon Krughoff made changes - Rank Ranked higher Simon Krughoff made changes - Sprint Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10 [ 236, 245, 247, 284 ] Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10, Alert Production F16 - 11 [ 236, 245, 247, 284, 289 ] John Parejko made changes - Sprint Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10, Alert Production F16 - 11 [ 236, 245, 247, 284, 289 ] Alert Production F16 - 7, Alert Production F16 - 8, Alert Production F16 - 9, Alert Production F16 - 10 [ 236, 245, 247, 284 ] John Parejko made changes - Rank Ranked lower Hide John Parejko added a comment - Back to "in progress" the partial review passed, and the design can go 
forward. Show John Parejko added a comment - Back to "in progress" the partial review passed, and the design can go forward. John Parejko made changes - Status In Review [ 10004 ] In Progress [ 3 ] Simon Krughoff made changes - Epic Link DM-7362 [ 26448 ] DM-8472 [ 28104 ] Tim Jenness made changes - Link This issue is triggered by RFC-192 [ RFC-192 ] Tim Jenness made changes - Link This issue is blocked by RFC-192 [ RFC-192 ] Simon Krughoff made changes - Epic Link DM-8472 [ 28104 ] DM-9680 [ 30785 ] Hide Robert Lupton added a comment - I have a yaml format that has successfully represented the ctio 0.9m and comCam, and I think will handle the full lsstCam. There's a ticket DM-11196 to move this to obs_base (although there is not yet any agreement that we will use it!). Show Robert Lupton added a comment - I have a yaml format that has successfully represented the ctio 0.9m and comCam, and I think will handle the full lsstCam. There's a ticket DM-11196 to move this to obs_base (although there is not yet any agreement that we will use it!). John Swinbank made changes - Epic Link DM-9680 [ 30785 ] DM-10068 [ 31628 ] John Swinbank made changes - Epic Link DM-10068 [ 31628 ] DM-11798 [ 34281 ] John Swinbank made changes - Epic Link DM-11798 [ 34281 ] DM-12728 [ 36327 ] John Swinbank made changes - Epic Link DM-12728 [ 36327 ] DM-14447 [ 80385 ] John Swinbank made changes - Epic Link DM-14447 [ 80385 ] DM-16722 [ 235355 ] Frossie Economou made changes - Status Admin Review [ 3 ] In Progress [ 11605 ] Frossie Economou made changes - Status Review [ 11605 ] In Progress [ 3 ] Hide John Swinbank added a comment - Given the progress made in the last couple of years on YAMLCamera, obs_lsst, DM-11196, I think it's unlikely that we're going to do further work on this ticket. I'm marking this as “done” to reflect the (substantial!) work done on obs_lsstSim. Please reopen if you disagree. 
Show John Swinbank added a comment - Given the progress made in the last couple of years on YAMLCamera, obs_lsst, DM-11196 , I think it's unlikely that we're going to do further work on this ticket. I'm marking this as “done” to reflect the (substantial!) work done on obs_lsstSim. Please reopen if you disagree. John Swinbank made changes - Resolution Done [ 10000 ] Status In Progress [ 3 ] Done [ 10002 ] #### People Assignee: John Parejko Reporter: Paul Price Reviewers: Simon Krughoff Watchers: James Chiang, John Parejko, John Swinbank, Paul Price, Robert Lupton, Simon Krughoff, Tim Jenness Votes: 0 Vote for this issue Watchers: 7 Start watching this issue #### Dates Created: Updated: Resolved: #### Jenkins Builds No builds found.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.47121667861938477, "perplexity": 7715.645933495242}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": false}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-49/segments/1669446710777.20/warc/CC-MAIN-20221130225142-20221201015142-00378.warc.gz"}
http://openmx.ssri.psu.edu/thread/1217?q=thread/1217
# Wrong algebra? 6 posts / 0 new Offline Joined: 01/06/2010 - 06:17 Wrong algebra? I'm sorry if this is a too specific question, I tried to use the Python parser but I am totally unfamiliar with Python and can't get it working. I keep finding differences between my Mx and OpenMx analysis and I think I must have made a mistake in translating the following algebra: in Mx: G = (Y-X)@(A.((A.A)(V-J))) + X@(A.((U-I)(A.A))); in OpenMx (wrong?): algebraG = mxAlgebra(expression = (Y-X) %x% (A * ((A*A) %*% (V-J))) + X %x% (A * ((U-I) %*% (A*A))), name = "G") Could anyone of you check whether this is correct or not? I hope so, thanks in advance, Suzanne Offline Joined: 07/31/2009 - 15:24 The parser translated your The parser translated your algebra into (Y - X) %x% ((A * (((A * A)) %*% (V - J)))) + X %x% ((A * ((U - I) %*% ((A * A))))) Assuming there is no bug in the Mx/OpenMx algebra parser, it looks like a match to what you've written. Offline Joined: 01/06/2010 - 06:17 Thanks for checking! Thanks for checking! Offline Joined: 01/06/2010 - 06:17 different solutions with the same fit Thanks again for checking the algebra! Apparently something else is wrong when fitting this model. Only, I do not get what it is. I am translating an Mx script for exploratory factor analysis (with oblique rotation) to openMx. So I have the right solution from Mx. In openMx, I obtain the same fit as with Mx, but different parameter estimates depending on the startvalues. When I use -.1 as startvalues for factor correlations (matrix F) I get the same solution as with Mx. However, if I use 0, I get another solution (but still the same fit). I attached my openMX script and the old Mx script, in case you want to take a look. Thanks in advance! Suzanne Offline Joined: 07/31/2009 - 15:12 First, leaving the 'rm(list=ls(all=TRUE))' line in your script is just sneaky. 
Not quite as sneaky as when someone passed around an OpenMx script that set the variable 'F' to be equal to 'TRUE', but still pretty sneaky. The two different sets of results go a bit deeper than the loadings, as the factor correlations vary across models and the D matrix includes a negative value in the second optimization (though the latter doesn't matter in your code). I think this is just a case of the rotation not working the way it's supposed to. My (admittedly weak) understanding of the algebra behind exploratory factor analysis and rotation is that it is traditionally a two-step procedure, where rotation is a method for picking one of any number of models that fit equally well. In a single-step procedure, OpenMx could feasibly get a correct minimum for the model but a local minimum for the rotation. I don't know enough about how rotation is handled in other programs to give you a clearer answer. Offline Joined: 01/06/2010 - 06:17 I am very sorry about the I am very sorry about the 'rm(list=ls(all=TRUE))'. I didn't realize it, but now I see that sending a script with this line to others is a bit like throwing a hand grenade to your work. I promise I will not do it again! The script indeed uses the single-step procedure described by Oort (2010), who used Mx. The weird thing is that in Mx it is all fine, but it seems that in openMx the rotation constraint is not working as it should. This is the paper from Oort: Oort, F.J. (2010). Likelihood based confidence intervals in exploratory factor analysis. Structural Equation Modeling, 18, 383-396.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6825542449951172, "perplexity": 1871.5676170714003}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-30/segments/1500549423812.87/warc/CC-MAIN-20170721222447-20170722002447-00313.warc.gz"}
http://motls.blogspot.com/2015/10/picard-number-for-pedestrian-physicists.html?m=1
## Thursday, October 22, 2015 ### Picard number for pedestrian physicists Guest blog by Monster from a U.K. research university founded in 1907 Let me try to explain in a maybe more physicists-friendly way essentially the same things that dalpezzo already mentioned beneath the "blog post about 1729". It is an exercise for experts to notice the hidden assumptions and oversimplified explanations. Let $$X$$ be a compact manifold. Let us consider $$U(1)$$ gauge theory on $$X$$, i.e usual Maxwell's electromagnetism. Locally on $$X$$, the gauge field is a 1-form $$A$$ and the field strength $$F=dA$$ is a 2-form. If $$X$$ has non-trivial 2-cycles, one can have non-trivial fluxes of $$F$$ through these 2-cycles. The number of topologically inequivalent 2-cycles in $$X$$ is $$B_2$$, the second Betti number of $$X$$ and, as the fluxes are quantized, a flux configuration is given by a collection $$(n_1,\dots, n_{B_2})$$ of $$B_2$$ integers. It is easy to show that for any flux configuration, there exists a gauge field with the prescribed fluxes. Assume that $$X$$ has the extra structure of a complex manifold. It means that locally on $$X$$, we have a notion of holomorphic coordinates $$z_i$$ and antiholomorphic coordinates $$\bar{z}_i$$. It is then possible to decompose the field strength as$F=F^{2,0}+F^{1,1}+F^{0,2}$ where the $$(2,0)$$ part $$F^{2,0}$$ only contains terms proportional to $$dz_i \wedge dz_j$$, the $$(1,1)$$ part $$F^{1,1}$$ only contains term proportional to $$dz_i \wedge d\bar{z}_j$$, and the $$(0,2)$$ part $$F^{0,2}$$ only contains terms proportional to $$d\bar{z}_i \wedge d\bar{z}_j$$. In terms of indices, the $$(p,q)$$ part has $$p$$ holomorphic indices and $$q$$ antiholomorphic indices. When the complex structures changes continuously, the holomorphic/antiholomorphic coordinates change continuously and so the above decomposition of the field strength changes continuously too. 
Now, given a complex structure on $$X$$ and a flux configuration, one can ask the following question; is there a gauge field with the prescribed fluxes such that the associated field strength satisfies$F^{2,0}=F^{0,2}=0 \quad ?$ This condition is a first order linear partial differential equation on the gauge field. It is always possible to solve it locally but an obstruction to glue these local solutions and to obtain a global solution can exist. As the equation is linear, the set of flux configurations such that there exists a solution is a sublattice of the lattice of flux configurations. The Picard number of $$X$$ is the rank of this sublattice, i.e. the number of independent flux configurations generating the space of flux configurations such that there exists a solution to the equation $$F^{2,0}=F^{0,2}=0$$. The Picard number is an integer between $$0$$ and $$B_2$$ and in general depends on the complex structure of $$X$$. The field strength $$F$$ of a gauge field configuration satisfies flux quantization but it is not in general the case of $$F^{2,0}$$, $$F^{1,1}$$ or $$F^{0,2}$$. In general their fluxes are complex numbers. So it is useful to introduce the space of complexified flux configurations made of $$B_2$$-uples $$(a_1,...,a_{B_2})$$ of complex numbers. The space of integral fluxes is a discrete subset of this complex vector space. One can show that the decomposition in $$(2,0)$$, $$(1,1)$$, and $$(0,2)$$ parts extend to the space of complexified flux configurations. The complex dimension of the space of complexified flux configurations of type $$(p,q)$$ is called the Hodge number $$h^{p,q}$$ of $$X$$. One has$B_2=h^{2,0}+h^{1,1}+h^{0,2}$ and $h^{2,0}=h^{0,2}.$ One can show that the Hodge numbers do not change when the complex structure of $$X$$ moves continuously but the corresponding subspaces of the space of complexified flux configurations in general moves continuously. 
The (integral) flux configurations such that there exists a gauge field with these prescribed fluxes such that $$F^{2,0}=F^{0,2}=0$$ are exactly the (integral) flux configurations lying inside the $$(1,1)$$ subspace of the space of complexified flux configurations. In particular, the Picard number is always between $$0$$ and $$h^{1,1}$$. So the picture to have in mind is the following: a big complex vector space, a discrete lattice of integral points and a specific subspace inside it. The Picard number measures the number of integral points in the specific subspace. When the complex subspace moves, due to a change in the complex structure of $$X$$, the Picard number in general changes. More precisely, when the complex subspace only moves a bit, an integral point which was not inside cannot become suddenly inside but an integral point inside can suddenly move out. It means that the Picard number can get enhanced at special points of the moduli space of complex structures on $$X$$. The whole subtlety of the Picard number comes from this interplay between the discrete set of (integral) flux configurations and the continuous subspace of complexified flux configurations of type $$(1,1)$$. If $$h^{2,0}=0$$, the above subtlety is absent: any complexified configuration is of type $$(1,1)$$, the equation $$F^{2,0}=F^{0,2}=0$$ always has a solution and the Picard number is simply the second Betti number $$B_2$$ and in particular does not depend on the complex structure. This is what happens for projective spaces, del Pezzo surfaces or Calabi-Yau manifolds (in the strict sense: holonomy equal to $$SU(n)$$ and not just contained in $$SU(n)$$). In all these examples, the Picard number is not something interesting: it is something we already knew, i.e. $$B_2$$. To have the full subtle story of variations of Picard numbers, one needs to have $$h^{2,0}$$ nonzero. This is for example the case for complex tori or $$K3$$ surfaces. 
For $$K3$$ surfaces, we have$B_2=22, \quad h^{2,0}=h^{0,2}=1, \quad h^{1,1}=20.$ The moduli space of complex structures on a $$K3$$ surface is of complex dimension 20. A generic $$K3$$ surface has Picard number $$0$$. There is a special locus of complex codimension $$1$$ at which the Picard number is enhanced to $$1$$. There is a special locus of complex codimension $$2$$ at which the Picard number is enhanced to $$2$$ and so on until a special locus of complex codimension $$20$$, i.e. dimension $$0$$, at which the Picard number is enhanced to $$20$$, its maximal possible value. Each of these special locus of complex dimension $$k$$ is fairly complicated: it is a countable union of varieties of dimension $$k$$. For example, the space of $$K3$$ surfaces of Picard number $$20$$ is of dimension $$0$$ but it is made of a countably infinite number of points and is in fact dense in the full moduli space of $$K3$$ surfaces: it is as the rational numbers in the real numbers and so the full picture of the moduli space of $$K3$$ surfaces with the various loci of given Picard numbers is extremely intricate. Under nice hypothesis ($$X$$ algebraic), there is a more geometric interpretation of the Picard number. A flux configuration is the data for each 2-cycle of an integer. If $$X$$ is of (real) dimension $$n$$, one can interpret these integers as prescribed intersection numbers with the various 2-cycles for a $$(n-2)$$-cycle. If $$X$$ is a (algebraic) complex manifold, one can show that the existence of a gauge field with $$F^{2,0}=F^{0,2}=0$$ and prescribed flux configuration is equivalent to the existence of an holomorphic representative for the $$(n-2)$$-cycle determined by the flux configuration. In other words, the Picard number measures the amount of holomorphic hypersurfaces (complex codimension $$1$$, i.e. real codimension $$2$$) in $$X$$. For a $$K3$$ surface, of real dimension $$4$$, i.e. 
complex dimension $$2$$, an holomorphic hypersurface is the same thing as a holomorphic curve (complex dimension $$1$$, i.e. real dimension $$2$$). For example, a generic $$K3$$ surface has Picard number $$0$$ and so has no holomorphic curves in it. On the contrary, a $$K3$$ surface with high Picard number has many holomorphic curves in it and so a rich complex geometry. Any non-trivial holomorphic geometry in a $$K3$$ surface in general requires a high enough Picard number. For instance, to compactify F-theory on a $$K3$$ surface, one needs an elliptic fibration with a section. The fiber of the elliptic fibration is a non-trivial holomorphic curve in the $$K3$$ surface and similarly for the image of the section, and so such a $$K3$$ surface has at least Picard number $$2$$. This kind of restriction on the allowed $$K3$$ surfaces for an F-theory compactification is not very surprising: IIB superstring compactified on a $$K3$$ surface is dual to heterotic string on $$T^4$$ and F-theory compactified on a $$K3$$ surface is dual to heterotic string on $$T^2$$. As there are clearly fewer parameters in $$T^2$$ than in $$T^4$$, the range of allowed $$K3$$ on the F-theory side has to be somehow limited. Similarly, I think that when one writes a $$K3$$ surface in a relatively simple explicit form, something one wants to do for explicit computations and explicit checks of various dualities, one generally obtains a $$K3$$ surface with relatively high Picard number precisely because the ability to write a simple description of an object is a sign of its deeper and richer structure. But in general, it seems quite difficult to find a direct physical meaning to the Picard number or a jump in the Picard number. 
For example, moving in the moduli space of $$K3$$ surfaces, when the Picard number jumps, nothing happens to the topology, nothing becomes singular, it is really a subtle modification of the complex geometry and so does not correspond to something as brutal as a topology-changing transition or gauge symmetry enhancement. If it has a physical meaning, it has to be relatively subtle. I can think of two examples of such physical meaning and both are about $$K3$$ surfaces of maximal Picard number, i.e. Picard number $$20$$. The first one is due to Moore and is about the attractor mechanism. Let us have a look at type IIB superstring compactified on the Calabi-Yau 3-fold obtained as a product of a $$K3$$ surface and an elliptic curve. The vector multiplet moduli space is the complex moduli space of this Calabi-Yau and so in particular contains the moduli space of $$K3$$ surfaces. One can construct BPS black holes in four dimensions by wrapping D3-branes over 3-cycles of the Calabi-Yau. The possible charges for these BPS black holes form an infinite discrete set. The choice of a vacuum of the theory is a choice of asymptotic value for the vector multiplet moduli at infinity. But when we have a BPS black hole and when we move from infinity toward the black hole, the value of the vector multiplet gets modified, as prescribed by the supergravity equations of motion. The attractor mechanism describes this evolution of the moduli and asserts that the value at the horizon of the black hole only depends on the charge of the black hole and not on the asymptotic value (as expected by general black hole entropy considerations: the entropy only depends on the charge and so the local geometry near the horizon should also only depend on the charge). So for each choice of charge, there should be a particular $$K3$$ surface describing the compactified geometry at the horizon. The result is that these $$K3$$ surfaces are exactly the $$K3$$ surfaces of maximal Picard number. 
Second, Gukov and Vafa have speculated about when a non-linear sigma model, defining a two-dimensional conformal field theory, is in fact a rational conformal field theory. A rational CFT is a CFT with the local fields organized in finitely many irreducible representations of a chiral algebra. The rationality of a CFT is a quite subtle property, not preserved in general under deformations. Applied to the case of $$K3$$ surfaces, the proposal of Gukov and Vafa asserts that the supersymmetric sigma model with target a $$K3$$ surface is a rational SCFT if and only if the $$K3$$ surface has maximal Picard number. I think that the validity of this proposal is an open question.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 1, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9370518922805786, "perplexity": 210.05170399440243}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-26/segments/1498128320841.35/warc/CC-MAIN-20170626170406-20170626190406-00669.warc.gz"}
http://lilypond-translations.3384276.n2.nabble.com/Error-in-make-doc-learning-texi-td7572742.html
# Error in make doc: learning.texi 3 messages Open this post in threaded view | ## Error in make doc: learning.texi I'm working in the translations in the learning directory of Documentation. I'm getting this error: Runaway argument? {ca_\finish }\else \globaldefs = 1 \input txi-ca.tex \fi \closein 1 \endgroup \ ETC. ./learning.texi:15: Paragraph ended before \documentlanguagetrywithoutunderscor e was complete.                    \par l.15       ? ./learning.texi:15: Emergency stop.                    \par l.15       ./learning.texi:15:  ==> Fatal error occurred, no output PDF file produced! Transcript written on learning.log. /usr/bin/texi2dvi: pdfetex exited with bad status, quitting. But I can't see anything wrong in line 15 of learning.tely. Is there anything to be changed somewhere else that is producing this error? In any case I haven't finished the translations in this directory, if it is just a matter of translating all the files of this directory please ignore. -- Walter Garcia-Fontes L'Hospitalet de Llobregat
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8126255869865417, "perplexity": 19805.698192517237}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-30/segments/1531676591140.45/warc/CC-MAIN-20180719144851-20180719164851-00008.warc.gz"}
https://web2.0calc.com/questions/composition-functions
+0 # Composition functions 0 247 4 +152 Express the function $$y=\sqrt{x^2+4}$$as a composition of $$y=f(g(x))$$of the two simpler functions $$y= f(u)$$and $$u=g(x)$$ $$f(u) =$$ $$g(x)=$$ I know how to put a function into another function but I don't know what this thing is asking :S vest4R  Mar 16, 2017 Sort: #1 +152 0 So I think I've got it.... I believe the y function is doing the square root so... f(u) = x+4 g(x)= x^2 ?? vest4R  Mar 16, 2017 #2 +26547 0 Try  $$g(x)=x^2+4$$  and  $$f(u)=\sqrt u$$ or  $$g(x)=x^2$$   and  $$f(u)=\sqrt{u+4}$$ or ... Alan  Mar 16, 2017 #3 +7225 0 Express the function $$\sqrt{x^2+4}$$ as a composition of y=f(g(x)) of the two simpler functions y= f(u) and u=g(x} f(u)= g(x)= I know how to put a function into another function but I don't know what this thing is asking :S .     $$y=\sqrt{x^2+4}$$ .     $$y=Root\ from \ ( x^2+4)$$ .               u                    g f(u)=$$\sqrt{g(x)}$$ $$g(x)=(x^2+4)$$ I hope I could help. ! . asinus  Mar 16, 2017 edited by asinus  Mar 16, 2017 edited by asinus  Mar 16, 2017 #4 0 Alan was correct with, g(x)=x^2+4 and f(u)=sqrt(u) Guest Mar 16, 2017 ### 16 Online Users We use cookies to personalise content and ads, to provide social media features and to analyse our traffic. We also share information about your use of our site with our social media, advertising and analytics partners.  See details
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8817945122718811, "perplexity": 10639.8886139293}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257647280.40/warc/CC-MAIN-20180320033158-20180320053158-00767.warc.gz"}
http://math.stackexchange.com/questions/244341/calculus-of-variation-didos-problem?answertab=votes
# Calculus of Variation (Dido's Problem?) Given the length L of a curve passing through two given points $(a,\alpha)$, $(b,\beta)$, find the equation of the curve so that the curve together with the interval $[a,b]$ encloses the largest area. Am I correct in thinking this is Dido's problem? Is it possible to use Green's theorem to find the equation? - Yes, I think this is Dido's problem or some variation (sorry for the pun). But think about what you're trying to extremize and with what constraint. You are trying to extremize $\int^b_a y(x) \text{d}x$ together with the constraint that $\int^b_a \sqrt{1+y'(x)^2}dx = L$ and you are also given that the endpoints are fixed, i.e., $y(a) = \alpha$ and $y(b) = \beta$. What method does one usually employ if they want to extremize a functional subject to a constraint and given that the endpoints are fixed? You use Lagrange multipliers together with the Euler-Lagrange equation.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8907087445259094, "perplexity": 193.8433524464498}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-10/segments/1394010352519/warc/CC-MAIN-20140305090552-00043-ip-10-183-142-35.ec2.internal.warc.gz"}
http://mathhelpforum.com/math-topics/119689-linear-programming-problem.html
# Math Help - Linear programming - problem 1. ## Linear programming - problem Hi, please give me some help to find the answer for the following linear programming question There are two products x1 and x2 manufactured on two machines M1 and M2. Product x1 requires 3 hours on machine M1 and half an hour on machine M2. Product x2 requires two hours on machine M1 and one hour on machine M2. Total available capacity on machine M1 is six hours and that on machine M2 is four hours. Each unit of x1 has an incremental profit of Rs.12/= and each unit of x2 an incremental profit of Rs.4/=. (a) Formulate the Primal problem. (b) Write the dual problem to the above primal problem. (c) Solve the dual problem and by using it find the solution for the primal problem. I have done part (a) above as follows; Mathematical formulation is To maximize : $Z=12a+4b$ --> Equation 01 Subject to constraints : $3a+2b \le 6$ --> Equation 02 $\frac {1}{2} a+b \le 4$ --> Equation 03 $a \ge 0, b \ge 0$ --> Equation 04 If the above is true, please give me some help to find the other parts (b) and (c), or help me to find the answer 2. Your primal problem is correct! Here is part (b) The dual problem is: Min TC = 6u1 + 4u2 st. 3u1 + 0.5u2 >= 12 2u1 + u2 >= 4 u1, u2 >= 0 Try part (c) yourself
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 4, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7335735559463501, "perplexity": 2320.685023662332}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-10/segments/1394678702690/warc/CC-MAIN-20140313024502-00062-ip-10-183-142-35.ec2.internal.warc.gz"}
https://itectec.com/matlab/matlab-listbox-please-help-me-with-the-listbox/
guidelistboxMATLAB Hello, I have posted some questions before about the listbox but none got an answer. My question is simple. I have this matrix: a=1:1:10 % this is 1 2 3 4 5 6 7 8 9 10 I would like to load these numbers into a listbox, but I don't know how. Must I change these numbers to char? How can I do this? Thanks in advance a=1:1:10;b=num2cell(a); set(handles.my_pop,'String',b);
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7188857197761536, "perplexity": 1465.8031801272896}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-21/segments/1620243988796.88/warc/CC-MAIN-20210507150814-20210507180814-00186.warc.gz"}
http://orgmode.org/changes_older.html
[ en - fr - ja ] # Org mode for Emacs – Older (!) Release notes Org mode is for keeping notes, maintaining TODO lists, planning projects, and authoring documents with a fast and effective plain-text system. ## Version 6.32 ### Rewrite of org-mobile.org, for MobileOrg 1.0 (build 20) MobileOrg is currently under review at the iPhone App Store. You will need Org mode version 6.32 to interact with it. ### Added support for habit consistency tracking org-habit.el contains new code to track habits. Please configure the variable org-modules to activate it. When active, habits (a special TODO entry) will be displayed in the agenda together with a "consistency graph". Habit tracking is described in a new manual section. Thanks to John Wiegley for this contribution. ### New context-aware tag auto-exclusion After writing a function relating to location and context information, you will be able to press / RET in the agenda to exclude tasks that cannot be done in the current context. For details, see the information about filtering in the manual. Thanks to John Wiegley for a patch to this effect. ### New clock resolving tools When clocking into a new task while no clock is running, Org now checks for orphaned CLOCK lines and offers to repair these before starting the clock. You can also configure this feature to check for idle time and prompt you to subtract that time from the running timer. See the new manual section for more details. Thanks to John Wiegley for a patch to this effect. ### Mutually exclusive tag groups can now have a name in the tags interface The customize interface allows to optionally add a string to the beginning or end of such a group. Thanks to James TD Smith for a patch to this effect. ### Agenda Search view: Search for substrings The default in search view (C-c a s)is now that the search expression is searched for as a substring, i.e. the different words must occur in direct sequence, and it may be only part of a word. 
If you want to look for a number of separate keywords with Boolean logic, all words must be preceded by + or -. This was, more-or-less, requested by John Wiegley. ### Make space and backspace scroll the show window in the agenda Pressing SPC again after using it to show an agenda item in another window will make the entire subtree visible, and show scroll it. Backspace and DEL will scroll back. This was a request by Eric Fraga. ### File tags are now offered for completion during a tag prompts Requested by Matt Lundin. ### Make - SPC' an agenda filter that selects entries without any tags Request by John Wiegley. ### Better way to edit multi-line macro definitions The editing tool key C-c '= now also edits =#+MACRO definitions, including multiline macros. ### Restructured Manual The manual has been slightly reorganized. The archiving stuff, which was - somewhat obscurely - hidden in the Document Structure chapter, has been moved into the new chapter Capture-Refile-Archive. Also, there is a new chapter Markup which contains both the markup rules (moved there from the Export chapter) and the documentation for embedded LaTeX. ### Improved figure placement in LaTeX and HTML export Text can now be wrapped around figures. See the manual for details. ### Allow date to be shifted into the future if time given is earlier than now By setting (setq org-read-date-prefer-future 'time) you indicate to Org that, if you only give a time at the date/time prompt, and if this time is earlier then the current time, then the date of tomorrow will be assumed to be valid for this event. A similar mechanism was already in place for dates, but now you can make it work for times as well. ### Collected changes in org-babel • Source blocks can now reference source-blocks in other files using filepath:srcname syntax. • Inline code blocks like src_python{2+2} are now exported • Remote source block calls using the #+lob: srcname(arg=val) syntax can now be exported. 
• When :file is supplied with an R block, graphics are automatically sent to file and linked from the org buffer, thus appearing on export. The image format is obtained from the filename extension. Possible values are .png, .jpg, .jpeg, .tiff, .bmp, .pdf, .ps, .postscript, defaulting to png. • Results can be returned as parseable code using :results code, and as pretty-printed code using :results pp (emacs-lisp, python, ruby). Thanks to Benny Andresen for the idea and patch for emacs-lisp. • When :file filename is supplied, :exports file is unnecessary • Header args are taken from org-file-properties in addition to properties active in the subtree. • :noweb header argument now expands noweb references before source-block evaluation. • Tangling honours the new org variable org-src-preserve-indentation, so that correct code is output for a language like python that depends on indentation. ### Changes in org-exp-blocks.el • Interblocks export has been simplified. • Support for R code (begin_R blocks and inline \R{}) has been removed. Please use org-babel instead. ## Version 6.31 ### Org-babel is now part of the Org distribution Org-babel provides the ability to execute source code in many different languages within org-mode documents. The results of code execution – text, tables and graphics – can be integrated into Org mode documents and can be automatically updated during publishing. Since Org-babel allows execution of arbitrary code, the range of tasks that can be addressed from within an Org mode file becomes very large. 
Examples of ways in which Org-babel might be used include • Documenting a task that involves some programming so that it is automatically repeatable • Creating dynamic (executable) reports that respond to changes in the underlying data (Reproducible Research) • Exportation of code contained in an Org mode document into regular source code files (Literate Programming) Additionally, Org-babel provides a programming environment within Org files, in which data can be transmitted between parameterised source code blocks in different languages, as well as between source code blocks and Org mode tables. A simple API is defined so that users can add support for new "languages" (broadly construed). Languages currently supported are: • asymptote • css • ditaa • dot • emacs-lisp • gnuplot • ocaml • python • R • ruby • sass • sh • sql Org-babel was designed and implemented Eric Schulte with continued significant help on both accounts from Dan Davison. ### MobileOrg support Richard Morelands iPhone/iPod Touch program MobileOrg can view Org files, mark entries as DONE, flag entries for later attention, and capture new entries on the road. Org mode has now support to produce a staging area where MobileOrg can download its files, and to integrate changes done on the phone in a half automatic, half interactive way. See the new appendix B in the manual for more information. ### Indented lines starting with "#+ " are treated as comments To allow comments in plain lists without breaking the list structure, you can now have indented comment lines that start with "#+ ". ### New STARTUP keyword showeverything' This will make even drawer contents visible upon startup. Requested by Jeff Kowalczyk. ### New contributed package org-invoice.el This package collects clocking information for billing customers. Thanks to Peter Jones for this contribution. ### Encrypting subtrees org-crypt.el by John Wiegley and Peter Jones allows encryption of individual subtrees in Org mode outlines. 
Thanks to John and Peter for this contribution. ### Agenda: Support for including a link in the category string The category (as specified by an #+CATEGORY line or CATEGORY property) can contain a bracket link. While this sort-of worked in the past, it now is officially supported and should cause no problems in agenda display or update. The link can be followed by clicking on it, or with C-c C-o 0. This was a request by Peter Westlake. ## Version 6.30 ### Inconsistent changes #### Agenda now uses f and b to move through time Up to now, the Org mode agenda used the cursor keys left and right to switch the agenda view forward and backward through time. However, many people found this confusing, and others wanted to be able to do cursor motion in the agenda, for example to select text. Therefore, after an extensive discussion on [email protected], it was decided to use the b and f keys instead, and to let the cursor keys do cursor motion again. #### Agenda follow mode is now on the F key This was necessary to free up the f key, see above. ### Details #### Maintenance • New command to submit a bug report There is now a special command M-x org-submit-bug-report. This command will create a mail buffer with lots of useful details. In particular, it contains complete version information for Emacs and Org mode. It will also (if you agree to it) contain all non-standard settings of org-mode and outline-mode related variables. Even if you do not send your emails from within Emacs, please still use this command to generate the information and then copy it into your mail program. The command will not generate and include a *Backtrace* buffer, please do this yourself if you have hit an error. For more information, see the feedback section of the manual. • New contributed package org-track.el This package allows you to keep up-to-date with current Org development, using only Emacs on-board means.
So if you don't want or cannot use git, but still want to run the latest and hottest Org mode, this is for you. Thanks to Sebastian Rose for this contribution. #### Agenda • Agenda now uses f and b to move through time Up to now, the Org mode agenda used the cursor keys left and right to switch the agenda view forward an backward through time. However, many people found this confusing, and others wanted to be able to do cursor motion in the agenda, for example to select text. Therefore, after an extensive discussion on [email protected], it was decided to use the b and f keys instead, and to let the cursor keys do cursor motion again. • Agenda follow mode is now on the F key This was necessary to free up the f key, see above. • The agenda can be put into a dedicated frame When the variable org-agenda-window-setup has the value other-frame, then the new frame created to show the agenda will now have the window marked as dedicated. As a consequence, exiting the agenda while the agenda is the only window on the frame will kill that frame. This was a request by Henry Atting. • New mode to show some entry body text in the agenda There is now a new agenda sub-mode called org-agenda-entry-text-mode. It is toggled with the E key. When active, all entries in the agenda will be accompanied by a few lines from the outline entry. The amount of text can be customized with the variable org-agenda-entry-text-maxlines. This was a request by Anthony Fairchild, Manish, and others. • Improve following links from the agenda C-c C-o in the agenda will now offer all links in the headline and text of an entry. If there is only a single link, it will be followed immediately. • Avoid some duplicate entries There is a new variable that can be used to avoid some duplicate agenda entries: org-agenda-skip-scheduled-if-deadline-is-shown If that is set, it avoids that an entry shows up in the agenda for today for both a scheduling and a deadline entry. 
See the docstring of the variables for more details. This partially addresses a request by Samuel Wales. • Mark the running clock in the agenda. If the entry currently being clocked is present in the agenda, it will be highlighted with the face org-agenda-clocking. This was a request by Rainer Stengele. #### Export • Allow LaTeX export to use the listings package The LaTeX listings package can now be used for formatting fontified source code in many programming languages. For more information, see http://thread.gmane.org/gmane.emacs.orgmode/16269 and http://orgmode.org/worg/org-faq.php#fontified_source_code_w_latex Thanks to Eric Schulte for this patch. • Remove table rows that only contain width and alignment markers The width and alignment in table columns can be set with a cookie like "<10>" or "<r>" or "<r10>". In order to keep Org from exporting such lines, the first column of a line should contain only "/". However, for convenience, there is now a special case: If the entire row contains only such markers, the line will automatically be discarded during export, even is the first column is not "/". • Allow Macro calls to span several lines. Macro calls may now span several lines, to write several arguments in a cleaner way. The result of a macro call can also span several lines, by inserting the string "\n" (backslash followed by n) into the value in the macro definition. These were requests by Stefan Vollmar. #### Misc If C-c C-o is called while the cursor is in a headline, but not directly on a link, then all links in the entry will be offered in a small menu. If there is only a single link, it will be followed without a prompt. • Visibility Cycling: Allow to show all empty lines after a headline org-cycle-separator-lines can now be set to a negative value, to indicate that, if the number of empty lines before a visible entry is greater than the specified number, then all empty lines should be shown. This was a request by "PT" whatever this means. 
• Allow language names to replace some strange major mode names Sometimes a language uses a major mode which can't be guessed from it's name. There is now a new variable org-src-lang-modes which can be used to map language names to major modes when this is the case. This is used when editing a source-code block, or when exporting fontified source-code with htmlize. Thanks to Eric Schulte for a patch to this effect. • iswitchb support for many completion prompts This is enabled using org-completion-use-iswitchb, and follows the same model of usage as for ido users. Thanks to John Wiegley for a patch to this effect. • New commands to set the effort property of an entry There is now a special command, C-c C-x e to set the Effort property of an entry. From the agenda you can even use e. If you have set up allowed values for the Effort property, then using a prefix argument will directly select the nth allowed value. For example, in the agenda, 5 e will select the 5th allowed value. This was a request by Michael Gilbert • Edit src works now better with killing buffer Thanks to Dan Davison for a patch to this effect ## Version 6.29 ### Structure editing and cycling #### New minor mode org-indent-mode This mode implements outline indentation similar to clean view, but in a dynamic and virtual way, at display time. I have wanted this functionality for years and tried several implementations, all unworkable. Emacs 23 has finally made it possible. So this solution is for Emacs 23 only, and I am not sure yet how stable it really is. Time will tell. Currently I do not recommend to turn it on globally using the variable org-startup-indented. But you can turn it on for a particular buffer using #+STARTUP: indent Turning on this minor mode automatically turns on org-hide-leading-stars, and it turns off org-adapt-indentation. #### Skip CHILDREN state if there are no children When a subtree does not have any children, visibility cycling now skips the CHILDREN state. 
You can customize this behavior with the variable org-cycle-skip-children-state-if-no-children. #### Nodes without keyword can now be counted for statistics See the variable org-provide-todo-statistics for details. It can be the symbol all-headings, or a list of TODO states to consider. This was requested by David A. Gershman. #### New function org-list-make-subtree This function converts the plain list at point into a subtree, preserving the list structure. The key for this command is C-c C-*. Thanks to Ilya Shlyakhter for this suggestion. #### Headlines can be fontified to the right window border Use the variable org-fontify-whole-heading-line to turn this on. Then headline fontification will include the final newline. If your setup for headline faces includes a background different from the default background, this setup creates a visual line across the window. #### Inline tasks have become better citizens The new key C-c C-x t inserts an inline task including an END line. Inline tasks play along with (i,e, are ignored by) link creation and footnotes. Inline tasks with an END line can be refiled and archived. During the refile/archive operation, the tasks become normal tasks and the END line disappears. These improvements reflect reports and requests by Peter Westlake and Matt Lundin. #### Archive subtree and move to next visible task When archiving a task, the cursor now ends up on the next headline, so the repeated application of the archiving command will archive successive tasks. Thanks to Bernt Hansen for a patch to this effect. #### Renumbering the fn:N-like footnotes The new footnote action r will renumber simple fn:N footnotes in the current document. The action S will first do the renumbering and then sort the footnotes (the s action). This was a request by Andreas Röhler. 
#### Automatic sorting and renumbering Customize the new variable org-footnote-auto-adjust or use the #+STARTUP option fnadjust to get automatic renumbering and sorting of footnotes after each insertion/deletion. This was a request by Andreas Röhler. #### Improvements to plain-list-cycling with TAB. TAB now by default cycles visibility in plain lists if the cursor is at a plain list item. This corresponds to the new default value t of org-cycle-include-plain-lists. If you want to treat plain list items as part of the outline hierarchy during cycling of outline headings (this is what a t value used to mean), set this variable to integrate. #### Force bullet type changes during plain list demotion We now have a mechanism to force a particular bullet type when demoting a plain list item. See the variable org-list-demote-modify-bullet for details. This was a request by Rainer Stengele. ### Tables #### Relative row references may now cross hlines A relative row reference like @-1 in a table may now reach across a horizontal separator line. I hope this will not break any important tables out there, but I think it is the right thing to do. The sole original reason for not allowing such crossing was to implement running averages of one column in the next. This can now be done using field formulas near the beginning and end of the column, and a column formula for the central part. See the variable org-table-relative-ref-may-cross-hline for more details. #### Cut or copy single fields C-c C-x C-w and C-c C-x M-w now act on single table fields if there is no active region defined. #### Find agenda files linking to the current location The new command org-occur-link-in-agenda-files creates a link like org=store-link would, and then searches all agenda files for this link. So for example, you could be in a GNUS message, trying to find tasks that have links to this message. 
When inserting a link with C-c C-l, TAB completion will now not only access link prefixes, but also the stored links. ### Agenda Agenda bulk commands on marked entries now can also set the scheduling date or a deadline. Normally, all entries will be set to the specified date. However, when writing the change as "++5d" or "++2w", then each time stamp will independently be shifted by that amount. #### Tags-todo searches: No longer force to list sublevels For historic reasons, org-tags-match-list-sublevels was forced to t in tags-todo agenda searches. Now we no longer do this and accept the user setting of this variable. Thanks to Patrick Bahr for bringing this up. ### Export #### Use file-source.org format instead of file.org-source When publishing the source Org file to the source directory (i.e. if the publishing directory is the same as the source directory), then the file name will now look like file-source.org and file-source.org.html. Note that if you do use this kind of setup, you probably want to specify :exclude "-source\.org" in your publishing project, to avoid that a new generation of -source files is created each time you publish the project. #### LaTeX export: Skip title command when there is no title Using #+TITLE: without a value makes the LaTeX export ignore the value of org-export-latex-title-command. #### New option org-export-html-footnote-format This defines the format for footnote references. This string must contain %s which will be replaced by the footnote label. #### More export options for source code examples Allow whitespace in code references. Allow the -r switch to remove the references in the source code even when the lines are not numbered: the labels can be explicit enough. Note that -r -k is the same as no switch at all. Thanks to Ulf Stegemann for bring this up. #### LaTeX export: Allow more environment for low-level headings The user can now define a non-standard environment or macro to handle export of low-level headings to LaTeX. 
For details, see the variable org-export-latex-low-levels. #### LaTeX export: Add postscript file extensions for images Some people process LaTeX files not directly to pdf, but go through dvi and then to ps or pdf. In that case, allowed images are ps and eps files, not pdf and jpg. This commit adds the two extensions, so that export using that alternative path can be supported better. However, it is up to the user to make sure that the images are actually compatible with the backend. #### HTML export: Show UP and HOME links org-export-html-link-up and org-export-html-link-home are now also inserted into normal HTML export, above the page title. #### General mechanism for local variable settings Many different people want to set many different variables in a buffer-local way for export. This cannot be done with file variables, because the Org buffer is not current while the exporter is running. Lots of variables can be set with the #+OPTIONS lines, but finding abbreviations goes only so far. Therefore we have now a general mechanism that can be used to bind variables during export operations. A line like: #+BIND: variable value will bind the variable to value. For example, the line #+OPTIONS: toc:nil can now equivalently be written as #+BIND: org-export-with-toc nil #### Clean out publishing timestamp directory When changing the publishing setup, old timestamp files can be left behind. Forcing publishing of all projects with C-u C-c C-e E will remove all existing timestamp files. ### Miscellaneous #### Calendar for reading a date forced into current frame. Separate-frame setup for calendar had caused problems in AquaEmacs. You can now set a timer related to any headline, like an alarm clock. Three new commands have been defined: org-timer-set-timer bound to C-c C-x ; in Org buffers and to ; in Org agenda buffers. This function sets a timer for the headline the cursor is currently it. Up to three timers can be used at any time. 
org-timer-show-remaining-time Show the remaining time for the last timer set. org-timer-cancel-timers Cancel all timers. This functionality was requested by Samuel Wales and emulates that of tea-time.el – see the emacswiki doc at http://www.emacswiki.org/emacs/tea-time #### Clock reports may include a time stamp Using :timestamp t as an option in a clock report now allows insertion of the timestamp for the clocked entry. Timestamps are searched for in this order: SCHEDULING, TIMESTAMP, DEADLINE and TIMESTAMP_IA. #### New option org-id-uuid-program On some systems, uuidgen is named uuid. See the variable org-show-notification-handler. #### New option org-tags-sort-function. This allows tags to be sorted by string<, string>, or a custom function. Thanks to James TD Smith for a patch to this effect. #### Improvements for org-feed.el Bug fixes, and allowing to choose between wget and curl. Thanks to Christopher League for a patch to this effect. ## Version 6.28 ### Agenda changes #### Refiling now works from the agenda The command C-c C-w can be executed to refile an entry shown in the agenda. After the command, the entry will no longer be shown in the agenda. If it is still in an agenda file, refresh the agenda to bring it up from its new context. #### Bulk action You can now use the m key to mark entries in the agenda. u will unmark the entry at point, and U will unmark everything. When one or more entries have been selected, the B key will execute an action on all selected entries. I believe this bulk action mainly makes sense for the commands that require answering interactive prompts. So far the supported actions are • Refile all selected entries to a single destination • Archive all selected entries • Set the TODO state of all selected entries, bypassing any blocking or note-taking. • Add or remove a tag to/from all selected entries We can add more actions, if you convince me they make sense.
#### Modified keys To make room for the new Bulk action commands, some keys in the agenda buffer had to move: There is a new command bound to the v key, it dispatches various view mode changes. Month and year view are now only available as v m and v y, respectively. Turning on inclusion of archive trees and files (used to be on v) is now on v a and v A. ### Improvements related to #+begin blocks #### Indented blocks #+begin_ ... #+end_... blocks may now be indented along with the structure of your document. So the #+ lines no longer need to start in column 0, these lines can be, along with the block contents, indented arbitrarily. Org supports this during editing with "C-c '", and now finally treats them consistently during export across all backends. This makes these blocks work much better with plain list structure editing, and it also looks better if you like to indent text under outline headings. For example: *** This is some headline , #+begin_example , here we have an example , #+end_example , , - a plain list , - a sublist item , - a second sublist item , #+begin_center , centering within the plain list item , #+end_center , #+begin_example , This example does terminate the sublist, , the indentation of the #+begin line counts. , #+end_example , - but the top level plain list continues here From now on, the indentation of such a block decides whether it is part of a plain list item or if it is actually terminating the list. This was so far inconsistent between editing behavior and export, now it is consistent. The content of the block, i.e. the text between the #+ lines gets an extra indentation of two space characters, which I find visually pleasing. You can change the amount of extra indentation using the variable org-src-content-indentation. This was a pretty complex change, achieved in many small steps over the last couple of weeks. It cleans up one of the more annoying inconsistencies in Org.
I hope it will work, but I am sure you will let me know if not. #### Indented tables Also tables can be fully indented now. What is new here is that the #+TBLFM line, and also things like #+caption, #+label, #+attr_... etc can be indented along with the table. Again, this makes the look of the document better and allows for proper plain list structure editing. #### Protected blocks Some #+begin_ ... +#end_... blocks contain text that should not be processed like normal Org mode text. example and src block fall into this class, and so do ditaa blocks, for example. The content in such blocks is now properly fontified in a single face (called org-block). This was a frequently requested feature. The list of blocks that should be protected from normal Org mode fontification is defined in the variable org-protecting-blocks. Modules defining new blocks should add to this variable when needed. org-exp-blocks.el does this already. #### Hide and show the contents of blocks Blocks can now be folded and unfolded with TAB. If you want to have all blocks folded on startup, customize org-hide-block-startup or use the #+STARTUP options hideblocks or showblocks to overrule this variable on a per-file basis. Thanks to Eric Schulte for a patch to this effect. #### Moved Eric Schulte's org-exp-blocks.el into the core This seems to be getting a lot of use now, so it is now part of the core and loaded automatically. This package can now also be used to define new blocks. Customize the variable org-export-blocks or use the function org-export-blocks-add-block. ### New and updated contributed modules #### org-export-generic.el is now a contributed package. This new module allows users to export an Org page to any type of output by constructing the output using a list of prefixes, format specifications and suffixes for the various types of org data (headlines, paragraphs, list bullets, etc). 
Use the org-set-generic-type function to define your own export types and have them bound to a key (use an upper-case letter for user export definitions). Thanks to Wes Hardaker for this contribution with a lot of potential. #### org-R.el: Updated. org-R.el has been updated, thanks to Dan Davison for this. #### [ TABLE-OF-CONTENTS] is now also used for LaTeX export This cookie will mark the location of the \tableofcontents macro. Triggered by a report by Yuva. ### Changes to the clocking system #### New option org-clock-out-switch-to-state'. Clocking out can now switch the task to a particular state. This was a request by Manish. #### More control about what time is shown in mode line while clocking • If you have an Effort property defined, its value is also shown in the mode line, and you can configure org-clock-sound to get an alert when your planned time for a particular item is over. • When an entry has been clocked earlier, the time shown in the mode line while the item is being clocked is now the sum of all previous, and the current clock. • The exception to the previous rule are repeating entries: There the clock time will only be clocking instances recorded since the last time the entry when through a repeat event. The time of that event is now recorded in the LAST_REPEAT property • You can use the property CLOCK_MODELINE_TOTAL to get control over what times are displayed in the mode line, see the manual for more information. • The new command C-c C-x C-e can be used to change the Effort estimate and therefore to change the moment when the clock sound will go off. • The clock string in the modeline now has a special font, org-mode-line-clock. This was a proposal by Samuel Wales. • Clicking on the mode line display of the clock now offers a menu with important clock functions like clocking out, or switching the clock to a different task. 
Thanks to Konstantin Antipin for part of the implementation, and thanks to Bernt Hansen for helping to iron out the issues related to repeated tasks. ### Miscellaneous changes #### Allow to specify the alignment in table columns by hand Similar to the <20> cookies that allow to specify a maximum width for a table column, you can now also specify the alignment in order to overrule the automatic alignment choice based on the dominance of number or non-number fields in a column. The corresponding cookies are <l> and <r> for left and right side alignment, respectively. These can be combined with maximum width specification like this: <r15>. This was a proposal by Michael Brand. #### Stop logging and blocking when selecting a TODO state Sometimes you want to quickly select or change a TODO state of an item, without being bothered by your setup for blocking state changes and logging entries. So in this case, you don't want the change be seen as a true state change. You can now set the variable org-treat-S-cursor-todo-selection-as-state-change to nil. Then, when you use S-left and S-right to quickly flip through states, blocking and logging will be temporarily disabled. #### Export BBDB anniversaries to iCalendar See the variable org-icalendar-include-bbdb-anniversaries'. This was a request by Richard Riley, thanks to Thomas Baumann for the prompt implementation. #### Macro definitions can be collected in an #+SETUPFILE If you want to use many macros in different files, collect the #+macro lines into a file and link to them with #+SETUPFILE: path/to-file #### Subtree cloning now also shifts inactive dates When using the command org-clone-subtree-with-time-shift, time stamps will be shifted for each clone. So far, this applied only to active timestamps, but now it does apply to inactive ones as well. 
#### HTML table export: Assign alternating classes to rows The new variable org-export-table-row-tags can now be set up in a way so that different table lines get special CSS classes assigned. This can be used for example to choose different background colors for odd and even lines, respectively. The docstring of the variable contains this example: (setq org-export-table-row-tags "<tr>" (if (= (mod nline 2) 1) "<tr class=\"tr-odd\">" "<tr class=\"tr-even\">")) "</tr>")) It makes use of the local variables head and nline which are used to check whether the current line is a header line, and whether it is an odd or an even line. Since this is fully programmable, you can do other things as well. This was a request by Xin Shi. #### Remember: target headline may be a function When setting up remember templates, the target headline may now be a function, similarly to what is allowed for the target file. The functions needs to return the headline that should be used. #### Remove flyspell overlays in places where they are not wanted We now keep flyspell from highlighting non-words in links. #### Update targets in the Makefile Some new targets in the default Makefile make it easier to update through git to the latest version: update and up2. Here are the definitions. update: git pull ${MAKE} clean${MAKE} all up2: update sudo ${MAKE} install This was a request by Konstantin Antipin. 
## Version 6.27 ### Details #### Macros for export Macro processing for export has been enhanced: • You can use arguments in a macro, for example #+macro: hello Greet the $1: Hello $1 which would turn {{{hello(world)}}} into Greet the world: Hello world • The macro value can be an emacs-lisp form to be evaluated at the time of export: #+macro: datetime (eval (format-time-string "$1")) • More built-in default macros: date(FORMAT_TIME_STRING) Time/Date of export time(FORMAT_TIME_STRING) Same as date modification-time(FORMAT_TIME_STRING) Last modification of file input-file Name of the input file The new built-in macros have been requested by Daniel Clemente. #### Link completion for files and bbdb names Org now has a general mechanism by which modules can provide enhanced support (for example through completion) when adding a link. For example, when inserting a link with C-c C-l, you can now type file: followed by RET to get completion support for inserting a file. After entering bbdb: and RET, a completion interface will allow you to complete names in the BBDB database. These are the only ones implemented right now, but modules that add a link type xyz: can simply define org-xyz-complete-link that should return the full link with prefix after aiding the user to create the link. For example, if you have http links that you have to insert very often, you could define a function org-http-complete-link to help selecting the most common ones. #### Source file publishing It is now easy to publish the Org sources along with, for example, HTML files. In your publishing project, replace :publishing-function org-publish-org-to-html with :publishing-function (org-publish-org-to-html org-publish-org-to-org) :plain-source t :htmlized-source t to get both the plain org file and an htmlized version that looks like your editing buffer published along with the HTML exported version.
#### Push exported stuff to kill ring All exporters now push the produced material onto the kill-ring in Emacs, and also to the external clipboard and the primary selection to make it easy to paste this under many circumstances. #### Tables in LaTeX without centering Set the variable org-export-latex-tables-centered' to nil if you prefer tables not to be horizontally centered. Note that longtable tables are always centered. #### LaTeX export: TODO markup configurable The markup for TODO keywords in LaTeX export is now configurable using the variable org-export-latex-todo-keyword-markup. #### ASCII export to buffer ASCII export has now the same command variations as the other export backends, for example exporting to a temporary buffer instead of a file. The was a request by Samuel Wales. #### Accessibility improvements for HTTP tables When exporting tables to HTML, Org now adds scope attributes to all header fields, in order to support screen readers. Setting the variable org-export-html-table-use-header-tags-for-first-column will request using <th> instead of <td> also in the entire first column, so that also row information can be scoped. This was triggered by a request by Jan Buchal, and as usually Sebastian Rose came up with the right implementation. #### Timezone information in iCalendar files The timezone information in iCalendar files is now written in the correct format, and can be set in the variable org-ical-timezone. This variable is initialized from the TZ environment variable. #### New contributed package org-special-blocks.el The package turns any "undefined" #+begin_... blocks into LaTeX environments for LaTeX export, and into <div> tags for HTML export. Thanks to Chris Gray for this contribution. #### More flexibility about placing logging notes. Logging into a drawer can now also be set for individual subtrees using the LOG_INTO_DRAWER property. Requested by Daniel J. Sinder. 
Reloading Org has moved to a new key, C-c C-x !, and is now also available in the agenda. #### Start Agenda with log mode active Set the new option org-agenda-start-with-log-mode to have log mode turned on from the start. Or set this option for specific custom commands. Thanks to Benjamin Andresen for a patch to this effect. #### Agenda speed optimizations Depending on circumstances, constructing the agenda has become a lot faster. Triggered by Eric S Fraga's reports about using Org on a slow computer like a netbook. #### New face for today in agenda The date that is today can now be highlighted in the agenda by customizing the face org-agenda-date-today. Thanks to Dmitri Minaev for a patch to this effect. #### Properties to disambiguate statistics When an entry has both check boxes and TODO children, it is not clear what kind of statistics a cookie should show. You can now use the COOKIE_DATA property to disambiguate, by giving it a value "todo" or "checkbox". Thanks to Ulf Stegeman, who was persistent enough to push this change past my initial resistance. #### Checkboxes and TODO items: recursive statistics Setting the variable org-hierarchical-checkbox-statistics to nil will make statistics cookies count all checkboxes in the entire hierarchy below it. Setting the variable org-hierarchical-todo-statistics to nil will do the same for TODO items. To turn on recursive statistics only for a single subtree, add the word "recursive" to the COOKIE_DATA property. Note that you can have such a property containing both "todo" or "checkbox" for disambiguation, and the word "recursive", separated by a space character. The change for checkboxes was a patch by Richard Klinda. #### New operators for column view Column view has new operators for computing the minimum, maximum, and mean of property values. Thanks to Mikael Fornius for a patch to this effect. ## Version 6.26 ### Details #### custom IDs Entries can now define a CUSTOM_ID property.
This property must be a valid ID according to HTML rules, and it will be used in HTML export as the main target ID for this entry. That means, both the table of contents and other internal links will automatically point to this ID instead of the automatic ID like sec-1.1. This is useful to create human-readable permanent links to these locations in a document. The user is responsible to make sure that custom IDs are unique within a file. Links written like [[#my-target-name]] can be used to target a custom ID. When using C-c l to store a link to a headline that has a custom ID, Org will now create two links at the same time. One link will be to the custom ID. The other will be to the globally unique ID property. When inserting the line with C-c C-l, you need to decide which one you want to use. Use the ID links for entries that are expected to move from one file to the next. Use custom ID links for publishing projects, when you are sure that the entry will stay in that file. See also the variable org-link-to-org-use-id. #### Remember to non-org files If the target headline part of a remember template definition entry is top or bottom, the target file may now be a non-Org mode file. In this case, the content of the remember buffer will be added to that file without enforcing an Org-like headline. Sorry, Russel, that this took so long. #### New property to turn off todo dependencies locally Setting the property NOBLOCKING will turn off TODO dependency checking for this entry. #### Refile verify A new function is called to verify tasks that are about to be selected as remember targets. See the new variable org-refile-target-verify-function. #### New version of ditaa.jar Thanks to Stathis Sideris. #### htmlize.el is now in the contrib directory The latest version of htmlize.el is now in the contrib directory of Org. Thanks to Hrvoje Niksic for allowing this.
## Version 6.25 ### Major new features #### DocBook export We now do have a fully functional DocBook exporter, contributed by Baoqiu Cui. Simple press C-c e D to export the current file to DocBook format. You can also get direct conversion to PDF if you have made the correct setup, please see the manual for details. Kudos to Baoqiu for this fantastic addition, and my personal thanks for doing this in a such a smooth way that I did not have to do anything myself. org-protocol.el is a new module that supersedes both org-annotation-helper.el and org-browser.el and replaces them with a more abstracted interface. org-protocol intercepts calls from emacsclient to trigger custom actions without external dependencies. Only one protocol has to be configured with your external applications or the operating system, to trigger an arbitrary number of custom actions. Just register your custom sub-protocol and handler with the new variable org-protocol-protocol-alist. org-protocol comes the with three standard protocol handlers (in parenthesis the name of the sub-protocol): org-protocol-remember (remember) Trigger remember org-protocol-store-link (store-link) org-protocol-open-source (open-source) Find the local source of a remote web page. Passing data to emacs is now as easy as calling emacsclient org-protocol://sub-protocol://data Thanks to Sebastian Rose for this really beautiful module. Inline tasks are tasks that have all the properties of normal outline nodes, including the ability to store meta data like scheduling dates, TODO state, tags and properties. But these tasks are not meant to introduce additional outline structure, at least as far as visibility cycling and export is concerned. They are useful for adding tasks in extensive pieces of text where interruption of the flow or restructuring is unwanted. 
This feature is not turned on by default, you need to configure org-modules to turn it on, or simply add to you .emacs file: (require 'org-inlinetask) After that, tasks with level 15 (30 stars when using org-odd-levels-only) will be treated as inline tasks, and fontification will make obvious which tasks are treated in this way. Org can now collect tasks from an RSS feed, a great method to get stuff from online call and note-taking services into your trusted system. You need to configure the feeds in the variable org-feed-alist. The manual contains a short description, more detailed information is available on Worg. Full credit goes to Brad Bozarth who really paved the way for this exciting new feature. ### Export #### Allow modification of table attributes in HTML export The #+ATTR_HTML line can now be used to set attributes for a table. Attributes listed in that line will replace existing attributes in org-export-html-table-tag, or will add new ones. For example #+ATTR_HTML: border="2" rules="all" frame="all" #+CAPTION: Finally a table with lines! | a | b | |---|---| | 1 | 2 | #### LaTeX low levels are now exported as itemize lists LaTeX export now treats hierarchy levels 4,5, etc as itemize lists, not as description lists as before. This is more consistent with the behavior of HTML export. You can configure this behavior using the variable org-export-latex-low-levels. #### Markup for centering. Text can be exported centered with #+BEGIN_CENTER ,Everything should be made as simple as possible, \\ ,but not any simpler #+END_CENTER #### Sitemap file is now sitemap.org Org-publish can produce a list of all files in a project. Previously the file containing this list was called "index.org", really a brain-dead default because during publication it would overwrite the "index.html" file of the website. The default file name is now "sitemap.org". 
#### Protect explicit target links in HTML export If a link is [[#name][desc]], the href produced when exporting the file will be exactly href="#name". So starting a link target with # will indicate that there will be an explicit target for this. #### HTML export: Allow "- ___" to explicitly terminate a list If a list contains "- ___" (three underscores) as an item, this terminates the list, ignoring this item. This is an experimental feature, it may disappear again if we find other ways to deal with literal examples right after lists. See this mailing list thread for context. ### Agenda #### Changing the time of an entry from the agenda We now have a way to change not only the date, but also the start time of an entry from the agenda. The date is normally changed with S-right/left. Now, if you add a C-u prefix, the hour will be changed. If you immediately press S-right/left again, hours will continue to be changed. A double prefix will do the same for minutes. If the entry has a time range like 14:40-16:00, then both times will change, preserving the length of the appointment. #### Show saved PDF agenda view with prefix arg When writing an agenda view to a PDF file, supplying a prefix argument (C-u C-x C-w) will get the new file displayed immediately. This was a request by Alan E Davis. #### Filter for entries with no effort defined During secondary agenda filtering, pressing "?" now will install a filter that selects entries which do not have an effort defined. This new model was necessary because we needed to stop interpreting entries with no effort defined as 0 effort. This was inconsistent, because for normal agenda sorting, the treatment of these entries depends on the variable org-sort-agenda-noeffort-is-high. Now this variable is also respected during filtering. This new feature resulted from a discussion with Matt Lundin and Bernt Hansen.
#### Introduce user-defined sorting operators The new variable org-agenda-cmp-user-defined can contain a function to test how two entries should be compared during sorting. The symbols user-defined-up and user-defined-down can then be part of any sorting strategy. This was a request by Samuel Wales. #### Indentation of subitems in the agenda When a tags/property match does match an entry and it's sublevels, the sublevels used to be indented by dots, to indicate that the matches likely result from tag inheritance. This is now no longer the default, so the subitems will not get special indentation. You can get this behavior back with (setq org-tags-match-list-sublevels 'indented) #### Stuck projects search now searches subtrees of unstuck projects When, during a stuck-project search, a project tree is identified as not stuck, so far the search would continue after the end of the project tree. From now on, the search continues in the subtree, so that stuck subprojects can still be identified. ### Miscellaneous #### Citations: Use RefTeX to insert citations RefTeX can now be used to create a citation in Org mode buffers. Setup the buffer with #+BIBLIOGRAPHY: bibbase style and create citations with C-c C-x [. Together with org-exp-bibtex.el by Taru Karttunen (available as a contributed package), this provides a great environment for including citations into HTML and LaTeX documents. #### Changing time ranges as a block When using the S-cursor keys to change the first time in a time range like <2009-04-01 Wed 14:40-16:40> then the end time will change along, so that the duration of the event will stay the same. This was a request by Anupam Sengupta. #### New sparse tree command A new sparse tree command shows entries with times after a certain date. Keys are C-c / a, this command is for symmetry with C-c / b. A new command allows to create clone copies of the current entry, with shifted dates in all stamps in the entry. 
This is useful to create, for example, a series of entries for a limited time period. I am using it to prepare lectures, for example. #### New face for checkboxes Checkboxes now have their own face, org-checkbox. This can be used for nice effects, for example choosing a face with a box around it: (custom-set-faces '(org-checkbox ((t (:background "#444444" :foreground "white" :box (:line-width 1 :style released-button)))))) #### M-a and M-e for navigation in a table field In table fields, the sentence commands M-a and M-e are redefined to jump to the beginning or end of the field. This was a request by Bastien Guerry. #### Backup files for remember buffers Sometimes users report that they lost data when not immediately storing a new remember note, and then later exiting Emacs or starting a new remember process. Now you can set the variable org-remember-backup-directory. Each remember buffer created will then get its own unique file name in that directory, and the file will be removed only if the storing of the note to an Org file was successful. #### org-mac-message.el: New functions to access flagged mail Christopher Suckling has added functionality to org-mac-message.el. In particular, you can now select a number of messages and easily get links to all of them with a single command. For details, see the online documentation. #### New hook for setting up the minibuffer when reading a date The new hook org-read-date-minibuffer-setup-hook is called when setting up the minibuffer for reading a date. It can be used to install new keys into the temporary keymap used there. ## Version 6.24 ### Incompatible changes #### Tag searches are now case-sensitive From this release on, tag searches will be case sensitive. While I still think it would be nice to have them case-insensitive, this was both an inconsistency (TODO keyword searches have always been case-sensitive), and trouble for coding some efficient algorithms. So please make sure that you give the tags with correct casing when prompted for a match expression.
#### New key for creating tags/property sparse trees The key to produce a sparse tree matching tags and properties is now C-c / m instead of C-c a T. This is also more consistent with the C-c a m key for the corresponding agenda view. C-c / T will still work for now, but it is no longer advertised in the documentation and may go away at any time in the future. #### IDs in HTML have "ID-" prefix when generated by uuidgen uuidgen generates IDs that often start with a number, not a letter. However, IDs and names in XHTML must start with a letter. Therefore, IDs in HTML files will now get an "ID-" prefix if they have been generated by uuidgen. This means that id links from one file to another may stop working until all files have been exported again. #### In agenda, only priority cookies get the special face So far, an entire task would get a special face when org-agenda-fontify-priorities was set. Now, the default value for this variable is the symbol cookies, which means that only the cookie is fontified. Set it to t if you want the entire task headline to be fontified. ### Details #### PDF export of agenda views Agenda views can now be exported to PDF files by writing them to a file with extension ".pdf". Internally this works by first producing the postscript version and then converting that to PDF using the ghostscript utility ps2pdf. Make sure that this utility is installed on your system. The postscript version will not be removed, it will stay around. #### Inline some entry text for Agenda View export When exporting an agenda view to HTML or PDF for printing or remote access, one of the problems can be that information stored in entries below the headline is not accessible in that format. You can now copy some of that information to the agenda view before exporting it. For this you need to set the variable org-agenda-add-entry-text-maxlines to a number greater than 0.
(setq org-agenda-add-entry-text-maxlines 20) Or you can do this with the settings in a custom agenda view, for example: ("A" "" agenda "" ((org-agenda-ndays 1) ("agenda-today.pdf")) #### Improved ASCII export of links ASCII export of links works now much better. If a link has a link and a description part which are different, then the description will remain in the text while the link part will be moved to the end of the current section, before the next heading, as a footnote-like construct. Configure the variable org-export-ascii-links-to-notes if you prefer the links to be shown in the text. In this case, Org will make an attempt to wrap the line which may have become significantly longer by showing the link. Thanks to Samuel Wales for pointing out the bad state of ASCII link export. #### Custom agenda commands can specify a filter preset If a custom agenda command specifies a value for org-agenda-filter-preset in its options, the initial view of the agenda will be filterd by the specified tags. Applying a filter with / will then always add to that preset filter, clearing the filter with / / will set it back to the preset. Here is an example of a custom agenda view that will display the agenda, but hide all entries with tags FLUFF or BLUFF: ("A" "" agenda "" ((org-agenda-filter-preset '("-FLUFF" "-BLUFF")))) This is in response to a thread on the mailing list, started by Daniel Clemente and with great contributions by Bernt Hansen and Matt Lundin. #### Exporting of citations to LaTeX and HTML, using BibTeX Citations can now me made using BibTeX, and will be exported to LaTeX and HTML. This is implemented in a contributed package by Taru Karttunen, org-exp-bibtex.el. Kudos to Taru for this really nice addition. #### Finally a way to specify keywords and description for HTML export Use something like #+DESCRIPTION: This page is all about .... #+KEYWORDS: org-mode, indexing, publishing To specify the content of the description and keywords meta tags for HTML output. 
#### org-collector.el is now a contributed package org-collector.el provides functions to create tables by collecting and processing properties from entries in a specific scope like the current tree or file, or even from all agenda files. General lisp expressions can be used to manipulate the property values before they are inserted into an org-mode table, for example as a dynamic block that is easy to update. Thanks to Eric Schulte for yet another great contribution to Org. #### Update of org2rem.el org2rem.el has been updated significantly and now does a more comprehensive job of exporting Org events to remind. Thanks to Sharad Pratap for this update. #### New div around the entire page in HTMP export A new <div id=content> is wrapped around the entire page, everything that is inside <body>. This means that you need to update org-info.js (if you have a local copy). It will be safe todo so, because the new org-info.js still handles older pages correctly. Thanks to Sebastian Rose for making these changes so quicky. #### Clustering characters for undo When typing in Org mode, undo will now remove up to 20 characters at a time with a single undo command. This is how things work normally in Emacs, but the special binding of characters in Org mode made this impossible until now. Thanks to Martin Pohlack for a patch which mimicks the behavior of the Emacs command loop for the Org version of self-insert-command. Note that this will not work in headlines and tables because typing there will do a lot of extra work. There might be a small typing performance hit resulting from this change - please report in the mailing list if this is noticeable and annoying. #### Separate settings for special C-a and C-e The variable org-special-ctrl-a/e now allows separate settings for C-a and C-e. For example (setq org-special-ctrl-a/e '(reversed . t)) Thanks to Alan Davis for this proposal. 
#### orgstruct++-mode improvements In addition to orgstruct-mode which allows to use some Org mode structure commands in other major modes, there is a more invasive version of this mode: orgstruct++-mode. This mode will import all paragraph and line wrapping variables into the major mode, so that, for example, during typing the auto-fill wrapping of items will work just like in Org mode. This change is not reversible, so turning off orgstruct++-mode will not remove these settings again. orgstruct++-mode is most useful in text modes like message-mode or magit-log-edit-mode. Furthermore, orgstruct++-mode will recognize plain list context not only in the first line of an item, but also further down, so that M-RET will correctly insert new items. Thanks to Austin Frank for requesting some of these changes. #### Promotion and demotion works for regions now M-right and M-left now do demote and promote all headlines in an active region. #### Match syntax for tags/properties is now described in a single place The manual chapters about tags and about properties now only refer to the section about agenda views, where the general syntax of tag/property matches is described. #### Macro replacement A string like {{{ title }}} will be replaced by the title of the document, {{{ email }}} by the email setting of the author and similarly for other export settings given in #+... lines. In addition to that, you can define an arbitrary number of macros, for example: #+MACRO: myaddress 41 Onestreet, 12345 New York, NY ,... Macro replacement is the very first thing that happens during export, and macros will be replaced even in source code and other protected regions. #### New reload command, with keyboard access There is now a special command to reload all Org Lisp files, so that you can stay in your Emacs session while pulling and compiling changes to Org. The command to reload the compiled files (if available) is C-c C-x r. 
If no compiled files are found, uncompiled ones will be loaded. If you want to force loading of uncompiled code (great for producing backtraces), use a prefix arg: C-u C-c C-x r. Both commands are available in the menu as well. This new command was inspired by one written earlier by Bernt Hansen. #### Faces for priority cookies can now be set freely The new variable org-priority-faces can be used to set faces for each priority. #### New key for creating tags/property sparse trees The key to produce a sparse tree matching tags and properties is now C-c / m instead of C-c a T. This is more consistent with the C-c a m key for the corresponding agenda view. C-c / T will still work for now, but it is no longer advertised in the documentation and may go away at any time in the future. #### IDs in HTML have "ID-" prefix when generated by uuidgen uuidgen generates IDs that often start with a number, not a letter. However, IDs and names in XHTML must start with a letter. Therefore, IDs in HTML files will now get an "ID-" prefix if they have been generated by uuidgen. This means that id links from one file to another may stop working until all files have been exported again, so that both links and targets have the new prefix. #### In agenda, only priority cookies get the special face So far, an entire task would get a special face when org-agenda-fontify-priorities was set. Now, the default value for this variable is the symbol cookies, which means that only the cookie is fontified. Set it to t if you want the entire task headline to be fontified. #### Turning off time-of-day search in headline Some people like to put a creation time stamp into a headline and then get confused if the time-of-day found in there shows up as the time-of-day of the deadline/scheduling entry for this headline. 
The reason for this is that Org searches the headline for a free-format time when trying to sort the entry into the agenda, and that search accidentally finds the time in the creation time stamp or something else that happens to look like a time. If this is more painful than useful for you, configure the new variable org-agenda-search-headline-for-time. ## Version 6.23 ### Overview • Capture state change notes into a drawer • Clock lines are now captured into the LOGBOOK drawer as well • Added org-R.el to contrib directory • Allow individual formatting of each TODO keyword in HTML export • New hooks for add-ons to tap into context-sensitive commands • Publishing files irrespective of extension • New variable index in the manual • The ORDERED property also influences checkboxes • The ORDERED property can be tracked with a tag • You may now specify line breaks in the fast tags interface • When a TODO is blocked by checkboxes, keep it visible in agenda • LaTeX can import Org's in-buffer definitions for TITLE, EMAIL etc. ### Incompatible changes • CLOCK lines will now be captured into the LOGBOOK drawer. See below for details. ### Details #### Capture state change notes into a drawer State change notes can now be captured into a drawer LOGBOOK, to keep the entry tidy. If this is what you want, you will need this configuration: (setq org-log-into-drawer "LOGBOOK") Thanks to Wanrong Lin for this proposal. #### Clock lines are now captured into the LOGBOOK drawer as well The CLOCK drawer will be abandoned, clock lines will now also end up in a drawer LOGBOOK. The reason for this is that it's a bit useless to have two different drawers for state change notes and clock lines. If you wish to keep the old way, use (setq org-clock-into-drawer "CLOCK") #### Added org-R.el to contrib directory Dan Davison has contributed org-R.el which is now in the contrib directory. Org-R performs numerical computations and generates graphics. 
Data can come from org tables, or from csv files; numerical output can be stored in the org buffer as org tables, and links are created to files containing graphical output. Although, behind the scenes, it uses R, you do not need to know anything about R. Common operations, such as tabulating discrete values in a column of an org table, are available "off the shelf" by specifying options on lines starting with #+R:. However, you can also provide raw R code to be evaluated. The documentation is currently the worg tutorial at http://orgmode.org/worg/org-tutorials/org-R/org-R.php Thanks to Dan for this great contribution. #### Allow individual formatting of TODO keyword and tags in HTML export TODO keywords in HTML export have the CSS class todo or done. Tags have the CSS class tag. In addition to this, each keyword has now itself as class, so you could do this in your CSS file: .todo { font-weight:bold; } .done { font-weight:bold; } .TODO { color:red; } .WAITING { color:orange; } .DONE { color:green; } If any of your keywords causes conflicts with CSS classes used for different purposes (for example a tag "title" would cause a conflict with the class used for formatting the document title), then you can use the variables org-export-html-tag-class-prefix and org-export-html-todo-kwd-class-prefix to define prefixes for the class names for keywords, for example "kwd-". Thanks to Wanrong Lin for this request, and to Sebastian Rose for help with the implementation. #### New hooks for add-ons to tap into context-sensitive commands Some commands in Org are context-sensitive, they will execute different functions depending on context. The most important example is of course C-c C-c, but also the M-cursor keys fall into this category. Org has now a system of hooks that can be used by add-on packages to install their own functionality into these keys. See the docstring of org-ctrl-c-ctrl-c-hook for details. 
The other hooks are named like org-metaleft-hook or org-shiftmetaright-hook. #### Publishing files irrespective of extension If you set the :base-extension property for a publishing project to the symbol any, all files in the directory will be published, irrespective of extension. Thanks to Richard Klinda for a patch to this effect. #### New variable index in the manual A new index in the manual lists all variables mentioned in the manual, about 200 variables in total. #### The ORDERED property also influences checkboxes When an entry has the ORDERED property set, checkboxes in the entry must be completed in order. This was already the case for children TODO items, now it also applies for checkboxes. Thanks to Rainer Stengele for this proposal. #### The ORDERED property can be tracked with a tag The ORDERED property is used to flag an entry so that subtasks (both children TODO items and checkboxes) must be completed in order. This property is most easily toggled with the command C-c C-x o. A property was chosen for this functionality, because this should be a behavior local to the current task, not inherited like tags. However, properties are normally invisible. If you would like visual feedback on the state of this property, configure the variable org-track-ordered-property-with-tag. If you then use C-c C-x o to toggle the property, a tag will be toggled as well, for visual feedback. Note that the tag itself has no meaning for the behavior of TODO items and checkboxes, and that changing the tag with the usual tag commands will not influence the property and therefore the behavior of TODO and checkbox commands. #### You may now specify line breaks in the fast tags interface Up to now, the fast tags interface tried to lump as many tags as possible into a single line, with the exception that groups would always be on a line by themselves. 
Now, if you use several lines to define your tags, like #+TAGS: aa(a) bb(b) cc(c) #+TAGS: dd(d) ee(e) ff(f) then there will also be a line break after the "cc" tag in the fast tag selection interface. You may also write #+TAGS: aa(a) bb(b) cc(c) \n dd(d) ee(e) ff(f) to achieve the same effect, and you can use \n several times in order to produce empty lines. In org-tag-alist, newlines are represented as (:newline). Thanks to Christopher Suckling for a patch to this effect. #### When a TODO is blocked by checkboxes, keep it visible in agenda When the variable org-agenda-dim-blocked-tasks is set to invisible, tasks that are blocked will not be visible in the agenda. If the blocking is due to child TODO entries, this does make sense because the children themselves will show up in the TODO list. However, as John Rakestraw has pointed out, if the blocking is done by checkboxes, no trace of these subtasks is left. Therefore, when the blocking is done by checkboxes, we now overrule the invisible setting and replace it with mere dimming of the task. #### LaTeX can import Org's in-buffer definitions for TITLE, EMAIL etc. If you configure org-export-latex-import-inbuffer-stuff, in-buffer definitions like #+TITLE will be made available in the LaTeX file as \orgTITLE. This was a request by Russel Adams. ## Version 6.22 ### Details #### org-choose.el by Tom Breton is now included Org-choose helps documenting a decision-making process by using TODO keywords for different degrees of chosenness, and by automatically keeping a set of alternatives in a consistent state. Documentation for org-choose.el is available here. This package inserts itself into Org using hooks, so if other people would like to do interesting stuff with TODO keywords for special purposes, looking at Tom's code might be a good way to start. Thanks to Tom for this interesting contribution! #### orgmode.org and Worg css works now better on IE Thanks to Sebastian Rose for making these changes. 
#### When exporting a subtree, headline levels are now relative to parent This was reported as a bug by William Henney and is fixed now. #### Inactive dates in tables can be used for sorting. When sorting table fields or entries by date, Org first tries to find an active date, and, if none exist, uses a passive date if present. This was a request by Hsui-Khuen Tang #### The default for org-return-follows-link is back to nil Setting it to t violates Emacs rules to some extent. The internal implementation of this has been improved, so setting it to t should now be pretty stable. #### Automatic scheduling of siblings with org-depend.el The sibling of a DONE task can now automatically be scheduled. This was a patch by Andrew Hyatt. #### New skipping conditions The functions org-agenda-skip-entry-if and org-agenda-skip-subtree-if now accept timestamp and nottimestamp as additional conditions. This was in response to a request by Saurabh Agrawal. ## Version 6.21 ### Details #### Changes to some default values of variables: Here are the new default values: (setq org-return-follows-link t) (setq org-use-fast-todo-selection t) (setq org-tags-column -77) (setq org-agenda-sorting-strategy '((agenda time-up priority-down category-keep) (todo time-up priority-down category-keep) (tags time-up priority-down category-keep) (search category-keep))) ## Version 6.20 ### Details #### Support for simple TODO dependencies John Wiegley's code for enforcing simple TODO dependencies has been integrated into Org mode. Thanks John! The structure of Org files (hierarchy and lists) makes it easy to define TODO dependencies. A parent TODO task should not be marked DONE until all subtasks (defined as children tasks) are marked as DONE. And sometimes there is a logical sequence to a number of (sub)tasks, so that one task cannot be acted upon before all siblings above it are done. 
If you customize the variable org-enforce-todo-dependencies, Org will block entries from changing state while they have children that are not DONE. Furthermore, if an entry has a property ORDERED, each of its children will be blocked until all earlier siblings are marked DONE. Here is an example: * TODO Blocked until (two) is done ** DONE one ** TODO two * Parent , :PROPERTIES: , :ORDERED: t , :END: ** TODO a ** TODO b, needs to wait for (a) ** TODO c, needs to wait for (a) and (b) The command C-c C-x o toggles the value of the ORDERED property. The variable org-agenda-dim-blocked-tasks controls how blocked entries should appear in the agenda, where they can be dimmed or even made invisible. Furthermore, you can use the variable org-enforce-todo-checkbox-dependencies to block TODO entries from switching to DONE while any checkboxes are unchecked in the entry. #### Support for shift-selection in Emacs 23 Customize the variable org-support-shift-select to use S-cursor key for selecting text. Make sure that you carefully read the docstring of that variable first. #### Adding and removing checkboxes from many lines The command C-c C-x C-b normally toggles checkbox status in the current line, or in all lines in the region. With prefix argument it now either adds or removes the checkbox. This was a requested by Daniel Clemente. ## Version 6.19 ### Overview • Improved behavior of conversion commands C-c - and C-c * • Table formulas may now reference fields in other tables • A final hline is imagined in each table, for the sake of references • A tags-todo search can now ignore timestamped items • \par can be used to force a paragraph break, also in footnotes ### Details #### Improved behavior of conversion commands C-c - and C-c * The conversion commands C-c - and C-c * are now better behaved and therefore more useful, I hope. If there is an active region, these commands will act on the region, otherwise on the current line. 
C-c - This command turns headings or normal lines into items, or items into normal lines. When there is a region, everything depends on the first line of the region: • if it is an item, turn all items in the region into normal lines. • if it is a headline, turn all headlines in the region into items. • if it is a normal line, turn all lines into items. • special case: if there is no active region and the current line is an item, cycle the bullet type of the current list. C-c * This command turns items and normal lines into headings, or headings into normal lines. When there is a region, everything depends on the first line of the region: • if it is an item, turn all items in the region into headlines. • if it is a headline, turn all headlines in the region into normal lines. • if it is a normal line, turn all lines into headlines. #### Table formulas may now reference fields in other tables You may now reference constants, fields and ranges from a different table, either in the current file or even in a different file. The syntax is remote(NAME-OR-ID,REF) where NAME can be the name of a table in the current file as set by a #+TBLNAME: NAME line before the table. It can also be the ID of an entry, even in a different file, and the reference then refers to the first table in that entry. REF is an absolute field or range reference, valid in the referenced table. Note that since there is no "current field" for the remote table, all row and column references must be absolute, not relative. #### A final hline is imagined in each table, for the sake of references Even if a table does not end with a hline (mine never do because I think it is not pretty), for the sake of references you can assume there is one. So in the following table | a | b | |---+---| | 1 | 2 | | 3 | 4 | a reference like @I$1..@II$2 will now work. 
#### A tags-todo search can now ignore timestamped items The variables org-agenda-todo-ignore-with-date, org-agenda-todo-ignore-scheduled, and org-agenda-todo-ignore-deadlines make it possible to exclude TODO entries which have this kind of planning info associated with them. This is most useful for people who schedule everything, and who use the TODO list mainly to find things that are not yet scheduled. Thomas Morgan pointed out that also the tags-todo search may serve exactly this purpose, and that it might be good to have a way to make these variables also apply to the tags-todo search. I can see that, but could not convince myself to make this the default. A new variable must be set to make this happen: org-agenda-tags-todo-honor-ignore-options. #### \par can be used to force a paragraph break, also in footnotes The LaTeX idiom \par will insert a paragraph break at that location. Normally you would simply leave an empty line to get such a break, but this is useful for footnotes whose definitions may not contain empty lines. ## Version 6.18 ### Incompatible changes #### Short examples must have a space after the colon Short literal examples can be created by preceding lines with a colon. Such lines must now have a space after the colon. I believe this is already general practice, but now it must be like this. The only exception is lines that are empty except for the colon. ### Details #### Include files can now also process switches The example and src switches like -n can now also be added to include file statements: #+INCLUDE "~/.emacs" src emacs-lisp -n -r Thanks to Manish for pointing out that this was not yet supported. #### Examples can be exported to HTML as text areas You can now specify a -t switch to an example or src block, to make it export to HTML as a text area. To change the defaults for height (number of lines in the example) and width of this area (80), use the -h and -w switches. Thanks to Ulf Stegemann for driving this development. 
#### LaTeX_CLASS can be given as a property When exporting a single subtree by selecting it as a region before export, the LaTeX class for the export will be taken from the LaTeX_CLASS property of the entry if present. Thanks to Robert Goldman for this request. #### Better handling of inlined images in different backends Two new variables govern which kind of files can be inlined during export. These are org-export-html-inline-image-extensions and org-export-latex-inline-image-extensions. Remember that links are turned into an inline image if they are a pure link with no description. HTML files can inline .png, .jpg, and .gif files, while LaTeX files, when processed with pdflatex, can inline .png, .jpg, and .pdf files. These also represent the default settings for the new variables. Note that this means that pure links to .pdf files will be inlined - to avoid this for a particular link, make sure that the link has a description part which is not equal to the link part. #### Links by ID now continue to work in HTML exported files If you make links by ID, these links will now still work in HTML exported files, provided that you keep the relative path from link to target file the same. Thanks to Friedrich Delgado Friedrichs for pushing this over the line. #### The relative timer can be paused The new command C-c C-x ,' will pause the relative timer. When the relative timer is running, its value will be shown in the mode line. To get rid of this display, you need to really stop the timer with C-u C-c C-x ,'. Thanks to Alan Davis for driving this change. #### The attachment directory may now be chosen by the user Instead of using the automatic, unique directory related to the entry ID, you can also use a chosen directory for the attachments of an entry. This directory is specified by the ATTACH_DIR property. You can use C-c C-a s' to set this property. Thanks to Jason Jackson for this proposal. 
#### You can use a single attachment directory for a subtree By setting the property ATTACH_DIR_INHERIT, you can now tell Org that children of the entry should use the same directory for attachments, unless a child explicitly defines its own directory with the ATTACH_DIR property. You can use the command C-c C-a i' to set this property in an entry. ## Version 6.17 ### Overview • Footnote support • Line numbers and references in literal examples • New hooks for export preprocessing • Capture column view into a different file ### Details #### Footnote support Org mode now directly supports the creation of footnotes. In contrast to the footnote.el package, Org mode's footnotes are designed for work on a larger document, not only for one-off documents like emails. The basic syntax is similar to the one used by footnote.el, i.e. a footnote is defined in a paragraph that is started by a footnote marker in square brackets in column 0, no indentation allowed. The footnote reference is simply the marker in square brackets inside text. For example: The Org homepage[fn:1] now looks a lot better than it used to. ... Org mode extends the number-based syntax to named footnotes and optional inline definition. Using plain numbers as markers is supported for backward compatibility, but not encouraged because of possible conflicts with LaTeX syntax. Here are the valid references: 1 A plain numeric footnote marker. 2 A named footnote reference, where name' is a unique label word or, for simplicity of automatic creation, a number. 3 A LaTeX-like anonymous footnote where the definition is given directly at the reference point. 2 An inline definition of a footnote, which also specifies a name for the note. Since Org allows multiple references to the same note, you can then use use 2' to create additional references. Footnote labels can be created automatically, or you create names yourself. 
This is handled by the variable org-footnote-auto-label and its corresponding #+STARTUP keywords, see the docstring of that variable for details. The following command handles footnotes: C-c C-x f The footnote action command. When the cursor is on a footnote reference, jump to the definition. When it is at a definition, jump to the (first) reference. Otherwise, create a new footnote. Depending on the variable org-footnote-define-inline' (with associated #+STARTUP options fninline and nofninline), the definitions will be placed right into the text as part of the reference, or separately into the location determined by the variable org-footnote-section. When this command is called with a prefix argument, a menu of additional options is offered: s Sort the footnote definitions by reference sequence. During editing, Org makes no effort to sort footnote definitions into a particular sequence. If you want them sorted, use this command, which will also move entries according to org-footnote-section. n Normalize the footnotes by collecting all definitions (including inline definitions) into a special section, and then numbering them in sequence. The references will then also be numbers. This is meant to be the final step before finishing a document (e.g. sending off an email). The exporters do this automatically, and so could something like message-send-hook'. d Delete the footnote at point, and all references to it. C-c C-c If the cursor is on a footnote reference, jump to the definition. If it is a the definition, jump back to the reference. When called with a prefix argument at either location, offer the same menu as C-u C-c C-x f'. C-c C-o or mouse-1/2 Footnote labels are also links to the corresponding definition/reference, and you can use the usual commands to follow these links. Org mode's footnote support is designed so that it should also work in buffers that are not in Org mode, for example in email messages. 
Just bind org-footnote-action to a global key like C-c f. The main trigger for this development came from a hook function written by Paul Rivier, to implement named footnotes and to convert them to numbered ones before export. Thanks, Paul! Thanks also to Scot Becker for a thoughtful post bringing this subject back onto the discussion table, and to Matt Lundin for the idea of named footnotes and his prompt testing of the new features. #### Line numbers and references in literal examples Literal examples introduced with #+BEGIN_EXAMPLE or #+BEGIN_SRC do now allow optional line numbering in the example. Furthermore, links to specific code lines are supported, greatly increasing Org mode's utility for writing tutorials and other similar documents. Code references use special labels embedded directly into the source code. Such labels look like "(ref:name)" and must be unique within a document. Org mode links with "(name)" in the link part will be correctly interpreted, both while working with an Org file (internal links), and while exporting to the different backends. Line numbering and code references are supported for all three major backends, HTML, LaTeX, and ASCII. In the HTML backend, hovering the mouse over a link to a source line will remote-highlight the referenced code line. The options for the BEGIN lines are: -n Number the lines in the example +n Like -n, but continue numbering from where the previous example left off. -r Remove the coderef cookies from the example, and replace links to this reference with line numbers. This option only takes effect if either -n or +n are given as well. If -r is not given, coderefs simply use the label name. -l "fmt" Define a local format for coderef labels, see the variable org-coderef-label-format for details. Use this if the default syntax causes conflicts with the code in the code snippet you are using. 
Here is an example: #+begin_src emacs-lisp -n -r ,(defmacro org-unmodified (&rest body) (def) , "Execute body without changing buffer-modified-p'." , (set-buffer-modified-p (back) , (prog1 (buffer-modified-p) ,@body))) #+end_src , ,[[(def)][Line (def)]] contains the macro name. Later at line ,[[(back)]], backquoting is used. When exported, this is translated to: 1: (defmacro org-unmodified (&rest body) 2: "Execute body without changing buffer-modified-p'." 3: (set-buffer-modified-p 4: (prog1 (buffer-modified-p) ,@body))) Line 2 contains the macro name. Later at line 4, backquoting is used. Thanks to Ilya Shlyakhter for proposing this feature set. Thanks to Sebastian Rose for the key Javascript element that made the remote highlighting possible. #### New hooks for export preprocessing The export preprocessor now runs more hooks, to allow better-timed tweaking by user functions: org-export-preprocess-hook Pretty much the first thing in the preprocessor. But org-mode is already active in the preprocessing buffer. org-export-preprocess-after-include-files-hook This is run after the contents of included files have been inserted. org-export-preprocess-after-tree-selection-hook This is run after selection of trees to be exported has happened. This selection includes tags-based selection, as well as removal of commented and archived trees. org-export-preprocess-before-backend-specifics-hook Hook run before backend-specific functions are called during preprocessing. org-export-preprocess-final-hook Hook for preprocessing an export buffer. This is run as the last thing in the preprocessing buffer, just before returning the buffer string to the backend. #### Capture column view into a different file The :id parameter for the dynamic block capturing column view can now truly be an ID that will also be found in a different file. Also, it can be like file:path/to/file, to capture the global column view from a different file. 
Thanks to Francois Lagarde for his report that IDs outside the current file would not work. ## Version 6.16 Cleanup of many small bugs, and one new feature. ### Details #### References to last table row with special names Fields in the last row of a table can now be referenced with $LR1,$LR2, etc. These references can appear both on the left hand side and right hand side of a formula. ## Version 6.15f This version reverses the introduction of @0 as a reference to the last row in a table, because of a conflict with the use of @0 for the current row. ## Version 6.15 ### Overview • All known LaTeX export issues fixed • Captions and attributes for figures and tables. • Better implementation for entry IDs • Spreadsheet references to the last table line. • Old syntax for link attributes abandoned ### Incompatible changes #### Old syntax for link attributes abandoned There used to be a syntax for setting link attributes for HTML export by enclosing the attributes into double braces and adding them to the link itself, like [[./img/a.jpg{{alt="an image"}}] ] This syntax is no longer supported, use instead #+ATTR_HTML: alt="an image" ,./img/a.jpg ### Details #### All known LaTeX export issues fixed All the remaining issues with the LaTeX exporter have hopefully been addressed in this release. In particular, this covers quoting of special characters in tables and problems with exporting files where the headline is in the first line, or with an active region. #### Captions and attributes for figures and tables. Tables, and Hyperlinks that represent inlined images, can now be equipped with additional information that will be used during export. The information will be taken from the following special lines in the buffer and apply to the first following table or link. #+CAPTION: The caption of the image or table. This string should be processed according to the export backend, but this is not yet done. #+LABEL: A label to identify the figure/table for cross references. 
For HTML export, this string will become the ID for the <div class="figure"> element that encapsulates the image tag and the caption. For LaTeX export, this string will be used as the argument of a \label{...} macro. These labels will be available for internal links like [[label][Table] ]. #+ATTR_HTML: Attributes for HTML export of image, to be added as attributes into the <img...> tag. This string will not be processed, so it should have immediately the right format. #+ATTR_LaTeX: Attributes for LaTeX export of images and tables. For images, this string is directly inserted into the optional argument of the \includegraphics[...]{file} command, to specify scaling, clipping and other options. This string will not be processed, so it should have immediately the right format, like width=5cm,angle=90.\\ For tables, this can currently contain the keyword longtable, to request typesetting of the table using the longtable package, which automatically distributes the table over several pages if needed. Also, the attributes line may contain an alignment string for the tabular environment, like longtable,align=l|lrl For LaTeX export, if either a caption or a label is given, the element will be exported as a float, i.e. wrapped into a figure or table environment. #### Better implementation for entry IDs Unique identifiers for entries can now be used more efficiently. Internally, a hash array has replaced the alist used so far to keep track of the files in which an ID is defined. This makes it quite fast to find an entry by ID. There is a new link type which looks like this: id:GLOBALLY-UNIQUE-IDENTIFIER This link points to a specific entry. When you move the entry to a different file, for example if you move it to an archive file, the link will continue to work. The file org-id.el contains an API that can be used to write code using these identifiers, including creating IDs and finding them wherever they are. 
Org has its own method to create unique identifiers, but if the system has uuidgen command installed (Mac's and Linux systems generally do), it will be used by default (a change compared to the earlier implementation, where you explicitly had to opt for uuidgen). You can also select the method by hand, using the variable org-id-method. If the ID system ever gets confused about where a certain ID is, it initiates a global scan of all agenda files with associated archives, all files previously known containing any IDs, and all currently visited Org mode files to rebuild the hash. You can also initiate this by hand: M-x org-id-update-id-locations. Running this command will also dump into the *Messages* buffer information about any duplicate IDs. These should not exist, and Org will never make the same ID twice, but if you copy an entry with its properties, duplicate IDs will inevitably be produced. Unfortunately, this is unavoidable in a plain text system that allows you to edit the text in arbitrary ways, and a portion of care on your side is needed to keep this system clean. The hash is stored in the file ~/.emacs.d/.org-id-locations. This is also a change from previous versions where the file was ~/.org-id-locations. Therefore, you can remove this old file if you have it. I am not sure what will happen if the .emacs.d directory does not exist in your setup, but in modern Emacsen, I believe it should exist. If you do not want to use IDs across files, you can avoid the overhead with tracking IDs by customizing the variable org-id-track-globally. IDs can then still be used for links inside a single file. IDs will also be used when you create a new link to an Org mode buffer. If you use org-store-link (normally at C-c l) inside an entry in an Org mode buffer, an ID property will be created if it does not exist, and the stored link will be an id: link. If you prefer the much less secure linking to headline text, you can configure the variable org-link-to-org-use-id. 
The default setting for this variable is create-if-interactive, meaning that an ID will be created when you store a link interactively, but not if you happen to be in an Org mode file while you create a remember note (which usually has a link to the place where you were when starting remember). #### Spreadsheet references to the last table line. You may now use @0 to reference the last dataline in a table in a stable way. This is useful in particular for automatically generated tables like the ones using org-collector.el by Eric Schulte. ## Version 6.14 ### Overview • New relative timer to support timed notes • Special faces can be set for individual tags • The agenda shows now all tags, including inherited ones. • Exclude some tags from inheritance. • More special values for time comparisons in property searches • Control for exporting meta data • Cut and Paste with hot links from w3m to Org • LOCATION can be inherited for iCalendar export • Relative row references crossing hlines now throw an error ### Incompatible Changes #### Relative row references crossing hlines now throw an error Relative row references in tables look like this: "@-4" which means the fourth row above this one. These row references are not allowed to cross horizontal separator lines (hlines). So far, when a row reference violates this policy, Org would silently choose the field just next to the hline. Tassilo Horn pointed out that this kind of hidden magic is actually confusing and may cause incorrect formulas, and I do agree. Therefore, trying to cross a hline with a relative reference will now throw an error. If you need the old behavior, customize the variable org-table-error-on-row-ref-crossing-hline. ### Details #### New relative timer to support timed notes Org now supports taking timed notes, useful for example while watching a video, or during a meeting which is also recorded. C-c C-x . Insert a relative time into the buffer. The first time you use this, the timer will be started. 
When called with a prefix argument, the timer is reset to 0. C-c C-x - Insert a description list item with the current relative time. With a prefix argument, first reset the timer to 0. M-RET Once the time list has been initiated, you can also use the normal item-creating command to insert the next timer item. C-c C-x 0 Reset the timer without inserting anything into the buffer. By default, the timer is reset to 0. When called with a C-u prefix, reset the timer to a specific starting offset. The user is prompted for the offset, with a default taken from a timer string at point, if any, so this can be used to restart taking notes after a break in the process. When called with a double prefix argument C-u C-u, change all timer strings in the active region by a certain amount. This can be used to fix timer strings if the timer was not started at exactly the right moment. Thanks to Alan Dove, Adam Spiers, and Alan Davis for contributions to this idea. #### Special faces can be set for individual tags You may now use the variable org-tag-faces to define the face used for specific tags, much in the same way as you can do for TODO keywords. Thanks to Samuel Wales for this proposal. #### The agenda shows now all tags, including inherited ones. This request has come up often, most recently it was formulated by Tassilo Horn. If you prefer the old behavior of only showing the local tags, customize the variable org-agenda-show-inherited-tags. #### Exclude some tags from inheritance. So far, the only way to select tags for inheritance was to allow it for all tags, or to do a positive selection using one of the more complex settings for org-use-tag-inheritance'. It may actually be better to allow inheritance for all but a few tags, which was difficult to achieve with this methodology. A new option, org-tags-exclude-from-inheritance', allows to specify an exclusion list for inherited tags. 
#### More special values for time comparisons in property searches In addition to <now>, <today>, <yesterday>, and <tomorrow>, there are more special values accepted now in time comparisons in property searches: You may use strings like <+3d> or <-2w>, with units d, w, m, and y for day, week, month, and year, respectively. Thanks to Lindsay Todd for this proposal. #### Control for exporting meta data All the metadata in a headline, i.e. the TODO keyword, the priority cookie, and the tags, can now be excluded from export with appropriate options: Variable Publishing property OPTIONS switch org-export-with-todo-keywords :todo-keywords todo: org-export-with-tags :tags tags: org-export-with-priority :priority pri: #### Cut and Paste with hot links from w3m to Org You can now use the key C-c C-x M-w in a w3m buffer with HTML content to copy either the region or the entire file in a special way. When you yank this text back into an Org mode buffer, all links from the w3m buffer will continue to work under Org mode. For this to work you need to load the new file org-w3m.el. Please check your org-modules variable to make sure that this is turned on. Thanks to Richard Riley for the idea and to Andy Stewart for the implementation. #### LOCATION can be inherited for iCalendar export The LOCATION property can now be inherited during iCalendar export if you configure org-use-property-inheritance like this: (setq org-use-property-inheritance '("LOCATION")) ## Version 6.13 ### Overview • Keybindings in Remember buffers can be configured • Support for ido completion • New face for date lines in agenda column view • Invisible targets become now anchors in headlines. • New contributed file org-exp-blocks.el • New contributed file org-eval-light.el • BBDB links may use regular expressions. 
• Link abbreviations can use %h to insert a url-encoded target value • Improved XHTML compliance ### Details #### Keybindings in Remember buffers can be configured The remember buffers created with Org's extensions are in Org mode, which is nice to prepare snippets that will actually be stored in Org mode files. However, this makes it hard to configure key bindings without modifying the Org mode keymap. There is now a minor mode active in these buffers, org-remember-mode', and its keymap org-remember-mode-map can be used for key bindings. By default, this map only contains the bindings for C-c C-c to store the note, and C-c C-k to abort it. Use org-remember-mode-hook' to define your own bindings like (add-hook 'org-remember-mode-hook (lambda () (define-key org-remember-mode-map "\C-x\C-s" 'org-remember-finalize))) If you wish, you can also use this to free the C-c C-c binding (by binding this key to nil in the minor mode map), so that you can use C-c C-c again to set tags. This modification is based on a request by Tim O'Callaghan. #### Support for ido completion You can now get the completion interface from ido.el for many of Org's internal completion commands by turning on the variable org-completion-use-ido. ido-mode must also be active before you can use this. This change is based upon a request by Samuel Wales. #### New face for date lines in agenda column view When column view is active in the agenda, and when you have summarizing properties, the date lines become normal column lines and the separation between different days becomes harder to see. If this bothers you, you can now customize the face org-agenda-column-dateline. This is based on a request by George Pearson. #### Invisible targets become now anchors in headlines. These anchors can be used to jump to a headline directly with an HTML link, just like the sec-xxx IDs. 
For example, the following will make a http link //domain/path-to-my-file.html#dummy work: ,# <<dummy>> This is based on a request by Matt Lundin. #### New contributed file org-exp-blocks.el This new file implements special export behavior of user-defined blocks. The currently supported blocks are comment Comment blocks with author-specific markup ditaa conversion of ASCII art into pretty png files using Stathis Sideris' ditaa.jar program dot creation of graphs in the dot language R Sweave type exporting using the R program For more details and examples, see the file commentary in org-exp-blocks.el. Kudos to Eric Schulte for this new functionality, after org-plot.el already his second major contribution. Thanks to Stathis for this excellent program, and for allowing us to bundle it with Org mode. #### New contributed file org-eval-light.el This module gives control over the execution of Emacs Lisp code blocks included in a file. Thanks to Eric Schulte also for this file. You can now configure Org to understand many links created with the Emacs Planner package, so you can cut text from planner pages and paste them into Org mode files without having to re-write the links. Among other things, this means that the command org-open-at-point-global which follows links not only in Org mode, but in arbitrary files like source code files etc, will work also with links created by planner. The following customization is needed to make all of this work (setq org-link-translation-function 'org-translate-link-from-planner) I guess an inverse translator could be written and integrated into Planner. #### BBDB links may use regular expressions. This did work all along, but only now I have documented it. #### yank-pop works again after yanking an outline tree Samuel Wales had noticed that org-yank did mess up this functionality. Now you can use yank-pop again, the only restriction is that the so-yanked text will not be pro/demoted or folded. 
#### Link abbreviations can use %h to insert a url-encoded target value Thanks to Steve Purcell for a patch to this effect. #### Improved XHTML compliance Thanks to Sebastian Rose for pushing this. ## Version 6.12 ### Overview • A region of entries can now be refiled with a single command • Fine-tuning the behavior of org-yank' • Formulas for clocktables • Better implementation of footnotes for HTML export • More languages for HTML export. ### Details #### A region of entries can now be refiled with a single command With transient-mark-mode active (zmacs-regions under XEmacs), you can now select a region of entries and refile them all with a single C-c C-w command. Thanks to Samuel Wales for this useful proposal. #### Fine-tuning the behavior of org-yank The behavior of Org's yanking command has been further fine-tuned in order to avoid some of the small annoyances this command caused. • Calling org-yank with a prefix arg will stop any special treatment and directly pass through to the normal yank command. Therefore, you can now force a normal yank with C-u C-y. • Subtrees will only be folded after a yank if doing so will not swallow any non-white characters after the yanked text. This is, I think, a really important change to make the command work more sanely. #### Formulas for clocktables You can now add formulas to a clock table, either by hand, or with a :formula parameter. These formulas can be used to create additional columns with further analysis of the measured times. Thanks to Jurgen Defurne for triggering this addition. #### Better implementation of footnotes for HTML export The footnote export in 6.11 really was not good enough. Now it works fine. If you have customized footnote-section-tag, make sure that your customization is matched by footnote-section-tag-regexp. Thanks to Sebastian Rose for pushing this change. #### More languages for HTML export. More languages are supported during HTML export. 
This is only relevant for the few special words Org inserts, like "Table of Contents", or "Footnotes". Also the encoding issues with this feature seem to be solved now. Thanks to Sebastian Rose for pushing me to fix the encoding problems. ## Version 6.11 ### Overview • Yanking subtree with C-y now adjusts the tree level • State changes can now be shown in the log mode in the agenda • Footnote in HTML export are now collected at the end of the document • HTML export now validates again as XHTML • The clock can now be resumed after exiting and re-starting Emacs • Clock-related data can be saved and resumed across Emacs sessions • Following file links can now use C-u C-u to force use of an external app • Inserting absolute files names now abbreviates links with "~" • Completed repeated tasks listed briefly in agenda • Remove buffers created during publishing are removed ### Details #### Yanking subtree with C-y now adjusts the tree level When yanking a cut/copied subtree or a series of trees, the normal yank key C-y now adjusts the level of the tree to make it fit into the current outline position, without losing its identity, and without swallowing other subtrees. This uses the command org-paste-subtree. An additional change in that command has been implemented: Normally, this command picks the right outline level from the surrounding visible headlines, and uses the smaller one. So if the cursor is between a level 4 and a level 3 headline, the tree will be pasted as level 3. If the cursor is actually at the beginning of a headline, the level of that headline will be used. For example, let's say you have a tree like this: * Level one ** Level two ,(1) ,(2)* Level one again with (1) and (2) indicating possible cursor positions for the insertion. When at (1), the tree will be pasted as level 2. When at (2), it will be pasted as level 1. If you do not want C-y to behave like this, configure the variable org-yank-adjusted-subtrees. 
Thanks to Samuel Wales for this idea and a partial implementation. #### State changes can now be shown in the log mode in the agenda If you configure the variable org-agenda-log-mode-items, you can now request that all logged state changes be included in the agenda when log mode is active. If you find this too much for normal applications, you can also temporarily request the inclusion of state changes by pressing C-u l in the agenda. This was a request by Hsiu-Khuern Tang. You can also press C-u C-u l' to get only log items in the agenda, without any timestamps/deadlines etc. #### Footnote in HTML export are now collected at the end of the document Previously, footnotes would be left in the document where they are defined, now they are all collected and put into a special <div> at the end of the document. Thanks to Sebastian Rose for this request. #### HTML export now validates again as XHTML. Thanks to Sebastian Rose for pushing this cleanup. #### The clock can now be resumed after exiting and re-starting Emacs If the option org-clock-in-resume is t, and the first clock line in an entry is unclosed, clocking into that task resumes the clock from that time. Thanks to James TD Smith for a patch to this effect. #### Clock-related data can be saved and resumed across Emacs sessions The data saved include the contents of org-clock-history, and the running clock, if there is one. (setq org-clock-persist t) (setq org-clock-in-resume t) (org-clock-persistence-insinuate) Thanks to James TD Smith for a patch to this effect. #### Following file links can now use C-u C-u to force use of an external app. So far you could only bypass your setup in org-file-apps' and force opening a file link in Emacs by using a C-u prefix arg with C-c C-o. Now you can call C-u C-u C-c C-o to force an external application. Which external application depends on your system. On Mac OS X and Windows, open is used. On a GNU/Linux system, the mailcap settings are used. 
This was a proposal by Samuel Wales. #### Inserting absolute files names now abbreviates links with "~". Inserting file links with C-u C-c C-l was buggy if the setting of org-link-file-path-type' was adaptive' (the default). Absolute file paths were not abbreviated relative to the users home directory. This bug has been fixed. Thanks to Matt Lundin for the report. Even though one of the purposes of entry attachments was to reduce the number of links in an entry, one might still want to have the occasional link to one of those files. You can now use link abbreviations to set up a special link type that points to attachments in the current entry. Note that such links will only work from within the same entry that has the attachment, because the directory path is entry specific. Here is the setup you need: (setq org-link-abbrev-alist '(("att" . org-attach-expand-link))) After this, a link like this will work [[att:some-attached-file.txt]] This was a proposal by Lindsay Todd. #### Completed repeated tasks listed briefly in agenda When a repeating task, listed in the daily/weekly agenda under today's date, is completed from the agenda, it is listed as DONE in the agenda until the next update happens. After the next update, the task will have disappeared, of course, because the new date is no longer today. #### Remove buffers created during publishing are removed Buffers that are created during publishing are now deleted when the publishing is over. At least I hope it works like this. ## Version 6.10 ### Overview • Secondary agenda filtering is becoming a killer feature • Setting tags has now its own binding, C-c C-q • Todo state changes can trigger tag changes • C-RET will now always insert a new headline, never an item. • Customize org-mouse.el feature set to free up mouse events • New commands for export all the way to PDF (through LaTeX) • Some bug fixed for LaTeX export, more bugs remain. 
### Details #### Enhancements to secondary agenda filtering This is, I believe, becoming a killer feature. It allows you to define fewer and more general custom agenda commands, and then to do the final narrowing to specific tasks you are looking for very quickly, much faster than calling a new agenda command. If you have not tried this yet, you should! • You can now refine the current filter by an additional criterion When filtering an existing agenda view with /, you can now narrow down the existing selection by an additional condition. To do this, use \ instead of / to add the additional criterion. You can also press + or - after / to add a positive or negative condition. A condition can be a TAG, or an effort estimate limit, see below. • It is now possible to filter for effort estimates This means to filter the agenda for the value of the Effort property. For this you should best set up global allowed values for effort estimates, with (setq org-global-properties '(("Effort_ALL" . "0 0:10 0:30 1:00 2:00 3:00 4:00"))) You may then select effort limits with single keys in the filter. It works like this: After / or \, first select the operator which you want to use to compare effort estimates: < Select entries with effort smaller than or equal to the limit > Select entries with effort larger than or equal to the limit = Select entries with effort equal to the limit After that, you can press a single digit number which is used as an index to the allowed effort estimates. If you do not use digits to fast-select tags, you can even skip the operator, which will then default to org-agenda-filter-effort-default-operator', which is by default <. Thanks to Manish for the great idea to include fast effort filtering into the agenda filtering process. 
• The mode line will show the active filter For example, if there is a filter in place that does select for HOME tags, against EMAIL tags, and for tasks with an estimated effort smaller than 30 minutes, the mode-line will show +HOME-EMAIL+<0:30 • The filter now persists when the agenda view is refreshed All normal refresh commands, including those that move the weekly agenda from one week to the next, now keep the current filter in place. You need to press / / to turn off the filter. However, when you run a new agenda command, for example going from the weekly agenda to the TODO list, the filter will be switched off. #### Setting tags has now its own binding, C-c C-q You can still use C-c C-c on a headline, but the new binding should be considered as the main binding for this command. The reasons for this change are: • Using C-c C-c for tags is really out of line with other uses of C-c C-c. • I hate it in Remember buffers when I try to set tags and I cannot, because C-c C-c exits the buffer :-( • C-c C-q will also work when the cursor is somewhere down in the entry, it does not have to be on the headline. #### Todo state changes can trigger tag changes The new option org-todo-state-tags-triggers can be used to define automatic changes to tags when a TODO state changes. For example, the setting (setq org-todo-state-tags-triggers '((done ("Today" . nil) ("NEXT" . nil)) ("WAITING" ("Today" . t)))) will make sure that any change to any of the DONE states will remove tags "Today" and "NEXT", while switching to the "WAITING" state will trigger the tag "Today" to be added. I use this mostly to get rid of TODAY and NEXT tags which I apply to select an entry for execution in the near future, which I often prefer to specific time scheduling. #### C-RET will now always insert a new headline, never an item. The new headline is inserted after the current subtree. Thanks to Peter Jones for patches to fine-tune this behavior. 
#### Customize org-mouse.el feature set There is a new variable org-mouse-features which gives you some control about what features of org-mouse you want to use. Turning off some of the feature will free up the corresponding mouse events, or will avoid activating special regions for mouse clicks. By default I have turned off the feature to use drag mouse events to move or promote/demote entries. You can of course turn them back on if you wish. This variable may still change in the future, allowing more fine-grained control. #### New commands for export to PDF This is using LaTeX export, and then processes it to PDF using pdflatex. C-c C-e p process to PDF. C-c C-e d process to PDF, and open the file. #### LaTeX export • \usepackage{graphicx} is now part of the standard class definitions. • Several bugs fixed, but definitely not all of them :-( #### New option org-log-state-notes-insert-after-drawers' Set this to t if you want state change notes to be inserted after any initial drawers, i.e. drawers that immediately follow the headline and the planning line (the one with DEADLINE/SCHEDULED/CLOSED information). ## Version 6.09 ### Details #### org-file-apps now uses regular expressions instead of extensions Just like in auto-mode-alist, car's in the variable org-file-apps that are strings are now interpreted as regular expressions that are matched against a file name. So instead of "txt", you should now write "\\.txt\\'" to make sure the matching is done correctly (even though "txt" will be recognized and still be interpreted as an extension). There is now a shortcut to get many file types visited by Emacs. If org-file-apps contains (auto-mode . emacs)', then any files that are matched by auto-mode-alist' will be visited in emacs. #### Changes to the attachment system • The default method to attach a file is now to copy it instead of moving it. • You can modify the default method using the variable org-attach-method'. 
I believe that most Unix people want to set it to ln' to create hard links. • The keys c, m, and l specifically select copy, move, or link, respectively, as the attachment method for a file, overruling org-attach-method'. • To create a new attachment as an Emacs buffer, you have to now use n instead of c. • The file list is now always retrieved from the directory itself, not from the "Attachments" property. We still keep this property by default, but you can turn it off, by customizing the variable org-attach-file-list-property. ## Version 6.08 ### Incompatible changes • Changes in the structure of IDs, see here for details. • C-c C-a has been redefined, see here for details. ### Details #### The default structure of IDs has changed IDs created by Org have changed a bit: • By default, there is no prefix on the ID. There used to be an "Org" prefix, but I now think this is not necessary. • IDs use only lower-case letters, no upper-case letters anymore. The reason for this is that IDs are now also used as directory names for org-attach, and some systems do not distinguish upper and lower case in the file system. • The ID string derived from the current time is now reversed to become an ID. This assures that the first two letters of the ID change fast, so that it makes sense to split them off to create subdirectories to balance load. • You can now set the org-id-method' to uuidgen' on systems which support it. #### C-c C-a no longer calls show-all' The reason for this is that C-c C-a is now used for the attachment system. On the rare occasions that this command is needed, use M-x show-all, or C-u C-u C-u TAB. #### New attachment system You can now attach files to each node in the outline tree. This works by creating special directories based on the ID of an entry, and storing files in these directories. Org can keep track of changes to the attachments by automatically committing changes to git. See the manual for more information. 
Thanks to John Wiegley who contributed this fantastic new concept and wrote org-attach.el to implement it. #### New remember template escapes %^{prop}p to insert a property %k the heading of the item currently being clocked %K a link to the heading of the item currently being clocked Also, when you exit remember with C-2 C-c C-c, the item will be filed as a child of the item currently being clocked. So the idea is, if you are working on something and think of a new task related to this or a new note to be added, you can use this to quickly add information to that task. Thanks to James TD Smith for a patch to this effect. #### Clicking with mouse-2 on clock info in mode-line visits the clock. Thanks to James TD Smith for a patch to this effect. #### New file in contrib: lisp/org-checklist.el This module deals with repeated tasks that have checkbox lists below them. Thanks to James TD Smith for this contribution. #### New in-buffer setting #+STYLE It can be used to locally set the variable org-export-html-style-extra'. Several such lines are allowed, they will all be concatenated. For an example on how to use it, see the publishing tutorial. ## Version 6.07 ### Overview • Filtering existing agenda views with respect to a tag • Editing fixed-width regions with picture or artist mode • org-plot.el is now part of Org • Tags can be used to select the export part of a document • Prefix interpretation when storing remember notes • Yanking inserts folded subtrees • Column view capture tables can have formulas, plotting info • In column view, date stamps can be changed with S-cursor keys • The note buffer for clocking out now mentions the task • Sorting entries alphabetically ignores TODO keyword and priority • Agenda views can sort entries by TODO state • New face org-scheduled for entries scheduled in the future. • Remember templates for gnus links can use the :to escape. 
• The file specification in a remember template may be a function • Categories in iCalendar export include local tags • It is possible to define filters for column view • Disabling integer increment during table Field copy • Capturing column view is on C-c C-x i' • And tons of bugs fixed. ### Incompatible changes #### Prefix interpretation when storing remember notes has changed The prefix argument to the C-c C-c' command that finishes a remember process is now interpreted differently: C-c C-c Store the note to predefined file and headline C-u C-c C-c Like C-c C-c, but immediately visit the note in its new location. C-1 C-c C-c Select the storage location interactively C-0 C-c C-c Re-use the last used location This was requested by John Wiegley. #### Capturing column view is now on C-c C-x i' The reason for this change was that C-c C-x r' is also used as a tty key replacement. #### Categories in iCalendar export now include local tags The locally defined tags are now listed as categories when exporting to iCalendar format. Org's traditional file/tree category is now the last category in this list. Configure the variable org-icalendar-categories to modify or revert this behavior. This was a request by Charles Philip Chan. ### Details #### Secondary filtering of agenda views. You can now easily and interactively filter an existing agenda view with respect to a tag. This command is executed with the / key in the agenda. You will be prompted for a tag selection key, and all entries that do not contain or inherit the corresponding tag will be hidden. With a prefix argument, the opposite filter is applied: entries that do have the tag will be hidden. This operation only hides lines in the agenda buffer, it does not remove them. Changing the secondary filtering does not require a new search and is very fast. If you press TAB at the tag selection prompt, you will be switched to a completion interface to select a tag. 
This is useful when you want to select a tag that does not have a direct access character. A double / / will restore the original agenda view by unhiding any hidden lines. This functionality was John Wiegley's idea. It is a simpler implementation of some of the query-editing features proposed and implemented some time ago by Christopher League (see the file contrib/lisp/org-interactive-query.el). #### Editing fixed-width regions with picture or artist mode The command @<code>C-c '@</code> (that is C-c followed by a single quote) can now also be used to switch to a special editing mode for fixed-width sections. The default mode is artist-mode which allows you to create ASCII drawings. It works like this: Enter the editing mode with @<code>C-c '@</code>. An indirect buffer will be created and narrowed to the fixed-width region. Edit the drawing, and press @<code>C-c '@</code> again to exit. Lines in a fixed-width region should be preceded by a colon followed by at least one space. These will be removed during editing, and then added back when you exit the editing mode. Using the command in an empty line will create a new fixed-width region. This new feature arose from a discussion involving Scott Otterson, Sebastian Rose and Will Henney. #### org-plot.el is now part of Org. You can run it by simply calling org-plot/gnuplot. Documentation is not yet included with Org, please refer to http://github.com/eschulte/org-plot/tree/master until we have moved the docs into Org or Worg. Thanks to Eric Schulte for this great contribution. #### Tags can be used to select the export part of a document You may now use tags to select parts of a document for inclusion into the export, and to exclude other parts. This behavior is governed by two new variables: org-export-select-tags and org-export-exclude-tags. These default to ("export") and ("noexport"), but can be changed, even to include a list of several tags. Org first checks if any of the select tags is present in the buffer. 
If yes, all trees that do not carry one of these tags will be excluded. If a selected tree is a subtree, the heading hierarchy above it will also be selected for export, but not the text below those headings. If none of the select tags is found anywhere in the buffer, the whole buffer will be selected for export. Finally, all subtrees that are marked by any of the exclude tags will be removed from the export buffer. You may set these tags with in-buffer options EXPORT_SELECT_TAGS and EXPORT_EXCLUDE_TAGS. I love this feature. Thanks to Richard G Riley for coming up with the idea. #### Prefix interpretation when storing remember notes The prefix argument to the C-c C-c' command that finishes a remember process is now interpreted differently: C-c C-c Store the note to predefined file and headline C-u C-c C-c Like C-c C-c, but immediately visit the note in its new location. C-1 C-c C-c Select the storage location interactively C-0 C-c C-c Re-use the last used location This was requested by John Wiegley. #### Yanking inserts folded subtrees If the kill is a subtree or a sequence of subtrees, yanking them with C-y will leave all the subtrees in a folded state. This basically means, that kill and yank are now much more useful in moving stuff around in your outline. If you do not like this, customize the variable org-yank-folded-subtrees. Right now, I am only binding C-y to this new function, should I modify all bindings of yank? Do we need to amend yank-pop as well? This feature was requested by John Wiegley. #### Column view capture tables can have formulas, plotting info If you attach formulas and plotting instructions to a table capturing column view, these extra lines will now survive an update of the column view capture, and any formulas will be re-applied to the captured table. This works by keeping any continuous block of comments before and after the actual table. 
#### In column view, date stamps can be changed with S-cursor keys If a property value is a time stamp, S-left and S-right can now be used to shift this date around while in column view. This was a request by Chris Randle. #### The note buffer for clocking out now mentions the task This was a request by Peter Frings. #### Sorting entries alphabetically ignores TODO keyword and priority Numerical and alphanumerical sorting now skips any TODO keyword or priority cookie when constructing the comparison string. This was a request by Wanrong Lin. #### Agenda views can sort entries by TODO state You can now define a sorting strategy for agenda entries that does look at the TODO state of the entries. Sorting by TODO entry does first separate the non-done from the done states. Within each class, the entries are sorted not alphabetically, but in definition order. So if you have a sequence of TODO entries defined, the entries will be sorted according to the position of the keyword in this sequence. This follows an idea and sample implementation by Christian Egli. #### New face org-scheduled for entries scheduled in the future. This was a request by Richard G Riley. #### Remember templates for gnus links can now use the :to escape. Thanks to Tommy Lindgren for a patch to this effect. #### The file specification in a remember template may now be a function Thanks to Gregory Sullivan for a patch to this effect. #### Categories in iCalendar export now include local tags The locally defined tags are now listed as categories when exporting to iCalendar format. Org's traditional file/tree category is now the last category in this list. Configure the variable org-icalendar-categories to modify or revert this behavior. This was a request by Charles Philip Chan. #### It is now possible to define filters for column view The filter can modify the value that will be displayed in a column, for example it can cut out a part of a time stamp. 
For more information, look at the variable org-columns-modify-value-for-display-function. #### Disabling integer increment during table field copy Prefix arg 0 to S-RET does the trick. This was a request by Chris Randle. ## Version 6.06 ### Overview • New, more CSS-like setup for HTML style information • Attributes in hyperlinks, for example alt and title for images • Simplified way to specify file links • Modified behavior of time stamps in iCalendar export • New way to compare times during a property search • New option org-open-directory-means-index' • New parameters :prefix and :prefix1 for include files • New option :index-style for org-publish • New structure for the timestamp directory for org-publish. ### Incompatible changes #### New structure for the timestamp directory for org-publish. The timestamp directory now uses SHA1 hashed versions of the path to each publishing file. This should be a consistent and system-independent way to handle things. The change means that your next publishing command will publish each and every file again, but just once, until new time stamps are in place. ### Details #### New setup for HTML style information In order to create a more CSS-like setup of the HTML style information, the following changes have been made: • The default style has moved to a constant, org-export-html-style-default and should not be changed anymore. • The default of the variable org-export-html-style is now just the empty string. This variable should receive settings that are Org-wide. When using org-publish, this variable is associated with the :style property and can be used to establish project-wide settings. • There is a new variable org-export-html-style-extra that should be used for file-local settings. Org-publish can, if necessary, access this variable with the :style-extra property. 
• When a file is published, the values of • org-export-html-style-default • org-export-html-style • org-export-html-style-extra are all inserted into the HTML header, in the given sequence. This follows a proposal by Rustom Mody. You can now set attributes in hyperlinks that will be used when publishing to HTML. For example, if you want to use the ALT and TITLE attributes of an inlined image, here is how to do this: [[./img/a.jpg{{alt="This is image A" title="Image with no action"}}]] Thanks to Charles Chen for this idea. #### Simplified way to specify file links In a link, you can now leave out the "file:" prefix if you write an absolute file name like /Users/dominik/.emacs or ~/.emacs, or if you write a relative file name by using ./ or ../ to start the file path. You cannot write a plain file name, because plain text is interpreted as an internal link. So for example, a link to an image A.jpg with a thumbnail B.jpg can now be written like ,[[./A.jpg][./B.jpg]] #### Changes in iCalendar export Deadline and scheduling time stamps are now treated differently in iCalendar export. The default behavior is now the following: • a DEADLINE that appears in an entry that is a TODO item is used as the item's DUE date. Therefore, such a deadline will no longer show up in the calendar. • a DEADLINE that appears in an item that is not a TODO item is exported as an EVENT and will show up in the calendar. • a SCHEDULED timestamp in a TODO item will be used as the item's DTSTART. Therefore, such a timestamp will not show up in the calendar. • a SCHEDULED timestamp in an item that is not a TODO has no effect on iCalendar export at all. It will be ignored. Of course this would not be Emacs if you could not configure exactly what you want. Take a look at the variables org-icalendar-use-deadlines and org-icalendar-use-scheduled if you want to go back to the old behavior or even do something completely different. Thanks to Karen Cooke for triggering this change.
#### New way to compare times during a property search If the comparison value in a property search is a string that is enclosed in angular brackets, a time comparison will be done. For example +DEADLINE>="<2008-12-24 15:20>" looks for entries with a deadline on or after that time. Special allowed values are "<now>" (with time) and "<today>" (date only). This is based on a request by Manish. #### New option org-open-directory-means-index' When set, a link pointing to a directory will actually open the index.org file in that directory. This is a good setting inside a publishing project. When not set, you get a finder/explorer window for that directory, or dired, depending on system and setup. This follows a request by Richard Riley. #### New parameters :prefix and :prefix1 for include files These parameters specify prefixes for each line of included text. :prefix1 is only for the first line, :prefix for all other lines. This follows a proposal by Richard Riley. #### New option :index-style for org-publish This option can be used to switch the style of the index produced by org-publish. Can be list' (index is just an itemized list of the titles of the files involved) or tree' (the directory structure of the source files is reflected in the index). The default is tree'. Thanks to Manuel Hermenegildo for the patch. #### In the Agenda, inclusion of archives can now be toggled • Pressing v will toggle inclusion of trees with the ARCHIVE tag, this includes obviously the archive sibling. • Pressing C-u v' will include trees with ARCHIVE tag, and will also include all archive files that are currently associated with your agenda files. This was triggered by a proposal by Manuel Hermenegildo. ## Version 6.05 If I were to name my releases, this one would be called "Adam". Adam, you definitely owe me a beer :-). And I owe you one, too - thanks for all the great ideas. 
### Overview • Use cursor position in agenda for remember, scheduling and deadlines • New API for mapping a function over all or selected entries • Remember templates can be filed to beginning/end of a file • Visiting a filed remember buffer immediately • BBDB anniversaries are now links • Column view in the agenda now cleans the ITEM field • The format of section numbers in exported files is configurable • New hook to hack exported iCalendar files • Log mode in agenda now shows end time for CLOCK line ### Incompatible changes #### C-c C-x C-k' now calls org-mark-entry-for-agenda-action' It used to call org-cut-special, but that is also bound to the key C-c C-x C-w. ### Details #### Making use of the cursor position in the agenda The date at the cursor in the agenda (and also in the calendar) can now be used to schedule entries, or to set the date in a remember template correctly. It is also designed to make it easier to move an entry to a date picked in the agenda. Thanks to Thomas Baumann for starting the thread that led to this development. • Calling remember with the cursor date in the agenda If you want to use the date at the agenda cursor in a remember template, start remember from the agenda with the keys k r. While the template is being filled in, the default date for all time stamps, and also for all interactive escapes like %^t is now the date at the cursor in the agenda. The exact same command can also be used from the calendar if you prefer that. • Picking a date for scheduling/deadline in the agenda You may now pick the date for scheduling an item or for setting a deadline in the agenda, where you have the best overview of free time slots. This is a two-step process. 1. First you pick the entry that should be acted upon. In the agenda, you use the keys k m. In an org-mode file, this is on C-c C-x C-k. 2. Then you find the agenda date you want to apply.
When the cursor is anywhere in the block belonging to that date, press k s to schedule, or k d to put a deadline. The agenda is not updated immediately, press r if you want it to show the affected entry in the right place. #### New API for mapping a function over all or selected entries Org has sophisticated mapping capabilities to find all entries satisfying certain criteria. Internally, this functionality is used to produce agenda views, but there is also an API that can be used to execute arbitrary functions for each or selected entries. The main entry point for this API is: -- Function: org-map-entries func &optional match scope &rest skip Call FUNC at each headline selected by MATCH in SCOPE. FUNC is a function or a lisp form. The function will be called without arguments, with the cursor positioned at the beginning of the headline. The return values of all calls to the function will be collected and returned as a list. MATCH is a tags/property/todo match as it is used in the agenda tags view. Only headlines that are matched by this query will be considered during the iteration. When MATCH is nil or t, all headlines will be visited by the iteration. SCOPE determines the scope of this command, it can specify a file, all agenda files, the current tree and much more. The remaining args are treated as settings for the skipping facilities of the scanner. The function given to that mapping routine can really do anything you like. Here is a simple example that will turn all entries in the current file with a tag TOMORROW into TODO entries with the keyword UPCOMING. Entries in comment trees and in archive trees will be ignored. (org-map-entries '(org-todo "UPCOMING") "+TOMORROW" 'file 'archive 'comment) The following example counts the number of entries with TODO keyword WAITING, in all agenda files. 
(length (org-map-entries t "/+WAITING" nil 'agenda)) #### Changes in Remember templates • Remember templates can now use the cursor date in the agenda Use k r to start remember from the agenda, with enforcing the cursor date as default for any time stamps created by the template. • Filing remember templates to the beginning or end of a file You may now set the heading part of a remember template definition to top' or bottom'. The template will then be filed as a level 1 entry to the beginning or end of the target file, respectively. Thanks to Adam Spiers for this proposal. • You can jump to the location of a note immediately after filing it Just include the %& escape anywhere in the template. An interesting combination now is to use %!%&, which will immediately file and visit the note, which is equivalent to generating the note directly in the target location. Thanks to Adam Spiers for this proposal. #### BBDB anniversaries are now links. If you are using %%(bbdb-anniversaries) to list anniversaries in the agenda, you can now directly access the entry that triggered a listed anniversary from the agenda. Just click the anniversary - it is a link now. Thanks to Thomas Baumann for a patch to this effect. #### Column view in the agenda now cleans the ITEM field See the new variable org-agenda-columns-remove-prefix-from-item. Thanks to Adam Spiers for this proposal. #### The format of section number in exported files is configurable See the new variable org-export-section-number-format'. Thanks to Adam Spiers for this proposal. In column view, if you press a key 1-9 or 0, the corresponding values from the list of allowed values for that field at point will be directly selected. Thanks to Levin Du for this proposal and a patch to this effect. #### New hook to hack exported iCalendar files The new hook org-before-save-iCalendar-file-hook' runs just before the buffer with a created iCalendar export is saved. 
This is what I settled for after a long discussion with Adam Spiers about doing some special filtering automatically. #### Log mode in agenda now shows end time for CLOCK lines When turning on log mode in the agenda with l, clock lines will now also list the end time, not only the starting time. Thanks to Tian Qiu for bringing this up again. #### Fixes and additions for org-publish • The :include and :index-title properties in org-publish work now as advertized • the #+TITLE of a page will be used in the index • new :completion-function property can define a hook to be run after publishing a file. Thanks to Manuel Hermenegildo for a patch to this effect. ## Version 6.04 ### Overview • Statistics cookies [/] and [%] for TODO entries • Editing source code example in the proper mode • iCalendar now defines proper UIDs for entries • New properties for customizing subtree export ### Incompatible changes • The default of the variable org-tags-match-list-sublevels' is now t'. The main reason for this is that it is easier to explain in the manual and will lead to fewer surprises. • The former CONTRIB directory is now called "contrib". This was already the case in the git distribution, but the tar and zip archives still did this wrong. ### Details #### Statistics for TODO entries The [/] and [%] cookies have already provided statistics for checkboxes. Now they do the same also for TODO entries. If a headline contains either cookie, changing the TODO state of any direct child will trigger an update of this cookie. Children that are neither TODO nor DONE are ignored. There have already been requests to automatically switch the parent headline to DONE when all children are done. I am not making this a default feature, because one needs to make many decisions about which keyword to use, etc. Instead of a complex customization variable, I am providing a hook that can be used. 
This hook will be called each time a TODO statistics cookie is updated, with the cursor in the corresponding line. Each function in the hook will receive two arguments, the number of done entries, and the number of not-done entries, and you can use the hook to change the state of the headline. Here is an example implementation: (defun org-summary-todo (n-done n-not-done) "Switch entry to DONE when all sub-entries are done, to TODO otherwise." (let (org-log-done org-log-states) ; turn off logging (org-todo (if (= n-not-done 0) "DONE" "TODO")))) #### Editing source code example in the proper mode If you are writing a document with source code examples, you can include these examples into a #+BEGIN_SRC lang ... #+END_SRC or (with the org-mtags module loaded) a <src... structure. lang stands for the Emacs mode used for editing the language, this could be emacs-lisp for Emacs Lisp mode examples, or org for Org mode examples. You can now use the key "C-c '" (that is C-c followed by the single quote) to edit the example in its native mode. This works by creating an indirect buffer, narrowing it to the example and setting the appropriate mode. You need to exit editing by pressing "C-c '" again. This is important, because lines that have syntactic meaning in Org will be quoted by calling this command. "C-c '" also edits include files, the setupfile in a #+SETUPFILE line, and all those little foreign snippets like: #+HTML: this code can be edited in html-mode #+BEGIN_HTML ,Same here #+END_HTML #+LaTeX: this code can be edited in latex-mode #+BEGIN_LaTeX ,Same here #+END_LaTeX #+BEGIN_SRC fortran ,Here we can edit in fortran-mode #+END_SRC #### iCalendar now defines proper UIDs for entries This is necessary for synchronization services. The UIDs are created using the org-id.el module which is now part of the Org core. If you set the variable (setq org-icalendar-store-UID t) then all created UIDs will be stored in the entry as an :ID: property.
This is off by default because it creates lots of property drawers even if you only play with iCalendar export. But if you plan to use synchronization, you really need to turn this on. Diary sexp entries do not yet receive proper persistent UIDs, because they are transformed to iCalendar format by icalendar.el which creates fresh UIDs each time, based on the current time. An interesting aspect of Org is that a single outline node can give rise to multiple iCalendar entries (as a timestamp, a deadline, a scheduled item, and as a TODO item). Therefore, Org adds prefixes "TS-", "DL-" "CS-", and "TD-" to the UID during iCalendar export, depending on what triggered the inclusion of the entry. In this way the UID remains unique, but a synchronization program can still figure out from which entry all the different instances originate. #### New properties for customizing subtree export. When exporting a subtree by selecting it before calling the export command, you can now use the properties EXPORT_TITLE, EXPORT_TEXT, and EXPORT_OPTIONS to overrule the global #+TITLE, #+TEXT, and #+OPTIONS settings. You can also set an export file name with EXPORT_FILE_NAME that will overrule the file name derived from the buffer's file name. As far as the options are concerned, the global #+OPTIONS will still be read, and only the options you give in the property will be overwritten. For example: #+OPTIONS: skip:nil * Computer Tricks , :PROPERTIES: , :EXPORT_FILE_NAME: ct.html , :EXPORT_TITLE: Steve's collected computer tricks , :EXPORT_OPTIONS: h:2 toc:nil , :END: #### New way to define tags for an entire file. Tags that are defined in a line like #+FILETAGS: work urgent are inherited by all entries in the file. Thanks to Manuel Hermenegildo for this proposal. 
## Version 6.03 ### Overview • Description lists are now supported natively • Block quotes for export • Fontified code examples in HTML export • Include files for export • Text before the first headline is now exported by default • In-buffer options may now be collected in an external file • The in-buffer settings keywords may now be lower case • Completion of structure elements • Startup visibility can now be influenced by properties • Clock task history, moving entries with the running clock • BBDB anniversaries much faster • New contrib files: org-eval.el and org-mtags.el ### Incompatible changes • The text before the first headline is now exported by default Previously, the default was to not include text in an org-mode buffer before the first headline. From now on, the default is to include it. If you like the old default better, customize the variable org-export-skip-text-before-1st-heading or set the value on a per-file basis with #+OPTIONS: skip:t ### Details #### Description lists are now supported natively A plain list will be exported as a description list if the first item in the list has a term and the description, separated by " :: ". For example Emacs software by Carsten Dominik - RefTeX :: Support for LaTeX Labels, References, Citations - CDLaTeX :: more LaTeX functionality for Emacs - TeXmathp :: checking LaTeX buffers for Math mode. - ORG :: An Emacs mode for notes and project planning. - CONSTANTS :: An Emacs package for inserting the definition of natural constants and units into a buffer. - IDLWAVE :: The Emacs modes for editing and running IDL and WAVE CL files. will be rendered as Emacs software by Carsten Dominik RefTeX Support for LaTeX Labels, References, Citations CDLaTeX more LaTeX functionality for Emacs TeXmathp checking LaTeX buffers for Math mode. ORG An Emacs mode for notes and project planning. CONSTANTS An Emacs package for inserting the definition of natural constants and units into a buffer.
IDLWAVE The Emacs modes for editing and running IDL and WAVE CL files. This works now in the HTML exporter; we still need to support it with the LaTeX and ASCII exporters. #### Block quotes for export For quoting an entire paragraph as a citation, use #+BEGIN_QUOTE: ,Everything should be made as simple as possible, ,but not any simpler -- Albert Einstein #+END_QUOTE which will render as Everything should be made as simple as possible, but not any simpler – Albert Einstein #### Fontified code examples in HTML export You can now get code examples fontified like they would be fontified in an Emacs Buffer, and export the result to HTML. To do so, wrap the code examples into the following structure: #+BEGIN_SRC emacs-lisp , (defun org-xor (a b) , "Exclusive or." , (if a (not b) b)) #+END_SRC In the export, this will then look like this (if you are now looking at the ASCII export and do not see anything interesting, go and check out the HTML version at http://orgmode.org/Changes.html). (defun org-xor (a b) "Exclusive or." (if a (not b) b)) The string after the BEGIN_SRC is the name of the major emacs mode that should be used to fontify the code example, without the "-mode" at the end of the mode name. For example, if you are writing an Org tutorial with Org examples included, you would use "org" as the language identifier - in fact, I have used just that in the example above. Currently this works only for HTML export, and requires the htmlize.el package, version 1.34 or later. For other backends, such structures are simply exported as EXAMPLE. #### Include files for export A line like #+INCLUDE "file" markup lang will lead to the inclusion of the contents of FILE at the moment of publishing. FILE should be surrounded by double quotes; this is obligatory if it contains space characters. The parameters MARKUP and LANG are optional. MARKUP can be "example", "quote", or "src". If it is "src", LANG should be the name of the Emacs mode to be used for fontifying the code.
For example: Here is my /.emacs/ file: #+INCLUDE "~/.emacs" src emacs-lisp #### The text before the first headline is now exported by default Previously, the default was to not include text in an org-mode buffer before the first headline. From now on, the default is to include it. If you like the old default better, customize the variable org-export-skip-text-before-1st-heading or set the value on a per-file basis with #+OPTIONS: skip:t #### In-buffer options may now be collected in an external file If you would like to share the Org setup between a number of files, you can now store in-buffer setup in a file and simply point to that file from each file that should read it. If you write in a buffer #+SETUPFILE: "path/to/setup.org" then this file will be scanned for in-buffer options like #+STARTUP, #+TITLE, or #+OPTIONS. #### The in-buffer settings keywords may now be upper or lower case From now on, it makes no difference if you write #+STARTUP or #+startup, to make these lines less imposing. Similarly for all other in-buffer keywords. #### Completion of structure elements As a new experimental feature, Org now supports completion of structural elements like #+BEGIN_EXAMPLE in a special way. It works by typing, for example "<e" and then pressing TAB, on an otherwise empty line. "<e" will expand into a complete EXAMPLE template, with the cursor positioned in the middle. Currently supported templates are: <s #+begin_src <e #+begin_example <q #+begin_quote <v #+begin_verse <l #+begin_latex <L #+latex: <h #+begin_html <H #+html: <a #+begin_ascii <i #+include #### Startup visibility can now be influenced by properties When Emacs opens an Org mode buffer, the outline visibility is set to a startup value that is taken from the variable org-startup-folded, or from a #+STARTUP setting in the buffer. After this has happened, the buffer will now also be scanned for entries with a VISIBILITY property.
Wherever such a property is found, the corresponding subtree will get its visibility adjusted. Allowed values for the property are: folded Fold the subtree children Show the text after the headline, and the headlines of all direct children content Show all headlines in the tree, but no text below any headline all Show the entire subtree For example, I am using this for the huge Changes.org file that is the source for the list of visible changes you are reading right now. The top-most entry in this file always describes the changes in my current working version. The start of this section currently looks like this: * Version 6.03 , :PROPERTIES: , :VISIBILITY: content , :END: ** Overview This was a proposal by Ben Alexander. The command C-u C-u TAB will switch back to the startup visibility of the buffer. #### Clock task history, and moving entries with the running clock Org now remembers the last 5 tasks that you clocked into, to make it easier to clock back into a task after interrupting it for another task. • C-u C-u C-c C-x C-i (or C-u C-u I from the agenda) will clock into that task and mark it as current default task. • C-u C-c C-x C-i (or C-u I from the agenda) will offer a list of recently clocked tasks, including the default task, for selection. d selects the default task, i selects the task that was interrupted by the task that is currently being clocked. 1,… selects a recent task. When you select a task, you will be clocked into it. • You can use C-u C-c C-x C-j to jump to any of these tasks. When moving an entry using structure editing commands, archiving commands, or the special subtree cut-and-paste commands C-c C-x C-w and C-c C-x C-y, the running clock marker and all clock history markers will be moved with the subtree. Now you can start a clock in a remember buffer and keep the clock running while filing the note away. See also the variable org-remember-clock-out-on-exit'. 
#### BBDB anniversaries much faster bbdb-anniversaries is now much faster, thanks to a new approach using a hash for birthdays. Thanks to Thomas Baumann for a patch to this effect. #### New files in the contrib directory Do people think any of these should become core? org-eval.el This new module allows you to include the result of the evaluation of Lisp code (and other scripting languages) into the buffer, similar to the <lisp> tag of Emacs Wiki and Muse. org-mtags.el This new module allows you to use Muse-like tags for some structure definitions in Org. For example, instead of #+BEGIN_EXAMPLE ... #+END_EXAMPLE you can write <example> ... </example> In fact, I myself find these easier to type and to look at. Also, it will allow you to more easily move text and files back and forth between Org and Muse. For a list of supported structure elements, see the commentary in the file org-mtags.el. If you load this module and use the "<i" etc completion described above, the Muse form will automatically be inserted. #### Bug fixes Many bug fixes again. Will this ever stop? ## Version 6.02 ### Overview • Column view (mostly) works now in XEmacs • Summaries for columns in the agenda • The special property Effort can be used for effort estimates • New operators for property searches • Search commands can now include archive files. • Clock tables can include the archive files ### Details #### Column view works now in XEmacs I had already given up on this, but Greg Chernev (who implemented noutline.el for XEmacs and in this way kept Org alive on XEmacs) has done it again and provided the patches to make column view work under XEmacs. There are still some problems, but the basics work and we will iron out the remaining issues, hopefully soon. #### Summaries for columns in the agenda If any of the columns has a summary type defined, turning on column view in the agenda will show summaries for these columns.
Org will first visit all relevant agenda files and make sure that the computations of this property are up to date. This is also true for the special CLOCKSUM property. Org will then sum the values displayed in the agenda. In the daily/weekly agenda, the sums will cover a single day, in all other views they cover the entire block. It is vital to realize that the agenda may show the same entry multiple times (for example as scheduled and as a deadline), and it may show two entries from the same hierarchy (for example a parent and its child). In these cases, the summation in the agenda will lead to incorrect results because some values will count double. #### The special property Effort can be used for effort estimates If you want to plan your work in a very detailed way, or if you need to produce offers with quotations of the estimated work effort, you may want to assign effort estimates to entries. If you are also clocking your work, you may later want to compare the planned effort with the actual working time. Effort estimates can now be stored in a special property Effort, displayed side-to-side with clock sums, and also be summed over a day, in order to show the planned work load of a day. See the manual for more details. #### New operators for property searches Property searches can now choose a number of different operators for comparing values. These operators are =', <>', <', <=', >', and >='. When the search term uses the operator with plain number like +Effort>=2.7, then the property value is converted to a number and a numerical comparison takes place. When the search term uses a string on the right hand side of the operator, a string comparison is done: +PRIORITY<"C". Finally, if the right hand side is enclosed in curly braces, a regexp match is done: aaa={regexp}. In this case you should use only the =' or <>' operators, meaning "does match" or "does not match", respectively. This was triggered by a request from Dan Davison.
#### Search commands can now include archive files. If the value of the customization variable org-agenda-text-search-extra-files contains the symbol agenda-archives as the first element in the list, all archive files of all agenda files will be added to the list of files to search. This is relevant for the search view C-c a s, as well as for the agenda files multi-occur command C-c a /. #### Clock tables can include the archive files There are new values for the :scope parameter of a clock table. This can now be file-with-archives and agenda-with-archives, in order to collect information not only from the current file or all agenda files, but also from all archive files that are currently used by these files. The options available for radio tables using orgtbl-mode have been expanded. You may use several reception points and formats for the same table, you may have special formatting in the last line of the table, and many table parameters may be functions, so that more general transformations are possible. Jason Riedy provided a patch for this, and he will hopefully come up with some examples. Thanks! ## Version 6.01 This is a new major release, mostly because of structural changes in Org. However, since this took a while, there is also a long list of small improvements and some new significant features. ### Overview • The Org distribution has a new structure • New system for selecting modules to load • New archiving mechanism: The Archive Sibling • Support for Sebastian Rose's JavaScript org-info.js. 
• Internal links work now better in HTML export • Export commands can be done in the background • Flexible setting of the time block shown by the clock table • Clock table can be included in the agenda • Support for ISO week dates (ISO 8601) • Tag inheritance can be limited to a subset of all tags • Entries can be sorted by TODO keyword • And some more small fixes and improvements ### Incompatible changes #### The Org distribution has a new structure In the distribution files as well as in the GIT repository, the lisp files are now located in a subdirectory "lisp", and the documentation files are located in a subdirectory "doc". If you are running Org directly from the unpacked distribution archive (zip or tar file, or GIT repository), you need to modify your settings for load-path accordingly. ### Details #### The Org distribution has a new structure In the distribution files as well as in the GIT repository, the lisp files are now located in a subdirectory "lisp", and the documentation files are located in a subdirectory "doc". If you are running Org directly from the unpacked distribution archive (zip or tar file, or GIT repository), you need to modify your settings for load-path accordingly. #### New system for selecting modules to load Org mode now has a system for loading modules by simply configuring an option that lists all the modules you want to use. Customize the variable org-modules'. That variable lists both modules that are part of the Org mode core (and in this way part of Emacs), and modules that are contributed packages. Contributed modules will only be available when you have installed them properly (most likely by downloading the distribution and adding /path/to/orgdir/contrib/lisp to your load path). #### New archiving mechanism: The Archive Sibling There is a new method to archive entries in the current file: by moving them to a sibling called the Archive Sibling. That sibling has the heading "Archive" and also carries the ARCHIVE tag.
This can be a great way to do archiving inside a project, to get parts of the project out of the way and to wait with true archiving (moving to another file) until the entire project is done. Archiving to a sibling keeps much of the context, for example inherited tags and approximate tree position in tact. The key binding for the is "C-c C-x A", and from the agenda buffer you can simply use "A". Thanks to Ilya Shlyakhter for this rather clever idea. #### Support for Sebastian Rose's JavaScript org-info.js. This fascinating program allows a completely new viewing experience for web pages created from Org files. The same document can be viewed in different ways, and switching between the views as well as navigation uses single-key commands. One of the view types is an Info-like interface where you can jump through the sections of the document with the n' and p' keys (and others). There is also a folding interface where you can fold the document much like you can fold it in org-mode in Emacs, and cycle through the visibility both locally and globally. To set this up, all you need to do is to make sure that org-infojs.el gets loaded (customize the variable org-modules to check). Then add this line to the buffer: #+INFOJS_OPT: view:info In that line, you can configure the initial view and other settings. Available views are info for the info-like interface, and overview, content, and showall for the folding interface. See the manual for more details. The JavaScript program is served from http://orgmode.org/org-info.js, and your exported HTML files will automatically get it from there. However, you may want to be independent of the existence and stability of orgmode.org and install a copy locally. Then you need to change the path from which the script is loaded, either by using something like #+INFOJS_OPT: view:info path:../scripts/org-info.js or by configuring the variable org-infojs-options. 
For details see the documentation provided by Sebastian Rose together with org-info.js. #### Export improvements • The export of internal links to HTML now works a lot better. Most internal links that work while editing an Org file inside Emacs will now also work in the corresponding HTML file. • You can run many of the export commands in the background by using C-c C-u C-c C-e' in order to start the process. Right now this will only work if "emacs" is the right command to get to your Emacs executable - I hope to make this less system dependent in the future. Both these are based on requests by Ilya Shlyakhter. #### Improvements to clocktable • The clocktable is now much more flexible and user friendly when trying to specify the time block that should be considered when constructing the table. The :block parameter to the table can now look like any of these: :block meaning 2008 The entire year 2008 2008-04 The month April 2008 2008-04-02 The day April 2, 2008 2008-W14 ISO-Week 14 in 2008 today Today today-5 The day five days ago thisweek The current week thisweek-2 Two weeks ago thismonth The current month thismonth-12 Same month, last year lastmonth Same as thismonth-1 What is more, you can now use the S-left and S-right keys to shift the time block around. The cursor needs to be in the #+BEGIN: clocktable line for this to work. If the current block is today, S-left will switch to yesterday. If the current block is 2008-W14, S-right will switch to the following week. • When the clocktable is collecting from several files, the total time for each file will now also be listed. This was a request from Bernt Hansen. • If you turn on the new clock report mode with the "R" key in the agenda, a clock table will be attached to the agenda, showing the clock report for the file scope and time interval of the agenda view. To turn this on permanently, configure the variable org-agenda-start-with-clock-report-mode.
To modify the properties of the table, in particular the :maxlevel depth, configure org-agenda-clockreport-parameter-plist. #### Support for ISO week dates (ISO 8601) The agenda now shows the ISO week for the displayed dates, in the form W08 for week 8. The keys d, w, m, and y in the agenda view now accept prefix arguments. Remember that in the agenda, you can directly type a prefix argument by typing a number, no need to press C-u first. The prefix argument may be used to jump directly to a specific day of the year, ISO week, month, or year, respectively. For example, 32 d jumps to February 1st, 9 w to ISO week number 9. When setting day, week, or month view, a year may be encoded in the prefix argument as well. For example, 200712 w will jump to week 12 in the year 2007. If such a year specification has only one or two digits, it will be mapped to the interval 1938-2037. When entering a date at the date prompt, you may now also specify an ISO week. For example w4 Monday of week 4 fri w4 Friday of week 4 w4-5 Same as above 2012 w4 fri Friday of week 4 in 2012. 2012-W04-5 Same as above So far I have not implemented the effect of org-read-date-prefer-future' on this functionality, because it seemed too magic for me. I'd appreciate comments on this issue: Should org-read-date-prefer-future' also push dates into the next year if the week you are entering has already passed in the current year? For consistency I guess this should be the case, but I cannot quite wrap my head around it. I hope but am not entirely convinced that this will behave sanely also during the first/last week of a year. Please test extensively and report back. This was a request by Thomas Baumann. #### Improvements in Search View • Calling search view with a C-u prefix will make it match only in TODO entries. • The single quote is no longer considered a word character during search, so that searching for the word "Nasim" will also match in "Nasim's".
#### Misc • Inheritance of tags can now be limited to a subset of all tags, using the variable org-use-tag-inheritance. This variable may now be a regular expression or a list to select the inherited tags. Thanks to Michael Ekstrand for this excellent proposal. The regexp option is also implemented for org-use-property-inheritance, so that you can now select properties for inheritance my name. • The INHERIT flag to the function org-entry-get can be set to the symbol selective. If this is the case, then the value of the property will be retrieved using inheritance if and only if the setting in org-use-property-inheritance selects the property for inheritance. • There are now special faces for the date lines in the agenda/timeline buffers, and another special face for days that fall on a weekend: org-agenda-date and org-agenda-date-weekend. Both these faces are initially similar to the org-agenda-structure face, but you can customize them freely. • When an entry already has a scheduling or deadline time stamp, calling C-c C-s' or C-c C-d', respectively, will now use that old date as the default, and you can can use the "++4d" syntax to invoke shifts relative to that default date. Simply pressing RET at the prompt will keep the default date, not switch to today. This was an omission in the earlier implementation, spotted by Wanrong Lin. Thanks! • File names in remember templates can be relative, if they are, they will be interpreted relative to org-directory. • The handling of the clipboard when inserting into remember templates is now much better, and gives more control on what should be inserted with new %-escapes: • %c - Now always insert the head of the kill ring, never the X clipboard. • %x - Insert the content of the X clipboard. This is the first non-empty value from the PRIMARY, SECONDARY and CLIPBOARD X clipboards. • %^C - This allows the user to choose between any of the clipboard values available, the kill ring head, and the initial region if set. 
• %^L - Like %^C, but this inserts an org link using the selected value. Thanks to James TD Smith for this patch. • Table export to an internal file can now use a format specification, similar to the formats that are used by orgtbl radio tables. The default format is in the variable org-table-export-default-format. You can use properties TABLE_EXPORT_FILE and TABLE_EXPORT_FORMAT to specify the file name to which the export should go, and a local format. For example: :PROPERTIES: :TABLE_EXPORT_FILE: ~/xx.txt :TABLE_EXPORT_FORMAT: orgtbl-to-generic :splice t :sep "\t" :END: Thanks to James TD Smith for this patch. • Entries can be sorted by TODO keyword, and the order is given by the definition sequence of the TODO keywords in the variable org-todo-keywords, or in the #+TODO line. Use the "o" key when sorting with C-c ^. Thanks to James TD Smith for this patch. ## Version 5.23 ### Overview • New keyword search agenda view • Many new extensions available in the CONTRIB directory • New remember template option: pre-selection contexts • Modifying list/headline status of a line • Granularity while editing time stamps • New repeaters mechanisms • New parameters for dynamic blocks ad the clock table • Limiting iCalendar export to fewer entries • M-RET splits lines again • New hooks ### Incompatible changes • The variable org-time-stamp-rounding-minutes' is now a list of two values - if you have configured this variable before, please do it again. ### Details #### New keyword search agenda view C-c a s' now invokes a special agenda view that can be used to search notes by keyword and regular expressions. In particular, it does not require a single regular expression or string to search for, but it can search for a number keywords or regexps that can occur in arbitrary sequence in the entry. The search knows the boundaries of an entry, can use simple Boolean logic and is reasonably fast. 
For example, the search string +computer +wifi -ethernet -{8\.11[bg]} will search for note entries that contain the keywords computer and wifi, but not the keyword ethernet, and which are also not matched by the regular expression "8\.11[bg]", meaning to exclude both 8.11b and 8.11g. If the first character of the search string is an asterisk, the search will only look at headlines - otherwise it will look at the headine and the text below it, up to the next (possibly sub-) heading. The command searches all agenda files, and in addition the files listed in org-agenda-text-search-extra-files. I find it very useful to define a custom command to do such a search only in a limited number of files (my notes files), like this: ("N" "Search notes" search "" ((org-agenda-files '("~/org/notes.org" "~/org/computer.org")) (org-agenda-text-search-extra-files nil))) #### Many new extensions available in the CONTRIB directory • Phil Jackson's org-irc.el is now part of the Org mode core, which means it will become part of Emacs soon. • The new development model already starts to pay off, a number of interesting extensions are now part of the distribution. Check the file CONTRIB/README for a list. • There is a new variable org-default-extensions'. Configuring this variable makes it very easy to load these default extensions - eventually this will be expanded to cover contributed extensions as well. #### New remember template option: pre-selection contexts • Remember template definitions now allow six elements. The last element defines the contexts in which the template should be offered. It can be a list of major modes, a function, t or nil. If it is a list of major-mode, the template will be available only when org-remember is called from a buffer in one of these modes. If it is a function, the template will be offered only if the function returns t' when called in the current buffer. A value of t or nil for this element means select this template in any context. 
One possible application for this would be to have several templates all using the same selection letter, and choosing the right one based on context. For example, think of tasks describing a bug in a source code file. With the following configuration we make sure that the bug reports are filed into the appropriate sections of the target file. (setq org-remember-templates '(("Elisp" ?b "* %a\n\n%i%?" "~/bugs.org" "Elisp bugs" (emacs-lisp-mode)) ("C Bugs" ?b "* %a\n\n%i%?" "~/bugs.org" "C bugs" (cc-mode)))) See (info "(org)Remember templates") for details. #### Modifying list/headline status of a line • C-c -' has now more functions: • In a table, add a hline as before • In an item list, cycle bullet type as before • In a normal line, turn it into an item • In a headline, turn it into an item • If there is an active region, turn each line into an item. But if the first region line is already an item, remove item markers from all lines. Based on proposals by Bastien. • C-c *' has now more functions • in a table, recompute, as before • in a normal line, convert it to a sub heading. • at an item, convert it into a subheading • if there is an active region, convert all lines in the region to headlines. However, if the first line already is a heading, remove the stars from all lines in the region. Based on proposals by Bastien. #### Changes related to time stamps • The variable org-time-stamp-rounding-minutes is now a list of two values. The first applies when creating a new time stamp. The second applies when modifying a timestamp with S-up/down. The default for this new task is 5 minutes, but 15 may also be a very good value for many people. If S-up/down is used on a time stamp where the minute part is not compatible with this granularity it will be made so. You can bypass this by using a prefix argument to exactly specify the number of minutes to shift. This was a proposal by Adam Spiers.
• New repeaters that shift a date relative to today, or that make sure that the next date is in the future. For example: ** TODO Call Father Marking this DONE will shift the date by at least one week, but also by as many weeks as it takes to get this date into the future. However, it stays on a Sunday, even if you called and marked it done on Saturday. ** TODO Check the batteries in the smoke detectors Marking this DONE will shift the date to one month after today. Proposed by Wanrong Lin and Rainer Stengle. #### New parameters for dynamic blocks ad the clock table • There is a new :link parameter for the clocktable. When set, the headlines listed in the table will be links to the original headlines. • There is a new :content parameter that is passed to the writer function of the dynamic block. Use this parameter to pass the previous content of the block to the writer function, in case you want to make the outcome dependent on the previous content. #### Limiting iCalendar export to fewer entries • New way to limit iCalendar export to the entries captured in an agenda view. This is done by "writing" the agenda view using C-x C-w' to a file with extension .ics. This was a request by Kyle Sexton. #### Misc • Due to a popular revolt shortly after the 5.22 release, M-RET can again be used to split a line so that the rest of the line becomes the new heading. However, if you do this in a heading containing tags, the tags will stay in the old line. Customize the variable org-M-RET-may-split-line if you don't want this command to split a line in the middle. The same variable also influences line splitting in items and in tables. • There are three new hooks: org-follow-link-hook: runs after following a link org-publish-before-export-hook: runs before export org-publish-after-export-hook: runs after export ## Version 5.22 ### Incompatible changes • The variable org-log-done' is now less complex. • The in-buffer settings for logging have changed. 
Some options no longer exist, some new ones have been added. ### Details #### Changes to logging progress There is now more control over which state changes are being logged in what way. Please read carefully the corresponding sections in the manual. Basically: • The variable org-log-done' has been simplified, it no longer influences logging state changes and clocking out. • There is a new variable for triggering note-taking when clocking out an item: org-log-note-clock-out'. • Logging of state changes now has to be configured on a per-keyword basis, either in org-todo-keywords' or in the #+TODO in-buffer setting. • These per-keyword settings allow more control. For example WAIT(w@) Record a note when entering this state. WAIT(w!) Record a timestamp when entering this state. WAIT(w@/!) Record a note when entering and timestamp when leaving this state. This is great for getting a record when switching *back* from WAIT to TODO. WAIT(/!) Record a timestamp when leaving this state. Here we do not even define a fast access character, but just the logging stuff. This was triggered by requests from Wanrong Lin and Bernt Hansen. #### Other • M-RET no longer breaks a line in the middle, it will make a new line after the current or (if cursor is at the beginning of the line) before the current line. • RET, when executed in a headline after the main text and before the tags will leave the tags in the current line and create a new line below the current one. ## Version 5.21 Bug fixes, in particular the long-hunted bug about wrong window positions after pressing SPACE in the agenda. Hopefully this is really fixed. ## Version 5.20 ### Overview #### Remember/Refile/Goto • The use of prefix arguments for the commands org-remember' and org-refile' has been normalized. • The clock can now safely be used in a remember buffer. • The variable org-remember-use-refile-when-interactive' introduced only in 5.19 is already obsolete. Please use org-remember-interactive-interface' instead.
• It is no longer necessary to update the refiling targets. • Automatic isearch in org-goto'. • Outline-path-completion as alternative org-goto interface. #### Misc • Checkboxes now work hierarchically. • C-k' can now behave specially in headlines. • Repeater for tasks in plain timestamps. • All clock intervals of an item show in agenda/timeline. • New parameter :step for clocktable, to get daily reports. • Never loose a repeaded scheduled item from the agenda. • Archiving a subtree now stores the outline path in a property. • Links to messages in Apple Mail. • Bug fixes. ### Incompatible Changes • The variable org-remember-use-refile-when-interactive' introduced only in 5.19 is already obsolete. Please use org-remember-interactive-interface' instead. ### Details #### Remember/Refile/Goto • The use of prefix arguments for the commands org-remember' and org-refile' has been normalized: • when called without prefix argument, the command does its normal job, starting a remember note or refiling a tree. • when called with a single C-u prefix, these commands can be used to select a target location and to jump there. In the case of org-remember', you will be prompted for a template and then Emacs jumps to the default target location or this template. In the case of org-refile', you select a location from the refile target list and jump there. • when called with two prefixes (C-u C-u'), the command jumps to the location last used for storing a note or a moved tree. • When the clock is running inside an remember buffer, storing the remember buffer with C-c C-c' will automatically clock out. This was inspired by a request by Rainer Stengle. • The variable org-remember-use-refile-when-interactive' introduced only in 5.19 is already obsolete. Please use org-remember-interactive-interface' instead. This new variable does select the interface that is used to select the target for a remember note in an interactive way. 
Possible values are: • outline': Use an outline of the document to select a location. • outline-path-completion': Use completion of an outline path to select a location. • refile': Offer the org-refile-targets' as possible targets. • It is no longer necessary to update the refiling targets - they are always current. • In org-goto', typing characters now automatically starts isearch from the beginning of the buffer. The isearch is special also because it only matches in headline. This goes some way toward saving org-goto from being removed from Org mode. Thanks to Piotr Zielinski for the code, and sorry that it took me so long to put it in. If you prefer to use single letters n,p,f,b,u,q for navigation as before, configure the variable org-goto-auto-isearch'. • Outline-path-completion is now available as an alternative interface in the command org-goto'. Please select the default interface you'd like to use with the new variable org-goto-interface'. You can then select the alternative interface with a prefix argument to C-c C-j' (org-goto). I am considering to make outline-path-completion the default interface. Comments? #### Misc • Checkboxes now work hierarchically. When a plain-list item with a checkbox has children with checkboxes, the status of the item's checkbox is calculated from the children, each time a checkbox is toggled with C-c C-c. Thanks to Miguel A. Figueroa-Villanueva for a patch to this effect. • There is a new variable org-special-ctrl-k'. When set, C-k' will behave specially in headlines: • When the cursor is at the beginning of a headline, kill the entire line and possible the folded subtree below the line. • When in the middle of the headline text, kill the headline up to the tags. • When after the headline text, kill the tags. This is following a proposal by Piotr Zielinski. • You can now also have a plain (as opposed to deadline or scheduled) repeater timestamp in a task. Switching the task to DONE will now also shift a plain time stamp. 
This was a request by Austin Frank. • If an entry is clocked multiple times, it will now show up several times in the agenda and timeline buffers, when log-mode is on. This was a proposal by Jurgen Defurne. • The clock table accepts a new parameter :step. This parameter can be day' or week' and will result in separate tables for each day or week in the requested time interval. This was triggered by a proposal by Sacha Chua in her blog. • A time-stamp with a repeater now no longer refers to the date closest to the current day. Instead, it means either today or the most recent match. This change makes sure that overdue scheduled or deadline items never disappear from the agenda. With the previous convention, an overdue scheduled item would disappear. For example, a weekly item scheduled for Sunday would appear as overdue until Wednesday, and then suddenly disappear until next Sunday. Now the item will show up as "Sched 7x" on Saturday. From Sunday on it will be in the list as "Scheduled", i.e. old sins will be forgiven. This follows a request by Wanrong, Dennis and Bernt. • Archiving a subtree now creates an additional property, ARCHIVE_OLPATH. This property contains the "path" in the outline tree to the archived entry, as it was in the original file. For example, archiving Fix the door in the following hierarchy * Tasks ** HOME *** Garage **** Fix the door will file it with the following property :ARCHIVE_OLPATH: Tasks/HOME/Garage Note that you can configure (i.e. limit) the information that gets stored upon archiving with the variable org-archive-save-context-info'. • New file org-mac-message.el' by John Wiegley to create links for messages in Apple Mail, and to follow these links. • Bug fixes. ## Version 5.19 ### Overview • Column view can list the clocked times of a subtree. • Storing remember notes can use the org-refile' interface. • Storing remember notes no longer produces empty lines. • Moving subtrees now folds all siblings of the subtree.
• New variable org-agenda-todo-keyword-format'. • Hack to allow brackets in link descriptions. • Clocking into an entry can enforce a specific TODO state. • EXPORT_FILE_NAME may be an absolute file name with "~". • Bug fixes, lots of them. ### Details • A new special column definition lists the sum of all CLOCK entries in a subtree. For example #+COLUMNS: %20ITEM %10Time_Estimate{:} %CLOCKSUM will allow you to compare estimated times (as given in the Time_Estimate property) with the clocked times. This was a request by Bernt Hansen. • Storing remember notes can now use the org-refile' interface instead of the org-goto' interface (see the variable org-remember-use-refile-when-interactive'). Nothing will change if the note is stored immediately after pressing C-c C-c' in the *Remember* buffer. But if you have chosen (e.g. by pressing C-u C-c C-c') to interactively select the filing location (file and headline), the refile interface will be used instead. I am excited about this change, because the org-goto' interface is basically a failure, at least for this application. Note that in any case the refile interface has to be configured first by customizing org-refile-targets'. • Notes inserted with remember now remove any whitespace before and after the note before being pasted, so that there will be no empty lines inserted together with the note. We could invent special syntax in remember templates to allow creating empty lines before a note - is there anyone who'd want this? • Moving subtrees now folds all siblings of the subtree. This is the only reasonably simple way I could find to avoid the reported inconsistencies in the folding state of the outline tree after moving entries. There are reasons to like this new behavior, because it easily visualizes where the tree is located after the move. Still, not everyone might be happy with this. Massive complaining would be needed to make me fix this. 
• New variable org-agenda-todo-keyword-format' to specify the width of the TODO keyword field in the agenda display. Use it to get things to line up better. This was a proposal by Rainer Stengele. • If a link description inserted with C-c C-l' contains brackets, the brackets will now be converted into curly braces. This looks similar enough. Supporting brackets in link descriptions is, for technical reasons too long to explain here, complex. • The new option org-clock-in-switch-to-state' can be set to a TODO state that will be enforced when the clock is started on an entry. This follows an idea by Sacha Chua. • The EXPORT_FILE_NAME property may now also be an absolute file name, and it may contain abbreviations like "~" for the users home directory. This was requested by Adam Spiers. • Bug fixes, lots of them. Minor fixes. ## Version 5.17 ### Details #### Whitespace • When cutting, pasting, or moving subtrees and items, the empty lines before the subtree/item now belong to the part and will be moved with it. There is one exception to this rule: If the first child is moved down (or, equivalently, the second is moved up), the amount of empty lines above the first child to be moved along with it is limited by the number of empty lines below it. This sounds complicated, but it allows to have extra empty space before the first child and still have good behavior of the subtree motion commands. • Plain lists items work the same. I believe we have finally nailed this one. Thanks to Daniel Pittman for bring this up again and to Eric Schulte for pointing out that it is the empty lines before an entry that really count. This change was non-trivial, please give it a good test and let me know about any problems. #### Remember • The new command org-remember-goto-last-stored' will jump to the location of the remember note stored most recently. If you have org-remember' on a key like C-c r', then you can go to the location with a double prefix arg: C-u C-u C-c r'. 
This was a proposal by Rainer Stengele. • Template items that are being prompted for can now specify a default value and a completion table. Furthermore, previous inputs at a specific prompt are captured in a history variable. For example: %^{Author|Roald Dahl|Thomas Mann|Larry Niven} will prompt for an author name. Pressing RET without typing anything will select "Roald Dahl". Completion will give you any of the three names. And a history will be kept, so you can use the arrow keys to get to previous input. The history is tied to the prompt. By using the same prompt in different templates, you can build a history across templates. The ideas for this came from proposals by Bastien and Adam. • When a remember template contains the string %!', the note will be stored immediately after all template parts have been filled in, so you don't even have to press C-c C-c'. The was a proposal by Adam Spiers. #### Refile • org-refile-targets' has a new parameter to specify a maximum level for target selection. Thanks to Wanrong Lin for this proposal. • When the new option org-refile-use-outline-path' is set, refile targets will be presented like a file path to the completion interface: "level 1/level 2/level 3". This may be the fastest interface yet to get to a certain outline entry. Do we need to use this interface in other places? Thanks to Jose Ruiz for this proposal. ## Version 5.16 ### Details #### Restriction lock on agenda scope You can now permanently lock the agenda construction to a certain scope, like a file or a subtree. So instead of pressing "<" for each command in the agenda dispatcher, you only once select a restriction scope. All subsequent agenda commands will than respect this restriction. For example, you can use this at work, to limit agendas to your work file or tree, and at home to limit to the home file or tree. Or you can use it during the day in order to focus in on certain projects. 
You select a scope with the command C-c C-x <', which restricts to the current subtree. When called with a C-u' prefix, the restriction is to the current file. You can also make restrictions from the speedbar frame, see below. When making a new restriction and an agenda window is currently visible, it will immediately be updated to reflect the new scope. If you like you can display an agenda view and then watch it change in various scopes. To get rid of the restriction, use the command "C-c C-x >". Or press ">" in the agenda dispatcher. Also, and use of "<" in the dispatcher will disable the restriction lock and select a new restriction. Thanks to Rick Moynihan for triggering this development. • Org mode now supports Imenu. For example, with the setting (add-hook 'org-mode-hook a menu will be created in each Org mode buffer that provides access to all level 1 and level 2 headings. The depth of the menu can be set with the variable org-imenu-depth'. • org-mode now supports Speedbar. This means that you can drill into the first and second level headlines of an Org mode file right from the speedbar frame. • You can set a restriction lock for the Org mode agenda to a file or a subtree directly from the speedbar frame. Just press "<" with the cursor on an Org mode file or subtree to set the lock and immediately update the agenda if it is visible. Use ">" to get rid of the lock again. ## Version 5.15 ### Details • There are new special properties TIMESTAMP and TIMESTAMP_IA. These can be used to access the first keyword-less active and inactive timestamp in an entry, respectively. • New variable org-clock-heading-function'. It can be set to a function that creates the string shown in the mode line when a clock is running. Thanks to Tom Weissmann for this idea. • Bug fixes. ## Version 5.14 ### Overview • Remember and related stuff • New command org-refile' to quickly move a note. • Easy way to jump to the target location of remember template. 
• New %-escapes in remember templates: %c %(…) and %[…] • org-remember-insinuate' simplifies remember setup • Emphasis and Font-lock stuff • Stacked emphasis is no longer allowed. • You may finally emphasize a single character like *a*. • Font-lock now can hide the emphasis markers • Text in the "=" emphasis is exported verbatim • There is a new emphasis marker "~" for verbatim text • Constructs treated specially by the exporters can be highlighted • Properties and Column view • More control over which properties use inheritance • CATEGORY="work" can now be used in a tags/property search • the {+} summary type can specify a printf-style output format #### The date/time prompt There have been several small but very useful additions to the date prompt. • While entering data at the date prompt, the current interpretation of your input is shown next to your input in the minibuffer. I find this great to understand how the input works. If you find the extra stuff in the minibuffer annoying, turn it off with org-read-date-display-live'. • The date prompt now prefers to select the future. If you enter a date without a month, and the day number is before today (for example, on the 16th of the month you enter "9"), Org mode will assume next month. Similarly, if you enter a month and no year, next year will be assumed if the entered month is before the current, for example if you enter "May" in September. Thanks to John Rakestraw for this great suggestion. If you find it confusing, turn it off with org-read-date-prefer-future'. • When modifying an existing date using C-c .' at the stamp, the time or time range in the stamp are now offered as default input at the prompt. This goes a long way to simplifying the modification of an existing date. Thanks to Adam Spiers for this proposal. #### Export (all implemented by Bastien…) • You can now export special strings in HTML. 
Here is the list of newly performed conversions: Org Description HTML \\- double backslash followed by minus &shy; -- two dashes (minuses) &ndash; --- three dashes (minuses) &mdash; ... three dots &hellip; You can turn this globally on or off with org-export-with-special-strings' or locally with "-:t" or "-:nil" in the #+OPTIONS line. Thanks to Adam Spiers for starting the discussion, and thanks to Daniel Clemente and William Henney for relevant inputs. • Comma-separated emails in #+EMAIL: are correctly exported. Thanks to Raman for pointing out this omission. #### Agenda • In the agenda, a few keys have changed g does now the same a "r", refresh current display, because "g" is the Emacs standard for "refresh" G toggle the time grid, used to be "g" e Execute another agenda command, pretty much the same as C-c a', but shorter and keep the same agenda window. #### Miscellaneous (much of it from Bastien) • You can now select the sectioning structure of your LaTeX export by setting it either globally (org-export-latex-default-class') or locally in each Org file (with #+LaTeX_CLASS: myclass). You can also customize the list of available classes and their sectioning structures through the new org-export-latex-classes' option. Thanks to Daniel for discussions and suggestion on this issue. • You can send and receive radio lists in HTML, LaTeX or TeXInfo, just as you send and receive radio tables. Check the documentation for details and examples. • The default for org-ellipsis' is back to nil, some people seem to have had problems with the face as a default. • Support for pabbrev-mode, needs pabbrev version 1.1. Thanks to Phillip Lord for adapting his package to make this possible. • New variable org-show-entry-below' to force context-showing commands to expose the body of a headline that is being shown. Thanks to Harald Weis for pointing out this omission. 
## Version 5.13i ### Details • On the date/time prompt, you can now also answer with something like +2tue to pick the second tuesday from today. This was a proposal by Sacha Chua. • When interpolating into Lisp formulas in the spreadsheet, the values of constants and properties are no longer enclosed in parentheses. When interpolating for calc, this still happens in order to allow expressions in constants. This problem was reported by Eddward DeVilla. • When a directory is listed in org-agenda-files', all files with extension matched by the new variable org-agenda-file-regexp' in that directory will be agenda files. • Bug fixes. ## Version 5.13 ### Overview • Bug fixes and improvements in column view • All known bugs fixed. • A Column view can be captured into a dynamic block. • The ITEM column is formatted more compactly. • Also ITEM can be edited with e' • The agenda dispatcher • <' cycles through restriction states. • Multi-character access codes to commands (= sub-keymaps). • Sorting improvements • User-defined sorting keys. • Sorting by properties. • Sorting of plain lists. • HTML <div> structure • Other stuff • New variables, several of them. • Drawers can be set on a per-file basis. • Better control over priority fontification in agenda. • M-up and M-down now move the current line up and down. • Abort remember template selection with C-g. ### Details #### Bug fixes and improvements in column view • All the bugs described by Scott Jaderholm have been fixed (at least I hope so…). • You can now capture a column view into a dynamic block, for exporting or printing it. The column view can be • global, i.e. for the entire file • local, i.e. for the subtree where the dynamic block is • from an entry with a specific :ID: property. You can identify the entry whose column view you want to capture by assigning an :ID: property, and use that property in the dynamic block definition. For example: * Planning :PROPERTIES: :ID: planning-overview :END: [...] 
* The column view #+BEGIN: columnview :hlines 1 :id "planning-overview" #+END: Use C-c C-x r' to insert such a dynamic block, and you will be prompted for the ID. • When the current column format displays TODO keyword, priority or tags, these parts are stripped from the content of the ITEM column, making for more compact and readable entries. When any of these "properties" are not listed in the current column format, they are instead retained in the ITEM column. • You can now also edit the ITEM column with e'. #### The agenda dispatcher • Instead of pressing 1' to restrict an agenda command to the current buffer, or 0' to restrict it to the current subtree or region, you can now also press <' once or twice, respectively. This frees up 1' and 0' for user commands, a request by Bastien. In fact, "<" cycles through different restriction states. "1" and "0" are still available for backward compatibility, until you bind them to custom commands. • The access code to custom agenda commands can now contain several characters, effectively allowing to bundle several similar commands into a sub-keymap. This follows an excellent proposal by Adam Spiers. For example: (setq org-agenda-custom-commands '(("h" . "HOME + Name tag searches") ; describe prefix "h" ("hl" tags "+HOME+Lisa") ("hp" tags "+HOME+Peter") ("hk" tags "+HOME+Kim"))) • The user function option in org-agenda-custom-commands may now also be a lambda expression, following a request by Adam Spiers. #### Sorting improvements We are using a new routine for sorting entries, courtesy of John Wiegley. Many thanks to John. • You can define your own function to extract a sorting key and in this way sort entries by anything you like. • Entries can now be sorted according to the value of a property. • Plain lists can be sorted. #### HTML <div> structure There is now a <div>-based structure in exported HTML. • The table of context is wrapped into a div with a class "table-of-contents". 
• The outline structure is embedded in <div> elements with classes "outline-1", "outline-2" etc. • The postamble, containing the author information and the date is wrapped into a div with class "postamble". I am not sure if the class names are the best choice, let me know if there are more "canonical" choices. Thanks to Mike Newman and Cezar for input, and in particular to Mike for his clearly formulated specification. #### Other stuff • New variable org-agenda-window-frame-fractions' to customize the size limits of the agenda window in the case that you display the agenda window by reorganizing the frame. • Drawers can be set on a per-file basis using #+DRAWERS: HIDDEN STATE PROPERTIES This will define the drawers :HIDDEN: and :STATE:. The :PROPERTY: drawer should always be part of this list, or your properties will not be folded away. Thanks to Richard G. Riley for this proposal. • org-agenda-fontify-priorities' may now also be an association list of priorities and faces, to specify the faces of priorities in the agenda individually. • The variable org-export-with-property-drawer' no longer exists, please use org-export-with-drawers' instead. Also, the corresponding switch in the #+OPTIONS line has changed from "p" to "d". Thanks to Bastien for pointing out that we needed to handle not only the property drawer. • M-up and M-down now move the current line up and down (if not at a headline, item or table). Among other things you can use this to re-order properties in the drawer. This was a proposal by Bastien. • New variable org-agenda-todo-ignore-with-date', based on a request by Wanrong Lin. • Aborting remember template selection with C-g now kills the remember buffer and restores the old window configuration. This was a request by Nuutti Kotivuori. ## Version 5.12 ### Overview • Remember templates can now have a name. • C-c C-k' will abort taking a note (remember or log) • C-c C-x C-w' and C-c C-x M-w' now accept a prefix arg. 
• Lines in the agenda can be fontified according to priority. • New variable org-scheduled-past-days'. • New variables org-agenda-deadline-leaders' and org-agenda-scheduled-leaders'. • New sparse tree function org-sparse-tree'. • The variable org-ellipsis' now defaults to org-link'. • The #+OPTIONS line has a new option "tags". • New variable org-use-property-inheritance'. ### Incompatible Changes • C-c /' now calls org-sparse-tree'. ### Details • Remember templates can now have a template name as the first element. The name will be listed along with the selection character when prompting for a template. It is best to have the name start with the selection character, for example if you use ("Note" "n"), you will be prompted like "[n]ote". Thanks to Matiyam for this proposal. • C-c C-k' will abort taking a note. You can use this in remember buffers and when taking a logging note (e.g. for a state change). Thanks to Bastien. • C-c C-x C-w' and C-c C-x M-w' now accept a prefix arg to cut N sequential subtrees. This was a proposal by John. • Lines in the agenda are now bold if they have priority A and italic if they have priority C. You can turn this off using the variable org-agenda-fontify-priorities'. Thanks to John Wiegley for the idea and code. • New variable org-scheduled-past-days' to set the number of days a scheduled item will be listed after its date has passed. Default is 10000, i.e. indefinitely. • New variables org-agenda-deadline-leaders' and org-agenda-scheduled-leaders' to adjust the leading text of scheduled items and deadlines in the agenda. Thanks to John Wiegley for a patch. • New sparse tree function org-sparse-tree'. This is now the default binding for C-c /'. It requires one additional keypress to select a command, but in return it provides a single interface to all the different sparse tree commands, with full completion support. 
• The variable org-ellipsis' now defaults to the face org-link' because the visibility of the dots is really bad and I have found this change very useful indeed. • The #+OPTIONS line has a new option "tags" which can be used to set org-export-with-tags'. Thanks to Wanrong Lin for this proposal. • New variable org-use-property-inheritance'. Configure it to t' if you want searching for entries with certain properties to always assume inheritance. This is not well tested yet, please check it out. • Bug fixes ## Version 5.11 ### Overview • SUMMARY, DESCRIPTION, LOCATION properties for iCalendar • Clock entries can now have their own drawer • C-c C-x C-r' only updates a clocktable at point • New way to assign a remember template to a single key • C-n' and C-p' are back to their default binding • C-x C-s' in agenda buffer saves all org-mode buffers • Schedule/deadline leaves note in agenda buffer • Prefix argument for C-c C-d/s' will remove date • New variable to make block agenda more compact • Better tag alignment in agenda ### Incompatible changes • If you have customized org-drawers', you need to add "CLOCK" to the list of drawers. • The variable org-agenda-align-tags-to-column' has been renamed to org-agenda-tags-column'. The old name is still an alias, in Emacs 22 and in XEmacs, but not in Emacs 21. • The default value for both org-tags-column' and org-agenda-tags-column' is now -80. • The variable org-insert-labeled-timestamps-before-properties-drawer' is now obsolete. ### Details • The LOGGING property allows to modify the settings for progress logging for a single entry. For example: :PROPERTIES: :LOGGING: nologging nologrepeat :END: turns off all progress logging for the current entry and its children. • The properties SUMMARY, DESCRIPTION and LOCATION have special meaning during iCalendar export, when they translate to the corresponding VEVENT and VTODO fields. 
If not given, Org-ode continues to use cleaned-up version of the headline and body as the summary and the description, respectively. • New function to go to the entry with the currently running clock. Bound to C-c C-x C-j', in agenda also to "J". If you use this often, you might even want to assign a global key. Thanks to Bernt and Bastien. • Clock entries can now have their own drawer, the :CLOCK: drawer. Check out the variable org-clock-into-drawer' for configuration of this feature. The default is to create a drawer when the second clocking line gets added to an entry. Note that "CLOCK" has been added to the default value of org-drawers', but if you have configured that variable, you must go back and add "CLOCK" yourself to get this drawer folded away. Thanks to Tom Weissman for pointing out that too many clock entries are visually annoying. • C-c C-x C-r' no longer tries to find the first clocktable in a buffer and then updates it. Instead, it will update the clocktable at point if there is one (same as C-c C-c will do if the cursor is in the "#+BEGIN" line of the table). If there is none at point, a new one will be inserted. This change was necessary because the new :scope parameter allows to have several clocktables in a buffer. Thanks to Bastien for pointing this out. To update all dynamic blocks in a file, use C-u C-c C-x C-u'. • The function org-remember' can now be called with a template selection key as argument. This helps to make key bindings that go directly to a specific template without being prompted for a template, like this: (global-set-key [f5] (lambda () (interactive) (org-remember "j"))) Thanks to Richard G Riley for bringing this up. • C-n' and C-p' are back to their default binding (next/previous line) in the agenda buffer. Enough people, including recently Denis Bueno, have complained about this, and I agree it is not good to break habits like that. • C-x C-s' in an agenda buffer now saves all org-mode buffers (also s' does this). 
• Setting schedule or deadline dates from the agenda now produces a note in the agenda, similarly to what happens with S-left/right. • Using a prefix argument for C-c C-d' or C-c C-s' will remove the deadline or scheduling date from an item. Thanks to Wanrong Lin for this proposal. • New variable org-agenda-compact-blocks'. When set, the space between blocks in a block agenda is reduced as much as possible, to show more items on a single screen. • The variable org-agenda-tags-column' (renamed from org-agenda-align-tags-to-column') can now also be negative, to mean alignment to the left. The new default is -80, just like it is now for org-tags-column'. • Bug fixes ## Version 5.10 ### Overview • Category and the archive location can be properties. • The clocktable has a new :scope parameter. • CSV support when importing a table. • Better defaults when modifying a time stamp. • New way to specify the duration of an appointment. • More aggressive version of orgstruct-mode improved wrapping. • Modifications to priority cycling. • Modifications to computations in column view. • New command org-occur-in-agenda-files'. • Bug fixes. ### Details • Both the category and the archive location in a (sub)tree of the buffer can now be specified using a property, for example: * Tree with special properties :PROPERTIES: :CATEGORY: Examples :ARCHIVE: /some/special/file:: :END: This is a much cleaner way of dealing with multiple categories and archives in a single file. The preferred use of the #+CATEGORY and #+ARCHIVE lines is now to set a single default for the file which is then locally overruled by properties. This was a proposal from Bastien if I remember correctly. Multiple #+ lines still work and I don't plan to remove this support soon, but I encourage you to stop using them. • The clocktable has a new :scope parameter that determines the range in the file from which clock entries should be taken. 
This can be anything from the local subtree to the entire buffer to even the full list of agenda files. Legal values are: value scope nil the current buffer or narrowed region file the full current buffer subtree the subtree where the clocktable is located treeN the surrounding level N tree, for example tree3 tree the surrounding level 1 tree agenda all agenda files Thanks to Jason F. McBrayer and Bernt Hansen for inspiration. Thanks to cranreuch (what is you full name?) for mentioning, at the right moment, that the clocktable is not so bad - that remark made it seem worthwhile to add features. • The commands to import a table and to convert a region to a table can now handle comma-separated values (CSV). The algorithm does not yet treat quoting correctly, but for basic input it works. • When modifying an existing time stamp, or when entering the second stamp of a range, the date prompt will now consistently default to the date/time in the existing stamp. This was triggered by Nuutti Kotivuori's request. • At the date/time prompt, there is a new way to specify a range of hours, by using "+DURATION" after the time. For example: 14:00+2 means 14:00-16:00 2pm+2:30 means 14:00-16:30 Again, Nuutti Kotivuori's request. • When you use the function turn-on-orgstruct++' to turn on orgstruct-mode, the special org-mode settings for auto-filling, indentation and paragraphs are exported into the buffer, so that typing list items with indentation works better. This was Bastien's idea and request. • New variable org-priority-start-cycle-with-default'. When t (the default), priority cycling will initially set the default priority and then increase or decrease. When nil, the first priority set by cycling is already 1 different from the default priority. This was mostly driven by Bastien. • In column view: When an entry has a property for a summary column defined, its value is normally overwritten by the sum of all the children's values each time you enter column view. 
Now there is an exception to this rule: If none of the children has that particular property defined, the parent's value stays. In this way you can still place TODO items under such an entry without getting the property value changed. Thanks to Russel Adams for pointing out that this is a better way of doing things. • In column view, computed values are now bold face, and trying to edit them is an error. I think this works, but testing is appreciated. • New command org-occur-in-agenda-files', this is basically the quick command John Wiegley proposed the other day, but it also works when the agenda files are not yet in buffers. The key is C-c C-x /', any better proposals? • Links containing a space will now be handled correctly when calling the browser. Note that you need to enclose such links in square or angular brackets. • Bug fixes. ## Version 5.09 ### Overview • Taking a note upon TODO state changes can be restricted to selected states. • The format in which dates are shown in the daily/weekly agenda can be configured. • The default for org-remember-store-without-prompt' is now t. • org-goto' has been made into a general lookup command. • Priority cycling goes back to the nil state. • You can store a remember note to the last used location. • On Emacs 23, the headline faces for org-mode are now inherited from the outline faces. ### Incompatible Changes • The default for org-remember-store-without-prompt' is now t, in order to better match the original intent of remember.el (storing a note with minimum interruption of work flow). I expect that many people will be hit by this incompatible change - nevertheless I believe it is the right thing to do. ### Details • You can now select specific states for recording a note when switching to that state. With the setting #+SEQ_TODO: TODO(t) ORDERED(o@) INVOICE(i@) PAYED(p) | RECEIVED(r) #+STARTUP: lognotestate only the states ORDERED and INVOICE will record a timestamp and a note. 
• You can now set the format of the string for each day in the agenda and timeline buffers. You can use a format string interpreted by format-time-string', or you can write your own function. Configure the new variable org-agenda-format-date'. Thanks to Levin for triggering this development with a patch. • The default for org-remember-store-without-prompt' is now t, in order to better match the original intent of remember.el (storing a note with minimum interruption of work flow). Since we can assign files and headlines to templates, I guess this takes care of selecting a filing location in most cases. For interactive filing, you now need a prefix command when exiting remember'. • org-goto' (bound to C-c C-j') now uses an indirect buffer and has additional commands enabled: Org-occur with C-c /' or even faster with /', and the commands needed to select and copy a region. This make org-goto' a more general lookup command instead of only a jumping command. Remember that you can exit with Q' to go back to the original location. Thanks to William Henney for this idea. • Setting the priority with S-up/down now cycles back to a state where no priority is specified. This was requested by Rick Moynihan. • You can store a remember note to the last used location. So if you select a location interactively once, you can re-use it without having to find it again. For this, exit the remember buffer with C-u C-u C-c C-c'. The leading comment in the remember buffer will tell exactly where the note goes if you exit with a particular command. Thanks to Maxim Loginov for this idea. • On Emacs 23, the headline faces for org-mode are now inherited from the outline faces. This is just a convenience, so that you only have to configure one set of faces, and that will then be outline-1 .. outline-8. You will actually not see any difference in org-mode, because Stefan Monnier has made the outline faces in Emacs 23 to match the current org-mode faces. 
This change does not affect XEmacs, nor Emacs 21 and 22. ## Version 5.08 ### Incompatible changes • The default for org-deadline-warning-days' is now 14. ### Details • There is now a separate interface for fast and direct setting of a TODO keyword. This interface kicks in when you have configured keys for TODO keywords like #+SEQ_TODO: TODO(t) WAITING(w) | DONE(d) CANCELED(c) C-c C-t still does the cycling thing, you need to use a prefix argument to get to the fast interface. Or configure the variable org-use-fast-todo-selection' to t, then this will be the default and the prefix argument will make the command fall back to cycling. The tag selection no longer includes TODO keywords - Leo's arguments have convinced me that this is not a good idea. If you'd like to see the TODO keywords in the tags interface anyway, set the variable org-fast-tag-selection-include-todo'. Thanks to Leo and others for input on this issue. • New variable org-edit-timestamp-down-means-later'. When set, S-down' on a timestamp will change the timestamp to later. Thanks to Raman for this idea. • Property names can now contain non-ascii word characters. This follows a request from Daniel Clemente. • For export, the date that should be given in the exported file can now be set to a specific value with a line like #+DATE: 15 November 2003 If you want to use the date/time when the file was created, use a format string that will be interpreted by format-time-string', for example: #+DATE: %Y/%m/%d %X • The default of org-deadline-warning-days' has changed to 14 days. 30 was really too much, I suspect most people (me included) have changed this. • When a deadline has an individual lead time, this lead time obviously overrules org-deadline-warning-days'. However, if you bind org-deadline-warning-days' to a number <=0, for example during a custom agenda command, then the absolute value of this number will be enforced also when a different lead time has been specified. 
This is useful to get a list of all deadlines coming up in the next N days. ## Version 5.07 ### Overview • Different faces for different TODO keywords. • Setting TODO states through the TAG setting interface. • Context information is stored when moving a tree to the archive. • Sorting can be done by priority. • Org-ellipsis' can now also be a face. • Scheduling info is no longer removed when an entry is marked CLOSED. • Unavailable files in org-agenda-files' can be skipped. ### Incompatible changes • The time of archiving is now stored as a property. ARCHIVED is no longer a special time keyword. • Scheduling info is no longer removed when an entry is marked CLOSED. ### Details • You can now define different faces for different TODO keywords. This request has come up frequently, so here it is: Use the variable org-todo-keyword-faces'. Here is a configuration example: (setq org-todo-keyword-faces '(("TODO" . org-warning) ("CANCELED" . (:foreground "blue" :weight bold :underline t)))) Org mode will continue to use org-todo' and org-done' for keywords that have no specific face assigned. • Some people use TODO states more like tags. For them the TODO keywords mark special states and they like to quickly switch between states in arbitrary sequence. The standard TODO interface is not perfect for this, because it assumes that the states are reached in sequence. However, the fast tag setting interface is in fact perfect for this. You can now "misuse" the TAG selection interface to also set TODO states. All you need to do is to assign keys to the TODO states, just like you also do for tags. #+SEQ_TODO: TODO(t) WAITING(w) | CANCELED(c) DONE(d) #+TAGS: @HOME(h) @OFFICE(o) @SHOP(s) Next time you try to set tags with C-c C-c, the todo states will be offered as well, and the corresponding key will switch the entry to that state. • New variable org-archive-save-context-info' governs if information that would be lost by moving a subtree to the archive file, should be stored as special properties. 
For example, (setq org-archive-save-context-info '(itags category)) will store the inherited tags and the category in properties ARCHIVE_ITAGS and ARCHIVE_CATEGORY, respectively. The default setting for this variable is to save everything that could be lost. This was a proposal by John Wiegley. • Sorting (C-c ^') can use the use the priority to sort. Use the "p" and "P" keys at the prompt. John Wiegley, again. • Org-ellipsis' can now also be a face to make the folding ellipsis more visible. This is based on a post by Tassilo Horn. Since org-ellipsis' only works in Org mode, you might want to use Tassilo Horn's hack directly in order to affect the folding ellipsis globally. • Scheduling info is no longer removed when an entry is marked CLOSED. This was a request by Brian van den Broek. Let me know if this breaks anything for you - then it will become an option. • New option org-agenda-skip-unavailable-files'. Currently, if a file does not exist, it will be removed from org-agenda-files' after a query. When this option is set, the file will simply be skipped. • Bug fixes. ## Version 5.06 ### Details • When exporting only a region and this region is a single (sub)tree (for example selected with C-c @'), the title for the exported document is taken to be the heading of the subtree. The sublevels become top-level entries in the export. Furthermore, if the head entry of the tree has or inherits an EXPORT_FILE_NAME property, that file name (with appropriately substituted extension) will be used for the exported tree. Thanks to Patrick Drechsler and Jost Burkart for these ideas. • org-special-ctrl-a/e has a third allowed value, reversed'. When it is set to this value, the first C-a or C-e command behaves normally, i.e. it goes to the true beginning or end of the line. Only when you press C-a or C-e immediately again, the the "special" position will be found. Additional presses of the same key jump between the two positions. 
I like this a lot better than the t' setting, because now the keys behave more predictable and still give easy access to the special locations. • New command to set or remove a tag from all headlines in a region. • When Org mode visits a file, it will initially hide all drawers. • The default of the variable org-cycle-global-at-bob' is now nil, meaning that TAB no longer does global visibility cycling at the beginning of the buffer. • Bug fixes, in particular the problems with scheduling and deadlines introduced in 5.05. Please check carefully if this works correctly again, and complain if not. ## Version 5.05 ### Overview • LaTeX export, finally, thanks to Bastien. • Extension mechanism for the hyperlink system. • Option to show only the next instance of repeating timestamp. • Store remember notes with only 2 keys: C-c C-c • Appointment reminders from Org mode. • Global values for selected properties. • Bug fixes. ### Details • Bastien's org-export-latex.el' is now part of the org-mode distribution. You can export an Org mode document to a LaTeX file with C-c C-e l'. For more options, see the manual, and the commentary in the Lisp file. Kudos to Bastien for contributing this frequently requested feature. I am sure this has been tough because of the many different ways I have been allowing LaTeX snippets and environments to be incorporated in lazy free-format ways. • Org mode has now an extension mechanism for the hyperlink system. This should clear the road for all those mairix and other ideas that have been floating around. Now it is on you to write and share new link types for Org mode. The interface for adding a new link type is described in the appendix of the manual, section A2. The unsolved problem is currently how to handle the new link types for export/publishing. • New global commands org-open-at-point-global' and org-insert-link-global'. You can bind these commands to global keys and use them to insert and follow Org mode-like links anywhere in Emacs. 
Thanks to Adam Spiers for this excellent idea. • Each deadline timestamp may now specify its own interval of lead-time display, given in days, weeks, months or years. The syntax is like this DEADLINE: <2007-08-13 Mon -5d> When combined with a repeater, the repeater has to come first: DEADLINE: <2007-08-13 Mon +2w -5d> You may now also customize the faces that are used in the agenda to indicate the distance of an approaching deadline. See the new option org-agenda-deadline-faces'. Thanks to Pavel Chalmoviansky and John Wiegley proposals in this direction. • New option org-agenda-repeating-timestamp-show-all'. When set to nil, repeating time stamps will only show up once in the agenda, either today or in the near future. Other matches will be ignored. Thanks to John Wiegley for this proposal. • New variable org-remember-store-without-prompt'. When set, exiting the remember buffer with C-c C-c will store the note without further prompts to the default location, and C-u C-c C-c' will get the prompts for file and location. So this variable reverses the prefix-argument functionality for storing remember notes. This follows a request from John Wiegley. • A new function org-agenda-to-appt' activates all appointments for the current day so that Emacs will display reminders. This uses appt.el. Thanks to Bastien for this function. • You can now set default values for properties that can be inherited by all entries in a buffer, or by all entries globally. Global properties are set in the variable org-global-properties', like this: (setq org-global-properties '(("NAME" "This is the value"))) Buffer-local values are set like this: When using org-entry-get to get the value of a property with the inherit' flag and the hierarchy above the entry does not contain this property, the buffer-local and global lists are checked as well. This is mostly useful (I think) to set the list of allowed values for a property. Thanks to Bernt Hansen and Bastien for these ideas. • Bug fixes. 
## Version 5.04 ### Details • New variables org-export-author-info' and org-export-time-stamp-file' to turn off inclusion of author and time information into exported files. Thank to Patrick Drechsler for pointing out that this would be useful. • New variable to avoid moving DEADLINE and SCHEDULED info into the property drawer. The default is now to not move this stuff into the drawer. org-insert-labeled-timestamps-before-properties-drawer' • org-archive-mark-done' can be a string now, to select a specific keyword that should be used for archived entries. • New command "j" in agenda to jump to an arbitrary date. Thanks to Bernt Hansen for the patch. • Lots of minor fixes. ## Version 5.03 ### Incompatible Changes • The variable org-special-ctrl-a' has been renamed to org-special-ctrl-a/e'. The old one is still an alias (but not on Emacs 21 where variable aliases cannot be defined). ### Details • When the variable org-special-ctrl-a/e' is set, C-e in a headline first goes to the end of the headline ignoring the tags. A second C-e then goes to after the tags. • Typing and removing single characters in a headline now keeps the tags in the headline aligned. This could have a little impact on performance while deleting stuff - let me know if we need to make this customizable. • New option org-n-level-faces' can be used to set the number of different faces that are used for headlines. Default is all 8 faces Org mode defines for this purpose, level 9 uses again the level-1 face. However, you can use fewer, and then the level-1 face will be reused already for level N+1, etc. • Column View and hidestars now work together. • Bug fixes. ## Version 5.02 ### Overview • The interfaces for properties and column view are finished now and work well. • Properties can be summaries, i.e. the parent nodes can compute their value from the children's values. • Headlines finally require a space ofter the star(s). The conflict with bold text at the beginning of the line is no longer there. 
### Incompatible Changes • Bad news. It looks like it is going to be really hard to make column view work on XEmacs and on Emacs 21. Emacs 22 is currently the only Emacs where this works. If you are using Emacs 21 or XEmacs, you can still use properties, but not column view. ### Details • Improvements for properties: • There are interactive commands to insert and delete properties. Read the manual chapter 7 for details. • You can define allowed values for a property. When these are defined, you can change the value of a property with S-left and S-right. And you may use completion when inserting the property. This goes a long way to prevent typos when entering properties. • Improvements for column view. • In column view, you may use the keys S-left/right (and also the keys n' and p') to switch from one allowed value to the next. • You can define summaries for columns. For example, parents can contain the sum of all children values of a property, or the parent node can have a check box property that is automatically checked when all children's boxes are checked. • There are interactive commands to add and remove columns, and to change the attributes of a column like the summary type. These additions lead to the exciting fact that the example from omni outliner posted by Scott Jaderholm can now be accurately reproduced by Org mode. • The space after the stars is now required in a headline, in order to remove the conflict with bold words at the beginning of a line. So * This is a level 1 headline *this is bold text* • S-up and S-down to navigate plain item lists are now also available in orgstruct-mode. ## Version 5.01 ### Overview • A new minor mode, orgstruct-mode, exports the Org mode structure editing commands into any other mode. • DRAWERS are a new level off folding for special sections that should stay closed during visibility cycling and only open if explicitly asked. • Entries can now have PROPERTIES. 
• A COLUMN VIEW implementation allows to easily view and edit the properties of a hierarchy of entries (Emacs only, for now). • Formula evaluation in the spreadsheet is more consistent now. Properties and per-file constants can be used during evaluation. • Bug fixes and minor changes. ### Incompatible changes • When using LEVEL=N in a tags search, things have changed if you are also using org-odd-levels-only'. If you are using only odd levels (i.e. 1 or 3 or 5… stars), LEVEL=2 will now refer to 3 stars, LEVEL=3 to 5 stars etc. Many thanks to Leo (or blame on him if you must) who has convinced me that this is the better convention. ### Details #### Orgstruct minor mode There is a new minor mode, orgstruct-mode. This modes works in a similar way as Orgtbl-mode. It can be used to export the Org mode structure-editing commands into arbitrary major modes in Emacs. For example, you can use it in Mail-mode to easily create lists. The functionality in Orgstruct mode is only active, if the cursor is in a line that looks either like a headline, or like the first line of a plain list item. Then the commands TAB', M-cursor', M-S-cursor', M-RET', M-S-RET', C-c ^', C-c C-c', and C-c -' will do structure-related editing just like in Org mode. If the cursor is not in such a line, all these keys will do whatever the major mode or other active minor modes have assigned to them. Orgstruct-mode is the result of a proposal by Raman, quite some time ago. It has taken a long time, but here is finally the promised implementation. #### Drawers The new concept of drawers allows to create sections that remain folded during visibility cycling. Drawers need to be configured using the variable org-drawers'. A drawer starts with a line containing only the name of the drawer bracketed by colons. It ends with :END:. 
For example, after setting (setq org-drawers '("PROPERTIES" "HIDDEN")) you can then create drawers like this: :HIDDEN: here is some stuff that remains hidden unless TAB is pressed directly in that line :END: The PROPERTIES drawer has special meaning for ORG-mode, it contains properties of an entry (see below). #### Properties and Column View • Entries in Org mode can now have arbitrary properties associated with them. Org mode handles some default properties like the TODO state, the priority, the local tags, and planning information like DEADLINE and SCHEDULED. In addition, you can assign arbitrary properties by creating a property drawer and inserting a line like :PROPNAME: This is the value of the property Org mode has an API for properties, if you want to write a program using properties, use the functions org-entry-properties', org-entry-get', org-entry-put', and org-entry-delete'. • Planning information like DEADLINE can be hidden in the properties drawer. If the PROPERTIES drawer starts in the first line after a headline, also the DEADLINE, SCHEDULED and CLOCK information will be inserted inside the drawer. If no PROPERTIES drawer is present, or if it does not start in the line right after the headline, this information remains in the lines directly after the headline, outside the drawer. • TAGS searches can now also query properties. For example, the search LEVEL=3+BOSS+ASSIGNED="Hans"/WAITING will find entries that • are level 3 • have the tag BOSS • have an ASSIGNED property with the value "Hans" • are TODO status WAITING. So here is an entry that will match: *** WAITING Clean up the factory :BOSS: :PROPERTIES: :ASSIGNED: Hans :END: You may also use a regular expression to match against a property value. For example, to find stuff assigned to Hans or Sarah, use ASSIGNED={^$$Hans\|Sarah$$$} • Column View is a special way to look at property values in tabular form. Column View can be used in any org-mode file, and also in any agenda buffer. 
It works by placing an overlay over each headline (or agenda line) that shows a table of selected properties. You can look at and edit properties from this view. Which properties are shown in the table must be set up using the COLUMNS property. You can set up different property columns on different levels of an outline tree. For example: * People :PROPERTIES: :COLUMNS: %25ITEM %Name :END: ** Family :PROPERTIES: :COLUMNS: %25ITEM %Name %3Age :END: *** Sam Info about Sam, including a property list with Name and Age. *** Sarah Info about Sarah, including a property list with Name and Age. ** Office :PROPERTIES: :COLUMNS: %25ITEM %Name %Function %Salary :END: *** Boss Info about the Boss, including a property list with Name, Function and Salary (if only we knew....). Now we have defined three different sets of columns. If you switch to column view in the Family section, you will get a different table than if you do it in the Office section. However, if you switch to column view with the cursor on the People section, the table will cover all entries, but contain only the Name. Column view does, for the time being, only work on Emacs. The XEmacs implementation needs a bit of work. • Properties can be used in table formulas. To access the value of the property :XYZ:, use$PROP_XYZ. The property needs to be defined in the hierarchy above the table, not necessarily in the same entry as the table. This was a request by Eddward. File-wide constants can be defined with #+CONSTANTS, see below. • Things that still need to be sorted out about drawers, properties and column view - comments and suggestions welcome! • How to deal with drawers and properties in HTML and ASCII export? • What key could be used to insert an empty property drawer into an entry? • Right now column view is invoked through the command C-c C-x C-c. It is too easy to type C-x C-c by mistake, and that causes Emacs to quit. Suggestions for a different key? 
• Fontification of drawers and properties is not good yet. Any suggestions for better defaults? • Mouse support for editing properties in column view would be nice - maybe Piotr is interested to add this to org-mouse.el? • In the spreadsheet, the evaluation of formulas has changed. Previously, first the column formulas would be evaluated everywhere, and then the field formulas would kick in, and in some cases overwrite the results of column formulas in the appropriate fields. This had the side effect that some formulas might always use the wrong, intermediate content of a field that is computed both by a column and a field formula. From now on, column formulas will no longer temporarily overwrite field formulas. This gives much more consistent results. For example you can now finally have a column of increasing numbers by setting the first field to a fixed number, and let the rest follow from a column formula. Here is an example | 1 | | 2 | | 3 | #+TBLFM: $1=@-1+1::@1$1=1 • Constants for formulas in spreadsheets are globally defined with the variable org-table-formula-constants'. File-local constants can now be set with a line like: #+CONSTANTS: c=299792458. pi=3.14 eps=2.4e-6 #### Minor changes • When entries are archived, a timestamp for the moment of archiving is added to the line with planning information. It looks like this: ARCHIVED: [2007-07-02 Mon 11:34] Thanks to J. David Boyd for constructive comments. • Bug fixes Many bugs are fixed, as usually all the ones where I replied "fixed" on emacs-orgmode. If you reported one of these bugs, please check if it really has disappeared in the new version, and complain if not. Thanks! ## Version 4.79 ### Details • We are back to a single file org.el that works both on Emacs and on XEmacs. Merging comes at a speed penalty for you as an XEmacs user, but only if you do not compile org.el. Compilation completely removes the penalty. • New L flag for literal interpolation in Lisp formulas. See manual section 3.5.3. 
• New options for turning off footnotes. This was a request from Ignotus. See the option org-export-with-footnotes'. • Default length for Agenda entries, but this is off by default. This was a request from Micheal. See the option org-agenda-default-appointment-duration'. • Bug fixes: • org-agenda-date-later (Juraj Kubelka) • letters off margin in orgcard.ps (Charles Cave) • TODO export problems on XEmacs ([email protected]) • args-out-of-range with table formulas (Cecil Westerhof) • problem with org-file without a heading (Tim O'Callaghan) ## Version 4.78 ### Overview • Time stamps with a time range included, like <2007-06-18 Mon 17:33-18:23> • Clock times without clocking in/out: CLOCK: => 2:00 • Language-specific characters allowed in TAGS (Emacs only). • Promotion and demotion of items gets the indentation right. • Indenting lines with TAB is more intelligent. ### Incompatible changes • There is now a special version of org.el' for XEmacs. Before installation, as an XEmacs user you must rename the file org_xemacs.el to org.el, i.e. you must overwrite org.el with the xemacs version. For example: mv org_xemacs.el org.el This is necessary so that I can make use of some features that would be cumbersome to support in a single file. The XEmacs version is derived from the Emacs version with a program, so no reason to fear that I might be dropping XEmacs support any time soon. Sorry for the trouble. ### Details • A time stamp may now contain a range of times. So you no longer need to use two separate stamps to indicate a time interval on a single day. For example <2007-06-18 Mon 17:30-18:20> This is now fully supported, including changing the time with S-up/down while the cursor is on the end time. Also, da the date/time prompt, you can simply write your time like 12:00-14:00 and the range will be inserted. This was proposed by Leo some time ago, and recently by Michael. • You may specify clocking times by hand (i.e. without clocking in and out) using this syntax. 
CLOCK: => 2:00 Thanks to Scott Jaderholm for this proposal. • TAGS may now contain language-specific word characters, as long as they are matched by the "[:alnum:]" regexp syntax. This is for Emacs only, the XEmacs version continues to use the character class "a-zA-Z0-9_@" for tag names. Thanks to David Smith for a patch to this effect (a modified version of that patch was applied). I am considering to make the same change for TODO keywords, but not yet. Note that files using localization features may not work correctly in the Emacs configuration of another user, so if you are sharing org-mode files with other users, it might still be best to stick to the ASCII characters. • Promotion and demotion of plain list items (commands M-left, M-right) no longer changes the indentation by just one space. Instead, it uses intelligence gathered from the surrounding list structure to do the right thing. Thanks to William Henney for starting the discussion about this. • TAB does now a better job of indenting lines. • After tables and code segments (lines starting with ":"), indentation goes back to what it was before (thanks to William Henney for suggesting this behavior). • When plain lists items are involved, we had a long discussion on emacs-orgmode where I tried to show that a too-sophisticated implementation will still be easily fooled. Here is what I have implemented now - lets see if we can agree on this: Indentation will flatten lists with the same bullet type, but indent another bullet type further. The time when this fails is in a nested list, when you want to get back out to a previous level. For example - item 1 - item 2 + item 2a + item 2b - item 3 When using TAB on every line in this list, the structure will change to - item 1 - item 2 + item 2a + item 2b - item 3 So you need to change the level of the last line by hand, using promotion and demotion functions. ## Version 4.77 ### Overview • Vertical lines in exported tables. 
### Incompatible changes • The default for org-show-following-heading' is now nil. ### Details • You can now specify column groups in tables, to the effect that the groups will be separated by vertical lines in HTML and ASCII output. Column groups are specified by the characters "<" and ">" in a special table row. "<" starts a group, ">" ends a group (in each case including the column where the character is specified). You may also use "<>" to make a group a single column wide. For example: | | N | N^2 | N^3 | N^4 | sqrt(n) | sqrt[4](N) | |---+----+-----+-----+-----+---------+------------| | / | <> | < | | > | < | > | | # | 1 | 1 | 1 | 1 | 1 | 1 | | # | 2 | 4 | 8 | 16 | 1.4142 | 1.1892 | | # | 3 | 9 | 27 | 81 | 1.7321 | 1.3161 | #+TBLFM: $3=$2^2::$4=$2^3::$5=$2^4::$6=sqrt($2)::$7=sqrt(sqrt($2)) A table row with nothing but "/" in the first field is never exported, but can be used to place column group information into the table. In this table, we create a group for column 2, one for columns 3-5 and one for columns 6-7. HTML export will render a vertical line between these groups. Because HTML does not require closing <colgroup> tags with </colgroup>), you can also simply start a new column wherever you want a vertical line: | N | N^2 | N^3 | N^4 | sqrt(n) | sqrt[4](N) | |---+-----+-----+-----+---------+------------| | / | < | < | | < | | • Vertical lines are now also omitted in ASCII export, unless grouping explicitly requests these lines. • The default for org-show-following-heading' is now nil, meaning that sparse trees will be more compact. This has become possible due to an important remark by Jason Dunsmore who pointed out that TAB should behave differently in the inconsistent trees produced by the sparse tree commands. TAB does now make sure that the heading after a freshly unfolded tree is made visible at all, removing the confusing behavior we had before. • Several bugs fixed.
In particular: • Strings produced by agenda batch processing with org-batch-agenda' and org-batch-agenda-csv' are now properly encoded, so that you should be able to use special characters in other languages as long as your post-processing program handles them correctly. At least for Emacs this should work now, but have not yet figured out how to do this in XEmacs. ## Version 4.76 ### Overview • Exporting Footnotes to HTML ### Details • Footnotes like here[1] are now exported to HTML [1]This is a footnote Thanks to Scott Jaderholm for this proposal and a detailed HTML example on how the exported text should look like. • Special version of the reference card, for letter paper. • Switching to OVERVIEW with S-TAB no longer moves the cursor, so after three S-TAB' commands, you will be back where you started. • Bug fixes, lots of them again. ## Version 4.75 ### Overview • Cyclic time stamps that repeat after an interval. • Special timestamps for appointments like "every 2nd Thursday in a month". • Completion of link abbreviation prefixes inside C-c C-l'. • Replacing a region of org-mode syntax with HTML. • iCalendar export now honors ARCHIVE etc. • New command to add/change emphasis markers. ### Incompatible Changes • The REPEAT(…) cookie is no longer supported, the repeater interval now goes directly into the time stamp. ### Details • Time stamps can contain a repeater code, like +1w for once every week, +2d for every two days, etc. For example, will apply to every Wednesday, starting from the date given. I believe this syntax was actually suggested by someone on the mailing list, but I cannot find the email back. To collect your credit, let me know!
• You can use an sexp diary entry (with the syntax used by the Emacs calendar/diary) in a time stamp, like this: *** The nerd club meets on 2nd Thursday of every month <%%(diary-float t 4 2)> • You can put diary-style sexp entries directly into an org-mode file, where they will be interpreted just like they would in the diary. For example * Birthdays and similar stuff #+CATEGORY: Holiday %%(org-calendar-holiday) ; special function for holiday names #+CATEGORY: Ann %%(diary-anniversary 14 5 1956) Artur Dent %d is years old %%(diary-anniversary 2 10 1869) Mahatma Gandhi These entries must start at column 0 to be evaluated. It turns out that evaluating the entries in an org-mode file is actually faster than in the diary itself, because using the diary has some overhead (creating fancy diary display, then reading and re-interpreting the entries). I have moved all the sexp entries from my diary into an org-mode file, put in a few categories, and then turned off org-agenda-include-diary'. This has led to a noticeably faster agenda display. • New command org-replace-region-by-html' that converts the current region from org-mode syntax into HTML. For example, you might write an itemized list in plain text in an HTML buffer, and then invoke this command to convert it. Thanks to Raman for this idea. • When inserting a link with C-c C-l', completion will now fill in all valid link prefixes, like http or ftp, but also link abbreviation prefixes. This is based on an idea by Bastien. • Highest, lowest, and default priority can be set on a per-file basis with #+PRIORITIES: H L D For example, to use priorities from 1 to 9, you could use #+PRIORITIES: 1 9 9 Thanks to Dmitri Minaev for a patch to this effect. • iCalendar export now honors (i.e. skips) subtrees marked as ARCHIVE, COMMENT, or QUOTE. • There is a new command to add or change the emphasis (like bold or italic) of a piece of text. 
For lack of better available keys the command is at C-c C-x C-f', but you may well want to choose a more convenient key like C-c f' in your private setup: (add-hook 'org-load-hook (lambda () (define-key org-mode-map "\C-cf" 'org-emphasize))) The command will prompt for an emphasis type, and you may reply either with the marker that triggers the emphasis, or with the first letter of the corresponding HTML tag. For example, to select italic, press either "/" or "i". If there is an active region, the emphasis of this region will be set or changed. If there is no region, only the emphasis markers will be inserted and the cursor positioned between them. Thanks to Bastien for proposing this feature. • Bug fixes, everything where I have replied "fixed" on the mailing list. Thanks to all of you for keeping these reports coming. ## Version 4.74 ### Overview This release is about exporting agenda views, to HTML, to postscript for printing, and to a special format (CSV) for further processing in scripts. ### Incompatible Changes • The variable org-agenda-remove-tags-when-in-prefix' has been renamed to org-agenda-remove-tags'. ### Details • Agenda views can be exported as plain text, as HTML, and as Postscript(R). This can simply be done from the agenda buffer with C-x C-w' and then specifying a filename like myagenda.html' or myagenda.ps'. See section 8.6.4 of the manual. • Each custom agenda view can specify a list of associated files names. The command C-c a e' then creates all views that have associated file names and exports the views to these files. This is great for producing paper versions of your views, to take with you when you don't have your computer. The manual has an example on how to do this, and in particular on how to customize the format of the printed version. See section 8.6.4 of the manual. • You can produce a CSV format of agenda information with an Emacs batch command. This is greate for further processing in scipts. Thanks to Jason F. 
McBrayer for this idea. See section 8.6.5 of the manual. • New variable org-agenda-skip-deadline-if-done'. When set, a deadline associated with a DONE item will not be shown in the agenda. This is based upon a report by Denis Bueno. • Quite a few bug fixes. Minor bug fixes. ## Version 4.72 ### Overview • Control over blank lines between trees in collapsed view. • Info about the running clock is shown in the modeline. • C-a can behave specially in headlines. • Better color and scaling defaults for LaTeX fragments. • Customizable list of keys in org-mode to be replaced. • Stuck project descriptions have been extended. • Emphasis code has been modified to fix some issues. • Bug fixes. ### Incompatible changes • The option org-format-latex-options' has changed. If you have customized it, please revert to default and then redo your customization. • org-CUA-compatible' no longer modifies S-RET by default, because newer versions of CUA don't use this key anymore. If you need this replacement, customize the variable org-disputed-keys'. • The variable org-CUA-compatible' is obsolete, please use org-replace-disputed-keys' instead. org-CUA-compatible' is still an alias for this new variable, though. ### Details • Better control over blank lines between trees in collapsed view. This has come up several times in the past and most recently by Scott Jaderholm. There is now a new variable org-cycle-separator-lines' with default value 2. It says how many empty lines there need to be after the end of a subtree to get an empty line in collapsed view. So with the default, if you leave only one empty line it will disappear in collapsed view. If you leave two, one empty line will remain so that you can use double empty lines to structure the collapsed views of a file. I love it, so many thanks to Scott fro bringing this up again. One property of the new setup is that you will never get more than one blank line in collapsed view. 
We could do something special to allow several empty lines in collapsed view, but I think this is counter-productive. In Emacs 22, if you want to make full use of this, make sure that you have not set outline-blank-line'. • When the clock is running, Org mode will put info about it into the modeline. The info consists of the elapsed time and the heading of the clocked item. This was a proposal from Bastien who got the idea from Muse. • C-a can behave specially in headlines when you set the variable org-special-ctrl-a'. It will bring the cursor first back only to the beginning of the headline text, i.e. after the stars and the TODO keyword, if any. A second C-a will then move the cursor to the beginning of the line. If the cursor is already at the beginning of the line, C-a will spring forward to the headline text. This was a proposal from Leo, based on a request from Scott Jaderholm. I have not turned this on by default, should I? • When LaTeX fragments are processed into images, there is now more control and (hopefully) better defaults for colors and scaling. Special values can be set for HTML export, so that these values can differ from what is used for display in an emacs buffer. The default foreground and background colors for images embedded in emacs are now taken from the default emacs face. Thanks to Xiao-Yong Jin for proposing these changes. • There is now a much better mechanism to change some keys in org-mode if these keys clash with other modes you use. Turn this on by setting org-replace-disputed-keys' (aliased to org-CUA-compatible'). The list of keys to replace is now fully customizable, see the option org-disputed-keys'. Many thanks to Maciej Katafiasz for a patch implementing this. • Stuck project descriptions have been extended. You can now use "*" as a TODO keyword or tag to say that any TODO keyword or TAG marks a project as non-stuck.
You also can give an arbitrary regular expression that, if it matches, indicates a non-stuck project. • The code for emphasis like bold, italic etc has been modified - I might have broken something in the process, please let me know if you find problems. • A number of bugs have been fixed - those where I have replied "Fixed" on the mailing list. ## Version 4.71 ### Details • New variables to customize the header and data tags in exported HTML. These are the variables org-export-table-header-tags' and org-export-table-data-tags'. This follows a request from Scott Otterson. • New option org-format-latex-header' for customizing the header of the LaTeX file used to convert embedded LaTeX to images. Thanks to Matthieu Lemerre' for the suggestion. • The prefix version of org-todo-list' works again. This means that C-1 C-c a t' produces the list of TODO entries for the first TODO keyword. If you use different TODO setups in different agenda files, be careful: This number now refers to the list of all todo keywords used in files that are scanned for the agenda. • Many bug fixes. ## Version 4.70 ### Overview • Dust settles after revamp of TODO keyword system. • The export title can be taken from the first text line. • TTY replacement keys have changed. ### Incompatible changes • Some TTY replacement keys are changed, see below. ### Details • Further development concerning TODO keywords. • You can now have several DONE states in a sequence, like #+SEQ_TODO: TODO VERIFY | DONE DELEGATED The difference to the proposal discussed on the mailing list (and which is also works!) #+SEQ_TODO: TODO VERIFY | DONE #+SEQ_TODO: | CANCELED is that in the first case, the extra DONE states will be reached with C-c C-t' (or with t' from the agenda), while in the second case you need S-<right> to get to the special states. I guess both ideas can be useful - I am leaning toward using the latter. 
• Setting up TODO keywords in Lisp previously used two separate variables: org-todo-keywords' and org-todo-interpretation'. The preferred way is now to use only org-todo-keywords', with a new structure: (setq org-todo-keywords '((sequence "TODO" "|" "DONE") (sequence "BUG" "KNOWNCAUSE" "|" "FIXED" "IGNORED") (type "Fred" "Lisa" "Peter" "|" "DONE") (sequence "CANCELED") ; for things we decide to not do. )) If your setting has this new structure, org-todo-interpretation' will be ignored. This change does not break backward compatibility. The old way of using a flat list in org-todo-keywords' and taking the interpretation from the other variable still works. • When listing specific TODO entries via a sparse tree (C-u C-c C-v') or via the agenda (C-c a T' or C-u C-c a t'), you can now specify several keywords to be selected, like "TODO|VERIFY|WAITING". This also works for custom agenda commands. Thanks to Jason F. McBrayer for pointing out this omission. • If you have configured Org mode to export also the text before the first headline (this is done by setting the variable org-export-skip-text-before-1st-heading' to nil), then the first normal text line in the buffer becomes the title of the exported document. A title set with #+TITLE overules this default, and the first line then belongs to the normal text. Thanks to David House for this proposal. • TTY replacement keys. Some of the key bindings used by Org mode do not work on a tty, so replacement key sequences are provided on ttys. In version 4.70, there are some changes in the tty replacements. Thanks to Jason F. McBrayer for coming up with the idea to use C-c <cursor> keys. Command   Old TTY New TTY org-….. 
Main Key Replacement Replacement shiftleft S-left C-c C-x left C-c left shiftright S-right C-c C-x right C-c right shiftup S-up C-c C-x up C-c up shiftdown S-down C-c C-x down C-c down shiftcontrolleft C-S-left   C-c C-x left shiftcontrolright C-S-right   C-c C-x right ## Version 4.69 ### Overview This time the changes affect the following areas: • TODO keywords: Multiple sequences in a single file. • Export: More control over text before the first heading. • Export: More control over sub/superscript interpretation. • Plain lists: Option to let empty lines terminate lists. • Tables: New command to insert hline and move into line below. • REPEATing items: Turn off note taking. • Bug fixes. ### Incompatible changes • It used to be possible to spread the list of TODO keywords over several lines, like #+SEQ_TODO: TODO #+SEQ_TODO: PROGRESS #+SEQ_TODO: DONE This is no longer possible. Each such line now specifies an independent set of TODO keywords, with its own DONE state. See below for details. • The #+TEXT construct has been used to insert unchanged HTML into an exported file. This is no longer possible, the TEXT lines will be processed like any other lines. However, there are now much better ways of getting quoted HTML into the exported file. ### Details • You can now use multiple sets of TODO keywords in the same buffer. For example, you may put the following three lines into a file: #+SEQ_TODO: TODO DONE #+SEQ_TODO: REPORT BUG KNOWNCAUSE RESOLVED #+TYP_TODO: Fred Laura Peter Me OK Each sub-sequence has its own DONE state. It is best to use different keywords in all sequences, to make sure Org mode does not lose track in which specific sequence it is working. You could use the same word for all DONE states, but then cycling through to a TODO state might not bring you where you want to be. After initially setting a keyword, C-c C-t' cycles through a sublist, i.e. it cycles from TODO to DONE or from KNOWNCAUSE to RESOLVED and further to (nothing) and back to REPORT. 
S-right and S-left allow to select any keyword, so they move from DONE to REPORT and from RESOLVED to Fred. C-S-right and C-S-left jump from one sub-sequence to the next, for example from TODO or DONE to REPORT to Fred. Thanks to Rick Moynihan for triggering this development. • Text before the first headline can now be exported if you configure Org mode accordingly. Either set the variable org-export-skip-text-before-1st-heading' to nil, or use the new in-buffer option #+OPTION: skip:nil • Export content specified via the #+TEXT construct is now fully processed, i.e. links, emphasis etc. are all interpreted. #+TEXT lines may include #+BEGIN_HTML… #+END_HTML sections to embed literal HTML. • During HTML export, you can request to have ab interpreted as a subscript, but to leave a_b as it is. This can be done by setting the variable org-export-sub-superscript to the symbol {}' with (setq org-export-sub-superscript '{}) or by using #+OPTIONS: ^:{} Thanks to Eddward DeVilla for this idea. • New variable org-empty-line-terminates-plain-lists'. Default is nil, meaning that empty lines are part of the previous list item, and that you can have several paragraphs in one such item. Set this to t if you want an empty line terminate all levels of plain list items. Thanks to Mike Newman for triggering this development. • C-c RET does insert a horizontal separator line and move the cursor into the table line below it. Thanks to Bastien for this proposal. • Org mode always offers you to record a note when a TODO item automatically repeats, even if you are not logging state changes. The new variable org-log-repeat' allows to turn this off, so that notes are really only been taken if you are logging all state changes. • Various Bug fixes, thanks to everyone who reported. ## Version 4.68 ### Overview • Priority handling in the tags view • Date/time prompt follows the popup calender, and accepts AM/PM times. • Standard references like B4 in the spreadsheet. 
• Improvements to the formula editor. • C-j does better indentation. • Bug fixes ### Details • Priority handling in the tags view • Agenda lists selected by tag are now sorted by priority. Thanks to Andrew Korty for reporting this omission. • Improvements to the date/time prompt. • When you move (using S-cursor keys) the cursor in the pop-up calendar window while responding to a date/time prompt, the prompt is updated with the new default date (Emacs only). • You can now enter AM/PM times at this prompt. • You can now also write B4 instead of @4$2 as a reference in formulas. The column references without specified row can be written as C& instead of$3. Such references make formulas easier to read and are now the default way how references are shown when you edit existing formulas. To get the old behavior back (i.e. only @row$col references), set the variable org-table-use-standard-references' to nil. Relative references like @-3$-2 or @II..III continue to use the internal format. • Changes in the formula editor (the one you get with "C-c '") • The formulas are organized in a more logical way. • There is now a menu with commands. • When starting the formula editor with "C-c '", the cursor immediately moves to the formula for the current field. • With the cursor on a reference in the formula, you can use S-cursor keys to change the field being referenced. • C-j indents the following line correctly when used in a headline or in a plain list item. Thanks to Leo for this suggestion. • Bug fixes • Flyspell now knows about special org-mode commands. Thanks to Vinod Valsalam for reporting this problem, and to Andrew Korty for showing how to fix it. • Most other bugs discussed recently on [email protected] should be fixed, except the problem with non-ASCII characters in tags…. ## Version 4.67 • Expert mode for fast tag selection. When org-fast-tag-selection-single-key is expert', not even the selection window is shown, only the prompt. 
One more C-c gets you the window, another one goes to multiple selection mode. • Synchronized with Emacs once more: Emacs CVS has now org-mode 4.67. At least until it causes a problem, then the Emacs people will switch back to 4.56. Let's hope there will be no problem. • Code cleanup • Bug fixes ## Version 4.66 ### Overview • Sorting of top-level entries works now if the region contains top-level entries, or if the cursor is before the first headline. Thanks to "redblue" for reporting this bug. • When entering date and time at the prompt, you can now mix entering text and selecting something in the calendar. For example, enter 22:15 at the prompt without pressing RET, and then click on a date in the calendar. Both pieces of information will be included in the resulting time stamp. You can also use S-cursor to move the cursor in the calendar to the desired date and then enter 22:15 and press RET at the prompt. • When setting a deadline or a schedule, entering a time now automatically selects the time stamp format that includes the time. Bug report (by means of a question) from Bastre. • C-c C-l can be used to convert a plain link into a bracket link. • Internal links now match inside (the visible part of) other links. Thanks to Scott Otterson for reporting this bug. • iCalendar export of TODO items fixed, see also the variable org-icalendar-include-todo'. Thanks to Philipp Raschdorf. • The number of levels in the table of contents of an exported document can now be set independently of the number of headline levels. For example: #+OPTIONS: H:4 toc:2 • The command C-c }' toggles the display of row and column numbers in the current table, to aid constructing formulas. To try it, move the cursor to a table and press C-c }', or use the menu entry. • Orgtbl translation functions (introduced in 4.65) have been simplified using a generic function orgtbl-to-generic' that can be used for very general languages. Writing your own translator should be very easy now. 
More info in the manual. • CONTENTS visibility can be limited to a certain level. The command C-3 S-TAB' will switch to CONTENTS view and show the first 3 levels. • Bug fixes. ## Version 4.65 ### Overview • Orgtbl can be used to maintain tables in LaTeX, and in any other mode • Editing Lisp formulas for tables improved. • Better structure for HTML exported tables. • New "calculation" marker "/" to mark lines that should not be exported. ### Detailed description of changes • You can use orgtbl mode to maintain a LaTeX table, or pretty much any table in any mode. This does not work by making Orgtbl aware of LaTeX syntax. That would be a box of Pandora I am not willing to open. Instead, you use a normal Orgtbl-mode table, and a converter program to automatically place a LaTeX version of the table into the correct spot in the LaTeX file. The orgtbl-mode table can be maintained inside the same file, in a block comment. I am providing translators for LaTeX, HTML, and TeXInfo. For other applications, you need to write one yourself - but that is not hard if you start from the LaTeX version and just modify it. Thanks to Thomas Baumann for triggering this development through a request for a table-to-LaTeX converter. • In the special buffer to edit the formulas of a table (created with "C-c '"), there is now better support for editing Lisp formulas. TAB and M-TAB work like in an Emacs Lisp buffer, indenting lines and completing lisp symbols. With the cursor on a line defining a complex Lisp formula, a first press on TAB will convert the formula into a pretty-printed version with proper linebreaks and indentation. A second TAB folds the line back to the compact form. • Tables in HTML export have now additional structure elements defined. The header (before the first hline) is wrapped into <thead>..</thead>, and each part of the body (as separated in org-mode by hlines) is wrapped into <tbody>..</tbody> tags. 
I have also changed the CSS style for <td> fields and the value of org-export-html-table-tag' to get cleaner tables. Basically, tables now have horizontal lines only where needed, and no vertical lines at all, as generally recommended for tables in printed text. I like the new look, but I am not sure if this change will find general approval, please throw in your view if you like. Thanks to Scott for driving this, and to goud-H for pointing me to the row grouping in tables. • In a table with calculation markers in the first column, you can now also put "/" into the first column. It indicates that this line should not be exported. The foremost application for this are lines containing only "<N>" markers for narrowing columns. ## Version 4.64 ### Overview • Email links get better, configurable descriptions • When inserting a link, selected text becomes the description • Horizontal lines in HTML export. • Remember templates and storing of notes improved. ### Detailed description of changes • The descriptive part of links to email messages can be configured using the variable org-email-link-description-format'. The new default is "Email %c: %.30s" and leads to Email from NAME: SUBJECT If you configure the variable org-from-is-user-regexp' correctly, then for email you sent this will actually change to Email to NAME: SUBJECT The subject is limited to 30 characters. If you have become attached to the previous default (look twice, the new one is better), use "%f on: %s" as your format. • Selecting text before entering a new link with C-c C-l now really works, the selected text becomes the description part of the link. Requested by Scott, buggy 4.62 implementation is now fixed. • Stored links are part of the history list for C-c C-l, so to reach them, you can use up/down rather than completion. Thanks to Raman for this excellent idea. • A line consisting only of "-", and at least 5 of them, is exported into HTML as <hr/>, as proposed by Giovanni Ridolfi. 
• Several changes to org <-> remember integration • You can use org-remember' as your default command to start remember. It will automatically detect if there is an active region and use it as initial content (we will probably make remember.el work like this as well). Also, when calling org-remember' in a remember buffer that was created with a template, you will again be asked to select a template. The buffer is then re-created with the new template, but the old context information. This is useful if you change your mind about the template to use (Leo's idea). • Besides specifying a default target file for a note, you can also give a default heading of which the note should become a subitem. In many cases this avoids or speeds up navigating to the right location. Both file and heading can be different for each template. Both are non-binding, you can change them while storing the note. However, when you exit remember with C-u C-c C-c, these defaults will be used without interaction. • Templates can specify interactive fields. During expansion of the template, you will be prompted for the information in that field. For example %^t will pop up a calendar and ask you to select a date. This new feature follows a proposal from Leo, who in the mean time has said he does not need it anymore. But I liked it, so here it is :-) • Templates can access information specific to the link type created, for example the author and subject of an email. Syntax is %:fromname, %:fromaddress, %:subject etc, details in the manual. Proposed by Peder O. Klingenberg. • I have been considering to move, at some stage, the template functionality into remember.el itself - which would of course require consent of the remember.el maintainers. I am not sure how well this would work though, since some things like the interactive time stamps are org.el specific, so treating them would require special hooks. Comments? 
• Bug fixes ## Version 4.62 • Many changes to the spreadsheet functions in the table editor. For details, please re-read the manual section 3.4. • New Features • It is much easier to assign formulas to individual fields. • References to arbitrary fields and ranges. • Absolute references are modified in row-editing commands. • Formula editor that highlights referenced fields. • Incompatible changes • Empty fields are excluded in range references, see "E" mode flag. • &… ranges no longer supported, use new @… ranges. • Variable insertion into Lisp formulas work differently. • Selected text becomes the default description for C-c C-l links.(Scott) • The date format in the agenda/timeline views is now customizable. See the new option org-agenda-date-format'. (request by Victor) • Link abbreviations no longer need a double colon, single colon is fine. • Bug fixes. ## Version 4.61 • Avoiding keybinding clashes with flyspell • Archiving is now also on C-C C-x C-s' (was just C-c ') • Cycling through agenda files is now also on "C-'" (was just "C-,") • Colon is considered part of number, to align times in clock tables. • Fixed bug for list of stuck projects. • Fixed several bugs/problems concerning linking to gnus. • Block agendas can contain the list of stuck projects. • #+ARCHIVE may now appear several times in the buffer. • More bug fixes. ## Version 4.60 • HTML export: inlining images, clickable images (manual 10.2.4). • Incremental search now shows proper context when exiting. • Tables calculation and Calc package. • Calc is no longer needed when using only elisp formulas. • Proper error messages when calc is needed and not available. • Tracking TODO state changes with time stamps and notes. • Empty entries go full circle. • Links in iCalendar export cleaned up. • Bug fixes. ## Version 4.59 • Cleanup code, bug fixes. ## Version 4.58 • Full undo support in the agenda buffer. • Listing stuck GTD projects (projects without any NEXT ACTIONS). 
Configure org-stuck-projects' before using it. • C-c C-x b shows the current subtree in an indirect buffer, in another, dedicated frame. • Custom agenda commands take precedence over builtin commands. • auto-fill for comments works on the Emacs side, XEmacs not yet. ## Version 4.57 • Sorting of outline items on same level. • Sorting tables automatically selects line range between hlines. • Changes in Agenda buffer • C-c C-o' follows a link in the current line. • C-c' archives the subtree corresponding to the line. • Changing dates with S-left and S-right show new date in agenda, but still do not move the entry to the new date. • new option org-agenda-skip-scheduled-if-done'. • Agenda and sparse tree construction using tag matches can now use regular expressions. • When prompted for a date/time, entering "+7" indicates a date 7 days from now - but only if this is the only thing you give. • Custom time formats also apply to exported html and ascii. • Bug fixes. ## Version 4.56 • C-k' in agenda kills current line and corresponding subtree in file. • XEmacs compatibility issues fixed, in particular tag alignment. • M-left/right now in/outdents plain list items, no Shift needed. • Bug fixes. • Bug fixes. ## Version 4.54 • Improvements to fast tag selection • show status also in target line. • option to auto-exit after first change to tags list (see manual). • Tags sparse trees now also respect the settings in org-show-hierarchy-above' and org-show-following-heading'. • Bug fixes. ## Version 4.53 • Custom time formats can be overlaid over time stamps. • New option org-agenda-todo-ignore-deadlines'. • Work-around for flyspell bug (CVS Emacs has this fixed in flyspell.el). • Work-around for session.el problem with circular data structures. • Bug fixes. ## Version 4.52 • TAG matches can also specify conditions on TODO keywords. • The fast tag interface allows setting tags that are not in the predefined list. • Bug fixes. 
## Version 4.51 • Link abbreviations (manual section 4.5). • More control over how agenda is displayed. See the new variables org-agenda-window-setup', org-agenda-restore-windows-after-quit'. • Bug fixes. ## Version 4.50 • Closing a TODO item can record an additional note. See variables org-log-done' and org-log-note-headings'. • Inserting headlines and bullets can leave an extra blank line. See variable org-blank-before-new-entry'. (Ed Hirgelt patch) • bracket links in the agenda are active just as in org-mode buffers. • C-c C-o on a date range displays the agenda for exactly this range. • The default for org-cycle-include-plain-lists' is back to nil. • Calls to org-occur' can be stacked by using a prefix argument. • The options org-show-hierarchy-above' and org-show-following-heading' now always default to t', but can be customized differently for different types of sparse trees or jump commands. • Bug fixes. ## Version 4.49 • Agenda views can be made in batch mode from the command line. • org-store-link' does the right thing in dired-mode. • File links can contain environment variables. • Full Emacs 21 compatibility has been restored. • Bug fixes. ## Version 4.47 • Custom commands may produce an agenda which contains several blocks, each block created by a different agenda command. • Agenda commands can be restricted to the current file, region, subtree. • The timeline command must now be called through the agenda dispatcher (C-c a L). C-c C-r' no longer works. • Agenda items can be sorted by tag. The last tag is used for this. • The prefix and the sorting strategy for agenda items can depend upon the agenda type. • The handling of mailto:' links can be customized, see the new variable org-link-mailto-program'. • mailto' links can specify a subject after a double colon, like mailto:[email protected]::Org mode is buggy. • In the #+STARTUP line, M-TAB completes valid keywords. • In the #+TAGS: line, M-TAB after ":" inserts all currently used tags. 
• Again full Emacs 21 support: Checkboxes and publishing are fixed. • More minor bug fixes. ## Version 4.45 • Checkbox lists can show statistics about checked items. • C-TAB will cycle the visibility of archived subtrees. • Documentation about checkboxes has been moved to chapter 5. • Bug fixes. ## Version 4.44 • Clock table can be done for a limited time interval. • Obsolete support for the old outline mode has been removed. • Bug fixes and code cleaning. ## Version 4.43 • Bug fixes • s' key in the agenda saves all org-mode buffers. ## Version 4.41 • Shift-cursor keys can modify inactive time stamps (inactive time stamps are the ones in […] brackets). • Toggle all checkboxes in a region/below a headline. • Bug fixes. • Bug fixes. ## Version 4.39 • Special tag ARCHIVE keeps a subtree closed and away from agenda lists. • LaTeX code in Org mode files can be converted to images for HTML. • Bug fixes. • CDLaTeX-mode features can be used in Org mode to help inserting LaTeX environment and math. ## Version 4.38 • noutline.el is now required (important for XEmacs users only). • Dynamic blocks. • Archiving of all level 1 trees without open TODO items. • Clock reports can be inserted into the file in a special section. • FAQ removed from the manual, now only on the web. • Bug fixes. ## Version 4.37 • Clock-feature for measuring time spent on specific items. • Improved emphasizing allows configuration and stacking. ## Version 4.36 • Improved indentation of ASCII export, when headlines become items. • Handling of 12am and 12pm fixed. Times beyond 24:00 can be used and will not lead to conflicts. • Support for mutually exclusive TAGS with the fast tags interface. • Bug fixes. ## Version 4.35 • HTML export is now valid XHTML. • Timeline can also show dates without entries. See new option org-timeline-show-empty-dates'. • The bullets created by the ASCII exporter can now be configured. See the new option org-export-ascii-bullets'. 
• New face org-upcoming-deadline' (was org-scheduled-previously'). • New function org-context' to allow testing for local context. • Bug fixes. ## Version 4.33 • New commands to move through plain lists: S-up and S-down. • Bug fixes and documentation update. ## Version 4.32 • Fast (single-key-per-tag) interface for setting TAGS. • The list of legal tags can be configured globally and locally. • Elisp and Info links (thanks to Todd Neal). • org-export-publishing-directory' can be an alist, with different directories for different export types. • All context-sensitive commands use call-interactively' to dispatch. • org-confirm-shell-links' renamed to org-confirm-shell-link-function'. • Bug fixes. • Bug fixes. ## Version 4.30 • Modified installation: Autoloads have been collected in org-install.el. • Logging (org-log-done) is now a #+STARTUP option. • Checkboxes in plain list items, following up on Frank Ruell's idea. • File links inserted with C-c C-l will use relative paths if the linked file is in the current directory or a subdirectory of it. • New variable org-link-file-path-type' to specify preference for relative and absolute paths. • New CSS classes for tags, timestamps, timestamp keywords. • Bug and typo fixes. ## Version 4.29 • Inlining images in HTML export now depends on wheather the link contains a description or not. • TODO items can be scheduled from the global TODO list using C-c C-s. • TODO items already scheduled can be made to disappear from the global todo list, see org-agenda-todo-ignore-scheduled'. • In Tables, formulas may also be Lisp forms. • Exporting the visible part of an outline with C-c C-x v' works now for all available exporters. • Bug fixes, lots of them :-( • Bug fixes. ## Version 4.27 • HTML exporter generalized to receive external options. As part of the process, author, email and date have been moved to the end of the HTML file. • Support for customizable file search in file links. 
• BibTeX database links as first application of the above. • New option org-agenda-todo-list-sublevels' to turn off listing TODO entries that are sublevels of another TODO entry. • Bug fixes. ## Version 4.25 • Revision of the font-lock faces section, with better tty support. • TODO keywords in Agenda buffer are fontified. • Export converts links between .org files to links between .html files. • Better support for bold/italic/underline emphasis. • Bug fixes. • Bug fixes. ## Version 4.22 • Bug fixes. • In agenda buffer, mouse-1 no longer follows link. See org-agenda-mouse-1-follows-link' and org-mouse-1-follows-link'. ## Version 4.20 • Links use now the description format by default. When inserting links, the user is prompted for a description. • If a link has a description, only the description is displayed; the link part is hidden. Use C-c C-l to edit the link part. • TAGS are now bold, but in the same color as the headline. • The width of a table column can be limited by using a field "<N>". • New structure for the customization tree. • Bug fixes. ## Version 4.13 • The list of agenda files can be maintained in an external file. • Bug fixes. ## Version 4.12 • Templates for remember buffer. Note that the remember setup changes. To set up templates, see org-remember-templates'. • The time in new time stamps can be rounded, see new option org-time-stamp-rounding-minutes'. • Bug fixes (there are always more bugs). […] Version 4.00 • Headlines can contain TAGS, and Org mode can produce a list of matching headlines based on a TAG search expression. • org-agenda' has now become a dispatcher that will produce the agenda and other views on org-mode data with an additional keypress. ## Version 3.24 • Switching an item to DONE records a time stamp when the variable org-log-done' is turned on. Default is off. ## Version 3.23 • M-RET makes new items as well as new headings. • Various small bug fixes ## Version 3.22 • CamelCase words link to other locations in the same file. 
• Plain list items can be folded with org-cycle'. See new option org-cycle-include-plain-lists'. • Sparse trees for specific TODO keywords through numeric prefix argument to C-c C-v'. • Global TODO list, also for specific keywords. • Matches in sparse trees are highlighted (highlights disappear with next buffer change due to editing). ## Version 3.21 • Improved CSS support for the HTML export. Thanks to Christian Egli. • Editing support for hand-formatted lists • M-S-cursor keys handle plain list items • C-c C-c renumbers ordered plain lists ## Version 3.20 • There is finally an option to make TAB jump over horizontal lines in tables instead of creating a new line before that line. The option is org-table-tab-jumps-over-hlines', default nil. • New command for sorting tables, on C-c ^'. • Changes to the HTML exporter • hand-formatted lists are exported correctly, similar to markdown lists. Nested lists are possible. See the docstring of the variable org-export-plain-list-max-depth'. • cleaned up to produce valid HTML 4.0 (transitional). • support for cascading style sheets. • New command to cycle through all agenda files, on C-, • C-c [ can now also be used to change the sequence of agenda files. • Bug fixes ## Version 3.18 • Export of calendar information in the standard iCalendar format. • Some bug fixes. ## Version 3.17 • HTML export specifies character set depending on coding-system. ## Version 3.16 • In tables, directly after the field motion commands like TAB and RET, typing a character will blank the field. Can be turned off with variable org-table-auto-blank-field'. • Inactive timestamps with C-c !'. These do not trigger the agenda and are not linked to the calendar. • Additional key bindings to allow Org mode to function on a tty emacs. • C-c C-h' prefix key replaced by C-c C-x', and C-c C-x C-h' replaced by C-c C-x b' (b=Browser). This was necessary to recover the standard meaning of C-h after a prefix key (show prefix bindings). 
## Version 3.15 • QUOTE keyword at the beginning of an entry causes fixed-width export of unmodified entry text. C-c :' toggles this keyword. • New face org-special-keyword' which is used for COMMENT, QUOTE, DEADLINE and SCHEDULED, and priority cookies. Default is only a weak color, to reduce the amount of aggressive color in the buffer. ## Version 3.14 • Formulas for individual fields in table. • Automatic recalculation in calculating tables. • Named fields and columns in tables. • Fixed bug with calling org-archive' several times in a row. ## Version 3.13 • Efficiency improvements: Fewer table re-alignments needed. • New special lines in tables, for defining names for individual cells. ## Version 3.12 • Tables can store formulas (one per column) and compute fields. Not quite like a full spreadsheet, but very powerful. • table.el keybinding is now C-c ~'. • Numeric argument to org-cycle does show-subtree' above on level ARG. • Small changes to keys in agenda buffer. Affected keys: [w] weekly view; [d] daily view; [D] toggle diary inclusion. • Bug fixes. ## Version 3.11 • Links inserted with C-c C-l are now by default enclosed in angle brackets. See the new variable org-link-format'. • ">" terminates a link, this is a way to have several links in a line. Both "<" and ">" are no longer allowed as characters in a link. • Compatibility with CUA-mode (see variable org-CUA-compatible'). • Compatibility problems with viper-mode fixed. • Improved html export of tables. • Various clean-up changes. ## Version 3.10 • Using define-derived-mode' to derive org-mode' from outline-mode'. ## Version 3.09 • Time-of-day specifications in agenda are extracted and placed into the prefix. Timed entries can be placed into a time grid for day. ## Version 3.08 • "|" no longer allowed as part of a link, to allow links in tables. • The prefix of items in the agenda buffer can be configured. • Cleanup. ## Version 3.07 • Some folding inconsistencies removed. 
• BBDB links to company-only entries. • Bug fixes and global cleanup. ## Version 3.06 • M-S-RET inserts a new TODO heading. • New startup option content'. • Better visual response when TODO items in agenda change status. • Window positioning after visibility state changes optimized and made configurable. See org-cycle-hook' and org-occur-hook'. ## Version 3.05 • Agenda entries from the diary are linked to the diary file, so adding and editing diary entries can be done directly from the agenda. • Many calendar/diary commands available directly from agenda. • Field copying in tables with S-RET does increment. • C-c C-x C-v extracts the visible part of the buffer for printing. • Moving subtrees up and down preserves the whitespace at the tree end. ## Version 3.04 • Table editor optimized to need fewer realignments, and to keep table shape when typing in fields. • A new minor mode, orgtbl-mode, introduces the Org mode table editor into arbitrary major modes. • Fixed bug with realignment in XEmacs. • Startup options can be set with special #+STARTUP line. • Heading following a match in org-occur can be suppressed. ## Version 3.03 • Copyright transfer to the FSF. • Effect of C-u and C-u C-u in org-timeline swapped. • Timeline now always contains today, and .' jumps to it. • Table editor: • cut and paste of rectangular regions in tables • command to convert org-mode table to table.el table and back • command to treat several cells like a paragraph and fill it • command to convert a buffer region to a table • import/export tables as tab-separated files (exchange with Excel) • Agenda: • Sorting mechanism for agenda items rewritten from scratch. • Sorting fully configurable. • Entries specifying a time are sorted together. • Completion also covers option keywords after #-'. • Bug fixes. ## Version 3.01 • New reference card, thanks to Philip Rooke for creating it. • Single file agenda renamed to "Timeline". 
It no longer shows warnings about upcoming deadlines/overdue scheduled items. That functionality is now limited to the (multifile) agenda. • When reading a date, the calendar can be manipulated with keys. • Link support for RMAIL and Wanderlust (from planner.el, untested). • Minor bug fixes and documentation improvements. ## Version 3.00 • Multifile Agenda shows current entries from many different files. • TeXInfo documentation (thanks to Christian Egli for the conversion). • Additional applications for TODO keywords, see documentation. Different files may have different TODO keywords etc. • Priorities for TODO items. • The browser mode used by org-remember-handler' is improved. • Images get inlined in HTML export (thanks to Carsten Wimmer). • File links can contain line numbers, like file:///usr/etc/config:255 • Minor bug fixes. ## Version 2.10 • TODO entries can have additional states besides TODO and DONE. See new variable org-todo-keywords'. • TODO keywords can be interpreted as categories. See variable org-todo-interpretation'. • M-TAB completion on TODO keywords, TeX symbols, and normal words. • All keywords (like TODO, DEADLINE etc) are configurable. • Cursor positioning optimized after pro/demotion and TODO cycling. • Emphasizing in HTML works now for bold, italic and underline. • New commands to kill, copy and yank entire subtrees. Yanking modifies the level of the tree before insertion. • New command org-goto' (C-c C-j) to quickly move to other locations in the buffer without affecting outline visibility. • Hooks for John Wiegley's remember.el. • org-read-date' pops up calendar for date selection with the mouse. See variable org-popup-calendar-for-date-prompt'. ## Version 2.6 • TODO items can be SCHEDULED to a certain date. • Expired DEADLINEs are ignored if in an entry marked DONE. • From the diary or time-sorted view (C-c C-r), C-c C-t can be used to change the TODO state of an item remotely. • Horizontal computations in table editor. 
See org-table-eval-formula'. • Fixed bug with summing tables (command org-table-sum', C-c +'). • Calendar window follows the timestamp when a timestamp is changed. New variable org-calendar-follow-timestamp-change'. • Time-sorted view (org-diary-view', C-c C-r) now uses the prefix argument to force inclusion of unscheduled TODO items. • New variable org-confirm-shell-links' to turn of safety query. • New variable org-open-non-existing-files'. ## Version 2.4 • A time-sorted view on all time stamps can be created with C-c C-r. • Timestamps and Deadlines can be shown in the Emacs diary. • Date ranges introduced. • Time-string formats are no longer configurable. • Vertical lines in tables can be made invisible with C-c |'. • New "link" type to execute shell commands, like "ls *.org" • Upon export, "myfile.org" becomes "myfile.html" or "myfile.txt", instead of "myfile.org.html" or "myfile.org.txt". • When the cursor is in the white space at the beginning of a line, TAB removes the whitespace before indenting again. ## Version 2.0 • Windows (NT/2000) support. • Works with both Emacs and XEmacs. • Fully automatic table editor. • New link types into Gnus, VM and BBDB. • Time stamps are treated as links to the calendar. • Insertion of links with C-c C-l' works differently now. • Space characters allowed as part of a link. • Options in org-file-apps' extended. The command may now be symbol 'emacs', or a lisp form. • Timestamp changes • org-deadline' now prompts for a date. • A line can now contain several timestamps. Updating of a timestamp only happens if the cursor is at the timestamp. • Changed the time-stamp-format to ISO, to make sure it will always work (non-English month names had caused problems with parse-time-string'.). Changing the time stamp format is not recommended. • Picture mode enhancements have been removed from org.el ## Version 1.4 • Some option name changes, not backward compatible. 
• HTML exporter upgrade: fixed-width regions, better sub/superscripts, many TeX symbols supported. • Calendar support. 1
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.37249138951301575, "perplexity": 4245.703753391082}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-49/segments/1416400379462.60/warc/CC-MAIN-20141119123259-00132-ip-10-235-23-156.ec2.internal.warc.gz"}
https://math.stackexchange.com/questions/1569900/determine-whether-a-matrix-is-othrogonal
# Determine whether a matrix is orthogonal I need to determine if the following matrix is orthogonal $A = \frac{1}{\sqrt{2}} \begin{pmatrix} 1 & 1 \\ 0 & 0 \\ 1 & -1 \end{pmatrix}$ Here is what I did: $u \cdot v = (\frac{1}{\sqrt{2}} \cdot \frac{1}{\sqrt{2}}) + (\frac{1}{\sqrt{2}} \cdot -\frac{1}{\sqrt{2}}) = 0$ $\|u\| = \sqrt{\left(\frac{1}{\sqrt{2}}\right)^2 + 0 + \left(\frac{1}{\sqrt{2}}\right)^2} = 1$ $\|v\| = \sqrt{\left(\frac{1}{\sqrt{2}}\right)^2 + 0 + \left(-\frac{1}{\sqrt{2}}\right)^2} = 1$ This should indicate that the matrix is orthogonal, however, the answer in the book says it is not orthogonal and I can't see where I went wrong • Shouldn't orthogonal matrices be $n\times n$? – user228113 Dec 10 '15 at 21:14 • I suppose that is where my mistake is. I guess I got a bit too caught up and overlooked something as simple as that – user273323 Dec 10 '15 at 21:16 • Orthogonal matrices are square matrices that have the property: $A^T = A^{-1}$. – Nathan Marianovsky Dec 10 '15 at 21:16 Your matrix satisfies $A^TA=I_2$, that is the columns are orthonormal. Such matrices are useful when you want to define the polar decomposition of an $m\times n$ matrix $M$ with $m\geq n$ and $\operatorname{rank}(M)=n$. The decomposition is $M=US$ where $U$ is an $m\times n$ matrix with orthonormal columns and $S$ is an $n\times n$ SDP matrix. More precisely $S=\sqrt{M^TM}$ and $U=MS^{-1}$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9702929258346558, "perplexity": 161.53241151967183}, "config": {"markdown_headings": true, "markdown_code": false, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623487608856.6/warc/CC-MAIN-20210613131257-20210613161257-00428.warc.gz"}
https://www.physicsforums.com/threads/elastic-collision-symbolic-question.222342/
# Elastic Collision - Symbolic Question 1. Mar 16, 2008 ### kanavulator 1. The problem statement, all variables and given/known data Suppose you hold a small ball of mass m1 in contact with, and directly over, the center of mass of a large ball of mass m2. If you then drop the small ball a short time after dropping the large ball, the small ball rebounds with surprising speed. If we ignore air resistance and assume the large ball makes an elastic collision with the floor and then makes an elastic collision with the still descending small ball and that large ball has much larger mass than the small ball then: a) If the velocity of the small ball immediately before the collision is v, what is the velocity of the large ball? (in terms of v) b) What is the velocity of the small ball immediately after its collision with the large ball? (in terms of v) c) What is the ratio of the small ball's rebound distance to the distance it fell before the collision? (a number) 2. Relevant equations 1/2mv^2 + mgh = 1/2mv^2 + mgh Elastic collision: V1 = -V2 3. The attempt at a solution a. -V b. ? c. ? 2. Mar 16, 2008 ### Staff: Mentor Hint: Analyze the problem in the center of mass frame, then transform back to the lab frame. (Assume m2 >> m1.) 3. Mar 16, 2008 ### kanavulator That...doesn't make a bit of sense to me. Pardon my lack of knowledge, but I'm not really familiar with the terms you were using, Doc Al. 4. Mar 16, 2008 ### Staff: Mentor No problem. Sometimes problems are easier to solve in certain frames of reference--but let's forget that for the moment. What do you know about elastic collision? You'll need this for part c. What does this mean? If you mean the relative velocity reverses: Great! Use it. Assuming you meant -v (the same v as the small ball): Good! Here's a hint for part b: If a ping pong ball hits a bowling ball, what happens to the velocity of the bowling ball? (To a good approximation.)
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.900945246219635, "perplexity": 906.814050199842}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-50/segments/1480698541322.19/warc/CC-MAIN-20161202170901-00224-ip-10-31-129-80.ec2.internal.warc.gz"}
http://mathhelpforum.com/calculus/141115-parametric-equations.html
1. ## Parametric equations. The following problem I need help with: The parametric curve defined by the equations x(t) = cos(t); y(t) = sin(3t); 0 <= t <= 2π: a) Find a formula which represents the slope of the tangent line to the curve at the point (x(t), y(t)). b) Find the points (values of t and corresponding (x, y) coordinates) where the curve has a horizontal tangent line, and find the points where the curve has a vertical tangent line. c) Use the information you found in part b) to draw a sketch of the curve. d) Set up, but do not compute, an integral that represents the length of this parametric curve. 2. [QUOTE=Sally_Math;500410] The following problem I need help with: The parametric curve defined by the equations x(t) = cos(t); y(t) = sin(3t); 0 <= t <= 2π: a) Find a formula which represents the slope of the tangent line to the curve at the point (x(t), y(t)). The "slope of the tangent line" is $\frac{dy}{dx}= \frac{\frac{dy}{dt}}{\frac{dx}{dt}}$ b) Find the points (values of t and corresponding (x, y) coordinates) where the curve has a horizontal tangent line, and find the points where the curve has a vertical tangent line. The tangent line will be horizontal when $\frac{dy}{dt}= 0$ and vertical when $\frac{dx}{dt}= 0$. c) Use the information you found in part b) to draw a sketch of the curve. d) Set up, but do not compute, an integral that represents the length of this parametric curve. $\int \sqrt{\left(\frac{dx}{dt}\right)^2+ \left(\frac{dy}{dt}\right)^2}\, dt$
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 4, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9107535481452942, "perplexity": 848.8243251333419}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-26/segments/1498128319688.9/warc/CC-MAIN-20170622181155-20170622201155-00233.warc.gz"}
https://math.stackexchange.com/questions/570954/simple-undergraduate-series-quesiton
Consider $\displaystyle \sum_{n=1}^\infty (-1)^{n-1}a_n$ where $(a_n)$ is a monotone decreasing sequence of nonnegative numbers with $a_n \rightarrow 0$; by the alternating series test, series of this form always converge. Show that $0 \leq \displaystyle\sum_{n=1}^\infty (-1)^{n-1}a_n \leq a_1$ There is a hint in the question: if $(S_N)$ denotes the sequence of partial sums, consider the subsequences $(S_{2N})$ and $(S_{2N-1})$ can you show that one is decreasing while the other is increasing? I have proven the hint - however I am unable to proceed from here - Could someone please point me in the right direction? We will show that $0\leq S_{n}\leq a_{1}$ for all $n\in\mathbb{N}$ (and hence $0\leq\lim S_{n}\leq a_{1}$). We know that $0\leq a_{1}-a_{2}=S_{2}\leq S_{1}= a_{1}$ and you already showed that $S_{2n}$ is increasing while $S_{2n-1}$ is decreasing. Now let $k\in\mathbb{N}$ and notice that $S_{2k}\leq S_{2k-1}$. Because $S_{2n}$ is increasing and $S_{2n-1}$ is decreasing we now know that $0\leq S_{2}\leq S_{2k}\leq S_{2k-1}\leq S_{1}=a_{1}$. To finish note that, for any $n\in\mathbb{N}$, $S_{n}=S_{2k}$ or $S_{n}=S_{2k-1}$ for some $k\in\mathbb{N}$. If $n$ is odd then $$S_{n}=a_1\underbrace{-a_2+a_3}_{\leq 0}\underbrace{-a_4+a_5}_{\leq 0}\cdots\underbrace{-a_{n-1}+a_{n}}_{\leq 0}\leq a_1.$$ On the other hand, if $n$ is even, then $$S_{n}=a_1\underbrace{-a_2+a_3}_{\leq 0}\underbrace{-a_4+a_5}_{\leq 0}\cdots\underbrace{-a_{n-2}+a_{n-1}}_{\leq 0}\underbrace{-a_{n}}_{\leq 0}\leq a_1.$$ So in any case $S_n\leq a_1$.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9956572651863098, "perplexity": 56.5382320681935}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-10/segments/1614178358203.43/warc/CC-MAIN-20210227054852-20210227084852-00425.warc.gz"}
https://docs.scipy.org/doc/scipy-1.3.0/reference/generated/scipy.signal.lsim2.html
# scipy.signal.lsim2¶ scipy.signal.lsim2(system, U=None, T=None, X0=None, **kwargs)[source] Simulate output of a continuous-time linear system, by using the ODE solver scipy.integrate.odeint. Parameters systeman instance of the lti class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: • 1: (instance of lti) • 2: (num, den) • 3: (zeros, poles, gain) • 4: (A, B, C, D) Uarray_like (1D or 2D), optional An input array describing the input at each time T. Linear interpolation is used between given times. If there are multiple inputs, then each column of the rank-2 array represents an input. If U is not given, the input is assumed to be zero. Tarray_like (1D or 2D), optional The time steps at which the input is defined and at which the output is desired. The default is 101 evenly spaced points on the interval [0,10.0]. X0array_like (1D), optional The initial condition of the state vector. If X0 is not given, the initial conditions are assumed to be 0. kwargsdict Additional keyword arguments are passed on to the function odeint. See the notes below for more details. Returns T1D ndarray The time values for the output. youtndarray The response of the system. xoutndarray The time-evolution of the state-vector. Notes This function uses scipy.integrate.odeint to solve the system’s differential equations. Additional keyword arguments given to lsim2 are passed on to odeint. See the documentation for scipy.integrate.odeint for the full list of arguments. If (num, den) is passed in for system, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. s^2 + 3s + 5 would be represented as [1, 3, 5]). #### Previous topic scipy.signal.lsim #### Next topic scipy.signal.impulse
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.44608834385871887, "perplexity": 1846.3475213834076}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-34/segments/1596439735958.84/warc/CC-MAIN-20200805124104-20200805154104-00469.warc.gz"}
https://arxiv.org/abs/1311.0214
nucl-ex (what is this?) # Title:Centrality, rapidity and transverse momentum dependence of $J/ψ$ suppression in Pb-Pb collisions at $\sqrt{s_{\rm NN}}$=2.76 TeV Abstract: The inclusive $J/ψ$ nuclear modification factor $R_{\rm AA}$ in Pb-Pb collisions at $\sqrt{s_{\rm NN}}$=2.76 TeV has been measured by ALICE as a function of centrality in the e$^+$e$^-$ decay channel at mid-rapidity $|y|<0.8$ and as a function of centrality, transverse momentum and rapidity in the $μ^{+}μ^{-}$ decay channel at forward-rapidity $2.5<y<4$.The $J/ψ$ yields measured in Pb-Pb are suppressed compared to those in pp collisions scaled by the number of binary collisions. The $R_{\rm AA}$ integrated over a centrality range corresponding to 90% of the inelastic Pb-Pb cross section is $0.72\pm0.06$ (stat.) $\pm0.10$ (syst.) at mid-rapidity and $0.57 \pm 0.01$ (stat.) $\pm0.09$ (syst.) at forward-rapidity. At low transverse momentum, significantly larger values of $R_{\rm AA}$ are measured at forward-rapidity compared to measurements at lower energy. These features suggest that a contribution to the $J/ψ$ yield originates from charm quarks (re)combination in the deconfined partonic medium. Comments: 24 pages, 5 captioned figures, 3 tables, authors from page 19, published version, figures at this http URL Subjects: Nuclear Experiment (nucl-ex); High Energy Physics - Experiment (hep-ex) Journal reference: Phys. Lett. B 734 (2014) 314-327 DOI: 10.1016/j.physletb.2014.05.064 Report number: CERN-PH-EP-2013-203 Cite as: arXiv:1311.0214 [nucl-ex] (or arXiv:1311.0214v4 [nucl-ex] for this version) ## Submission history From: Alice Publications [view email] [via ALICE proxy] [v1] Fri, 1 Nov 2013 16:03:31 UTC (367 KB) [v2] Thu, 15 May 2014 13:59:22 UTC (319 KB) [v3] Thu, 2 Oct 2014 18:52:10 UTC (319 KB) [v4] Tue, 11 Apr 2017 18:40:54 UTC (319 KB)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9756028652191162, "perplexity": 5877.232654805069}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-04/segments/1547583680452.20/warc/CC-MAIN-20190119180834-20190119202834-00415.warc.gz"}
http://csd.newcastle.edu.au/chapters/chapter17.html
You are here : Control System Design - Index | Book Contents | Chapter 17 ## 17. Linear State Space Models ### Preview We have seen that there are many alternative model formats that can be used for linear dynamic systems. In simple SISO problems, any representation is probably as good as any other. However, as we move to more complex problems (especially multivariable problems) it is desirable to use special model formats. One of the most flexible and useful structures is the state space model. As we saw in Chapter 3, this model takes the form of a coupled set of first order differential (or difference) equations. This model format is particularly useful with regard to numerical computations. State space models were briefly introduced in Chapter 3. Here we will examine linear state space models in a little more depth for the SISO case. Note, however, that many of the ideas will directly carry over to the multivariable case presented later. In particular, we will study • similarity transformations and equivalent state representations • state space model properties • controllability, reachability and stabilizability • observability, reconstructability and detectability • special (canonical) model formats The key tools used in studying linear state space methods are linear algebra and vector space methods. The reader is thus encouraged to briefly review these concepts as a prelude to reading this chapter. ### Summary • State variables are system internal variables, upon which a full model for the system behavior can be built. The state variables can be ordered in a state vector. • Given a linear system, the choice of state variables is not unique. However, • the minimal dimension of the state vector is a system invariant, • there exists a nonsingular matrix which defines a similarity transformation between any two state vectors, and • any designed system output can be expressed as a linear combination of the state variables and the inputs. 
• For linear, time invariant systems the state space model is expressed in the following equations: • Stability and natural response characteristics of the system can be studied from the eigenvalues of the system matrix A. • State space models facilitate the study of certain system properties which are paramount in the solution of the control design problem. These properties relate to the following questions: • By proper choice of the input u, can we steer the system state to a desired state (point value)? (controllability) • If some states are uncontrollable, will these states generate a time decaying component? (stabilizability) • If one knows the input, u(t) for t ≥ t0, can we infer the state at time t = t0 by measuring the system output, y(t) for t ≥ t0? (observability) • If some of the states are unobservable, do these states generate a time decaying signal? (detectability) • Controllability tells us about the feasibility to control a plant. • Observability tells us about whether it is possible to know what is happening in a given system by observing its outputs. • The above system properties are system invariants. However, changes in the number of inputs, in their injection points, in the number of measurements and in the choice of variables to be measured may yield different properties. • A transfer function can always be derived from a state space model. • A state space model can be built from a transfer function model. However, only the completely controllable and observable part of the system is described in that state space model. Thus the transfer function model might be only a partial description of the system. • The properties of individual systems do not necessarily translate unmodified to composed systems. 
In particular, given two systems completely reachable, observable, controllable and reconstructible, their cascade connection: • is not completely observable if a pole of the first system coincides with a zero of the second system (pole-zero cancellation), • is not detectable if the pole-zero cancellation affects an unstable pole, • is not completely controllable if a zero of the first system coincides with a pole of the second system (zero-pole cancellation), and • is not stabilizable if the zero-pole cancellation affects a NMP zero • this chapter provides a foundation for the design criteria which states that one should never attempt to cancel unstable poles and zeros. Previous - Chapter 16 Up - Book Contents Next - Chapter 18
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8766963481903076, "perplexity": 596.6161507918613}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257644271.19/warc/CC-MAIN-20180317035630-20180317055630-00781.warc.gz"}
http://www.formuladirectory.com/user/formula/285
HOSTING A TOTAL OF 318 FORMULAS WITH CALCULATORS Length Of The Hypotenuse Of Right Triangle In mathematics, the Pythagorean theorem — or Pythagoras' theorem — is a relation in Euclidean geometry among the three sides of a right triangle (right-angled triangle). In any right-angled triangle, the area of the square whose side is the hypotenuse (the side opposite the right angle) is equal to the sum of the areas of the squares whose sides are the two legs (the two sides that meet at a right angle). $c=\sqrt{{a}^{2}+{b}^{2}}$ Here, a = base, b = perpendicular and c = hypotenuse. ENTER THE VARIABLES TO BE USED IN THE FORMULA SOLVE FORMULA
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 1, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9647494554519653, "perplexity": 290.0779505337279}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-51/segments/1544376824119.26/warc/CC-MAIN-20181212203335-20181212224835-00495.warc.gz"}
http://www.researchgate.net/publication/252304359_Baxter%27sT-Q_equation_SU%28_N%29SU%282%29_N_-_3_correspondence_and_Omega-deformed_Seiberg-Witten_prepotential
Article # Baxter'sT-Q equation, SU( N)/SU(2) N - 3 correspondence and Omega-deformed Seiberg-Witten prepotential Journal of High Energy Physics (Impact Factor: 5.62). 01/2011; 9. DOI:10.1007/JHEP09(2011)125 ABSTRACT We study Baxter's T-Q equation of XXX spin-chain models under the semiclassical limit where an intriguing SU( N)/SU(2) N-3 correspondence is found. That is, two kinds of 4D {N} = 2 superconformal field theories having the above different gauge groups are encoded simultaneously in one Baxter's T-Q equation which captures their spectral curves. For example, while one is SU( N c ) with N f = 2 N c flavors the other turns out to be {{SU}}{(2)^{{N_c} - 3}} with N c hyper-multiplets ( N c > 3). It is seen that the corresponding Seiberg-Witten differential supports our proposal. 0 0 · 0 Bookmarks · 8 Views • Source ##### Article: On non-stationary Lam\'e equation from WZW model and spin-1/2 XYZ chain [hide abstract] ABSTRACT: We study the link between WZW model and the spin-1/2 XYZ chain. This is achieved by comparing the second-order differential equations from them. In the former case, the equation is the Ward-Takahashi identity satisfied by one-point toric conformal blocks. In the latter case, it arises from Baxter's TQ relation. We find that the dimension of the representation space w.r.t. the V-valued primary field in these conformal blocks gets mapped to the total number of chain sites. By doing so, Stroganov's "The Importance of being Odd" (cond-mat/0012035) can be consistently understood in terms of WZW model language. We first confirm this correspondence by taking a trigonometric limit of the XYZ chain. That eigenstates of the resultant two-body Sutherland model from Baxter's TQ relation can be obtained by deforming toric conformal blocks supports our proposal. Journal of High Energy Physics 02/2012; 2012(6). 
· 5.62 Impact Factor • Source ##### Article: Liouville theory, \mathcal{N} = 2 gauge theories and accessory parameters [hide abstract] ABSTRACT: The correspondence between the semiclassical limit of the DOZZ quantum Liouville theory and the Nekrasov-Shatashvili limit of the $\mathcal{N} = 2$ (Ω-deformed) U(2) super-Yang-Mills theories is used to calculate the unknown accessory parameter of the Fuchsian uniformization of the 4-punctured sphere. The computation is based on the saddle point method. This allows to find an analytic expression for the N f = 4, U(2) instanton twisted superpotential and, in turn, to sum up the 4-point classical block. It is well known that the critical value of the Liouville action functional is the generating function of the accessory parameters. This statement and the factorization property of the 4-point action allow to express the unknown accessory parameter as the derivative of the 4-point classical block with respect to the modular parameter of the 4-punctured sphere. It has been found that this accessory parameter is related to the sum of all rescaled column lengths of the so-called ’critical’ Young diagram extremizing the instanton ’free energy’. It is shown that the sum over the ’critical’ column lengths can be rewritten in terms of a contour integral in which the integrand is built out of certain special functions closely related to the ordinary Gamma function. Journal of High Energy Physics 01/2012; 2012(5). · 5.62 Impact Factor
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.909515380859375, "perplexity": 1003.455439536521}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-15/segments/1397609537186.46/warc/CC-MAIN-20140416005217-00018-ip-10-147-4-33.ec2.internal.warc.gz"}
https://www.physicsforums.com/threads/help-for-the-stochastic-differential-equations.540552/
# Help for the stochastic differential equations 1. Oct 15, 2011 ### ptc_scr Hi, Could some one help me to solve the equations ? dX =sqrt(X) dB where X is a process; B is a Brownian motion with B(0,w) =0;sqrt(X) is squart root of X. 2. Oct 15, 2011 ### chiro Hey ptc_scr and welcome to the forums. In these forums, we require the poster to show any work that they have done before we can help them. We do this so that you can actually learn for yourself what is going on so that you do the work and end up understanding it yourself. So first I ask you to show any working, and secondly what do you know about solving SDE's with Brownian motion? Do you know about Ito's lemma and its assumptions? 3. Oct 16, 2011 ### ptc_scr Hi, I just try to assign Y=sqrt(X) and use Ito lemma to solve the problem. so dY= 1/2 dB+ 1/(4Y) dt. Obviously, we cannot put Y one left side. So the substitution is failed. ANy one can show me how to find a good substitution or show me it is impossible to solve the problem ? But for existence and uniqueness theorm for Ito-diffusion, it seems that the problem can be solve ? because sqrt(X) <= C(1+|X|) for some certain C. Thanks
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9128122925758362, "perplexity": 1180.4061280547812}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-39/segments/1505818689900.91/warc/CC-MAIN-20170924081752-20170924101752-00093.warc.gz"}
https://brilliant.org/problems/volume-of-the-tetrahedron/
# Volume of the tetrahedron Geometry Level pending In a regular hexagonal prism $$ABCDEF-GHIJKL$$, whose volume is $$90 \text{ m}^3$$, calculate the volume of the tetrahedron $$ADHK$$. ×
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8461540937423706, "perplexity": 1838.345706205665}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-13/segments/1490218189252.15/warc/CC-MAIN-20170322212949-00581-ip-10-233-31-227.ec2.internal.warc.gz"}
http://physics.stackexchange.com/questions/54078/ratio-of-distance-between-mirror-and-person/54079
# Ratio of distance between mirror and person In perspective of a given example, if a man was to stand $2\ m$ away from a mirror which was $0.9\ m$ in height and was able to see his full reflection, what would the height of the mirror have to be if the man was now $6\ m$ away from the mirror and was to maintain a full reflection? Would the mirror be equal to or less than the original height and why? This scenario seems to have caught me out multiple times. So, some reasoning would be much appreciated. - You haven't said what the condition for the height the mirror would have to be is. Perhaps the requirement is that the man can see his full reflection? This is not an uncommon example problem in geometric optics and the answer is slightly surprising but very easy to get using the rules of reflection. – dmckee Feb 15 '13 at 20:47 Ah thank you for pointing out that critical fact! Post was edited to show that requirement – Jared Ping Feb 15 '13 at 20:49
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8817617893218994, "perplexity": 330.6988372620073}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2016-07/segments/1454701166570.91/warc/CC-MAIN-20160205193926-00204-ip-10-236-182-209.ec2.internal.warc.gz"}
http://www.zora.uzh.ch/view/authors_for_linking_in_citation/Arratia=3AR=3A=3A.html
# Browse by Creators Export as AKABERASCII CitationBibTeXCitaviDATACITE_DCDublin CoreDublin CoreEP3 XMLEndNoteEvaluationHTML CitationJSONMETSMultiline CSVObject IDsPubListRDF+N-TriplesRDF+N3RDF+XMLReferReference Manager Number of items: 10. Arratia, R; Barbour, A D; Tavaré, S (2006). A tale of three couplings: Poisson-Dirichlet and GEM approximations for random permutations. Combinatorics, Probability & Computing, 15(1-2):31-62. Arratia, R; Barbour, A D; Tavaré, S (2005). A probabilistic approach to analytic arithmetic on algebraic function fields. Mathematical Proceedings of the Cambridge Philosophical Society, 139(1):1-26. Arratia, R; Barbour, A D; Tavaré, S (2003). Logarithmic combinatorial structures: a probabilistic approach. Zürich: European Mathematical Society (EMS). Arratia, R; Barbour, A D; Tavaré, S (2000). Limits of logarithmic combinatorial structures. The Annals of Probability, 28(4):1620-1644. Arratia, R; Barbour, A D; Tavaré, S (2000). The number of components in a logarithmic combinatorial structure. Annals of Applied Probability, 10(2):331-361. Arratia, R; Barbour, A D; Tavaré, S (1999). On Poisson-Dirichlet limits for random decomposable combinatorial structures. Combinatorics, Probability & Computing, 8(3):193-208. Arratia, R; Barbour, A D; Tavaré, S (1999). The Poisson-Dirichlet distribution and the scale-invariant Poisson process. Combinatorics, Probability & Computing, 8(5):407-416. Arratia, R; Barbour, A D; Tavaré, S (1997). Random combinatorial structures and prime factorizations. Notices of the American Mathematical Society, 44(8):903-910. Arratia, R; Barbour, A D; Tavaré, S (1993). On random polynomials over finite fields. Mathematical Proceedings of the Cambridge Philosophical Society, 114(2):347-368. Arratia, R; Barbour, A D; Tavaré, S (1992). Poisson process approximations for the Ewens sampling formula. Annals of Applied Probability, 2(3):519-535. This list was generated on Wed Mar 21 05:01:34 2018 CET.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9797129034996033, "perplexity": 12624.616638388272}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 5, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-13/segments/1521257647567.36/warc/CC-MAIN-20180321023951-20180321043951-00613.warc.gz"}
https://www.physicsforums.com/threads/linear-algebra-inverse-of-the-sum-of-two-matrices.421434/
# Homework Help: Linear algebra: inverse of the sum of two matrices 1. Aug 10, 2010 ### degs2k4 1. The problem statement, all variables and given/known data Show that $$(I-A)^{-1} = I + A + A^2 + A^3$$ if $$A^4=0$$ 3. The attempt at a solution I found at Google Books some kind of formula for it: However, I think I should develop some kind of series for it using I = A(A^-1), I tried but I haven't been successful... 2. Aug 10, 2010 ### Dick Just multiply (I-A) by I+A+A^2+A^3 and see if you get I. 3. Aug 10, 2010 ### degs2k4 Thanks for your reply, got it solved!
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9424253702163696, "perplexity": 1806.5628791691822}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-26/segments/1529267864019.29/warc/CC-MAIN-20180621020632-20180621040632-00054.warc.gz"}
http://repo.scoap3.org/record/32971
# Testing production scenarios for (anti-)(hyper-)nuclei and exotica at energies available at the CERN Large Hadron Collider Bellini, Francesca (European Organisation for Nuclear Research (CERN), 1211 Geneva, Switzerland) ; Kalweit, Alexander P. (European Organisation for Nuclear Research (CERN), 1211 Geneva, Switzerland) 28 May 2019 Abstract: We present a detailed comparison of coalescence and thermal-statistical models for the production of (anti-) (hyper-)nuclei in high-energy collisions. For the first time, such a study is carried out as a function of the size of the object relative to the size of the particle emitting source. Our study reveals large differences between the two scenarios for the production of objects with extended wave functions. While both models give similar predictions and show similar agreement with experimental data for (anti-)deuterons and (anti-)${}^{3}\mathrm{He}$ nuclei, they largely differ in their description of (anti-)hypertriton production. We propose to address experimentally the comparison of the production models by measuring the coalescence parameter systematically for different (anti-)(hyper-)nuclei in different collision systems and differentially in multiplicity. Such measurements are feasible with the current and upgraded Large Hadron Collider experiments. Our findings highlight the unique potential of ultrarelativistic heavy-ion collisions as a laboratory to clarify the internal structure of exotic QCD objects and can serve as a basis for more refined calculations in the future. Published in: Physical Review C 99 (2019)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 1, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8573830127716064, "perplexity": 1794.9020492557142}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-26/segments/1560627999163.73/warc/CC-MAIN-20190620065141-20190620091141-00079.warc.gz"}
http://math.stackexchange.com/questions/255454/about-sylow-systems
"$G$ is a solvable group if and only if $G$ has a Sylow system" (Sylow system: a set $S$ of Sylow subgroups of $G$, one for each prime dividing $|G|$, such that if $P, Q \in S$, then $PQ=QP$).
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 1, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9918639659881592, "perplexity": 85.36948538091323}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2014-23/segments/1406510264270.11/warc/CC-MAIN-20140728011744-00058-ip-10-146-231-18.ec2.internal.warc.gz"}
https://www.physicsforums.com/threads/simple-classic-action-integral.658356/
# Simple classic action integral 1. Dec 11, 2012 ### dydxforsn I'm trying to solve this simple problem (it's the first problem of Quantum Mechanics and Path Integrals by Feynman, I feel like an idiot not being able to do it....) It's just solving for the action, S, of a free particle (no potential, only kinetic energy..) So it should just be $$S = \int_{t_a}^{t_b}{\frac{m}{2} (\frac{dx}{dt})^2 dt}$$ which according to the book is simply $$S = \frac{m}{2} \frac{(x_b - x_a)^2}{t_b - t_a}$$ I've tried a couple of different ways to reason myself into this solution but I can't seem to figure it out. 2. Dec 11, 2012 ### Mute What have you tried so far? What did you plug in for $dx/dt$? 3. Dec 11, 2012 ### dydxforsn Incredibly wrong stuff, heh.. Yeah I'm an idiot. I was supposed to just plug in $v = \left ( \frac{x_{b} - x_{a}}{t_{b} - t_{a}} \right )$ because 'v' is constant from the Euler-Lagrange equation.. Thanks for helping me see what should have been obvious >_< I was hell bent on doing things symbolically and didn't seem to care about the appearance of the end point 'x' values.. These should have been very suggestive. Last edited: Dec 11, 2012 4. Dec 11, 2012 ### Mute Great! You figured it out! Yeah, with a problem like this it helps to remember that the action is a functional of $x(t)$ and $\dot{x}(t)$, so you get different answers depending on which function x(t) you use. Of course, varying the action with respect to x(t) (giving the Euler-Lagrange equations) yields the equation of motion for the classical path. The problem wanted the action of a classical path with boundary values $x(t_a) = x_a$ and $x(t_b) = x_b$. It can take some practice seeing these sorts of problems a few times before it clicks. =) Similar Discussions: Simple classic action integral
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 2, "mathjax_display_tex": 1, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8906840085983276, "perplexity": 541.9356910328266}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2017-43/segments/1508187824325.29/warc/CC-MAIN-20171020192317-20171020212317-00755.warc.gz"}
https://chemrxiv.org/articles/Machine-Learnt_Fragment-Based_Energies_for_Crystal_Structure_Prediction/7583294/1
## Machine-Learnt Fragment-Based Energies for Crystal Structure Prediction 2019-01-14T19:10:39Z (GMT) by Crystal structure prediction involves a search of a complex configurational space for local minima corresponding to stable crystal structures, which can be performed efficiently using atom-atom force fields for the assessment of intermolecular interactions. However, for challenging systems, the limitations in the accuracy of force fields prevents a reliable assessment of the relative thermodynamic stability of potential structures. Here we present a method to rapidly improve force field lattice energies by correcting two-body interactions with a higher level of theory in a fragment-based approach, and predicting these corrections with machine learning. We find corrected lattice energies with commonly used density functionals and second order perturbation theory (MP2) all significantly improve the ranking of experimentally known polymorphs where the rigid molecule model is applicable. The relative lattice energies of known polymorphs are also found to systematically improve towards experimentally determined values and more comprehensive energy models when using MP2 corrections, despite remaining at the force field geometry. Predicting two-body interactions with atom-centered symmetry functions in a Gaussian process is found to give highly accurate results with as little as 10-20% of the training data, reducing the cost of the energy correction by up to an order of magnitude. The machine learning approach opens up the possibility of using fragment-based methods to a greater degree in crystal structure prediction, providing alternative energy models where standard approaches are insufficient.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8085839152336121, "perplexity": 1276.7927197952847}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2019-09/segments/1550249578748.86/warc/CC-MAIN-20190224023850-20190224045850-00401.warc.gz"}
https://link.springer.com/article/10.1007%2Fs10470-009-9334-6
, Volume 62, Issue 2, pp 215–222 Widely tunable low-power high-linearity current-mode integrator built using DG-MOSFETs • Savas Kaya • Hesham F. A. Hamed • Anish Kulkarni Article Abstract A novel tunable current-mode integrator for low-voltage low-power applications is presented using mixed-mode TCAD simulations. The design is based on independently driven double-gate (IDDG) MOSFETs, a nano-scale four-terminal device, where one gate can be used to change the characteristics of the other. Using current-mirrors built with IDDG-MOSFETs, we show that the number of active devices in the tunable current-mode integrator, 16 in bulk CMOS design, may be halved, i.e. considerable savings in both total area and power dissipation. The integrator operates with single supply voltage of 1 V and a wide range of tunable bandwidth (~2 decades) and gain (~30 dB). This linear circuit has third-order harmonic distortion as low as −70 dB in appropriate bias conditions, which can be set via the back-gates. The impact of tuning on the IDDG integrator and conventional design using symmetrically driven (SDDG) MOSFETs is comparatively studied. The proposed design is a good example for performance leverage through IDDG MOSFET architectures in analog circuits integral to future mixed-signal systems. Notes Acknowledgments S. Kaya was supported in part by the Air Force Office of Scientific Research, under Summer Faculty Fellowship Program, during the course of this work. References 1. 1. Philip Wong, H.-S. (2002). Beyond the conventional transistor. IBM Journal of Research and Development, 46, 133–168. 2. 2. Mathew, L., et al. (2002). Vertical CMOS double gate MOSFET with notched poly gates. IEEE Si Nanoelectronics Workshop, June 2002 (pp. 5–6). Honolulu, USA.Google Scholar 3. 3. Pei, G., & Kan, E. C.-C. (2004). Independently driven DG MOSFETs for mixed-signal circuits: Part I-quasi-static and nonquasi-static channel coupling. IEEE Transactions on Electron Devices, 51, 2086–2093. 4. 4. 
Parvais, B., et al. (2007). FinFET technology for analog and RF circuits, Proceedings of ICECS (pp. 182–185), December 11–14, 2007, Marrakech, Morocco.Google Scholar 5. 5. Kaya, S., Hamed, H. F. A., & Starzyk, J. (2007). Low-power tunable analog circuit blocks based on nanoscale double-gate MOSFETs. IEEE Transactions on Circuits and Systems II, 54, 571. 6. 6. Beckett, P. (2003). Exploiting multiple functionality for nano-scale reconfigurable systems, Proceedings of GLSVLSI (pp. 50–55) April 28–29, 2003, Washington DC, USA.Google Scholar 7. 7. Hamed, H. F. A., Kaya, S., & Starzyk, J. (2008). Use of nano-scale double-gate MOSFETs in low-power tunable current mode analog circuits. Analog Integrated Circuits and Signal Processing, 54, 211. 8. 8. Karsilayan, A., & Tan, M. (1995). Current mode tunable integrator for low voltage applications. Electronics Letters, 31, 1525–1526. 9. 9. Zeki, A., Toker, A., & Ozoguz, S. (2001). Linearly tunable transconductor using modified CDBA. Analog Integrated Circuits and Signal Processing, 26, 179–183. 10. 10. Smith, S., & Sanchez-Sinencio, E. (1996). Low voltage integrators for high frequency CMOS current mode techniques. IEEE Transactions on Circuits and Systems II, 43, 39–48. 11. 11. De Lima, J. (2004). A low voltage wide swing programmable gain current amplifier. Analog Integrated Circuits and Signal Processing, 41, 147–157. 12. 12. 13. 13. DESSIS is a part of Synopsys’s TCAD Suite, http://www.synopsis.com. 14. 14. Sedighi, B., & Bakhtiar, M. S. (2007). Variable-gain current mirror for high-speed applications. IEICE Electronics Express, 4, 277–281. 15. 15. Razavi, B. (1998). RF microelectronics. Englewood Cliffs, NJ: Prentice-Hall.Google Scholar 16. 16. Cerdeira, A., et al. (2002). A new method for determination of harmonic distortion in SOI FD transistors. Solid-State Electronics, 46, 103–108. 17. 17. Cerdeira, A., et al. (2004). Integral function method for determination of nonlinear harmonic distortion. 
Solid-State Electronics, 48, 2225–2234. 18. 18. Hamed, H. F. A. (2003). A low voltage digitally programmable current-mode filter, Proceedings of ICM (pp. 413–417) Cairo, Egypt.Google Scholar Authors and Affiliations • Savas Kaya • 1 • Hesham F. A. Hamed • 2 • Anish Kulkarni • 1 1. 1.School of Electrical Engineering and Computer ScienceOhio UniversityAthensUSA 2. 2.Department of Electrical EngineeringEl-Minia UniversityEl-MiniaEgypt
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8491052389144897, "perplexity": 14669.650826933384}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-51/segments/1544376823817.62/warc/CC-MAIN-20181212091014-20181212112514-00207.warc.gz"}
http://mdh.diva-portal.org/smash/resultList.jsf?af=%5B%5D&aq=%5B%5B%7B%22categoryId%22%3A%2211507%22%7D%5D%5D&aqe=%5B%5D&aq2=%5B%5B%5D%5D&language=sv&query=
mdh.sePublikationer Ändra sökning Avgränsa sökresultatet 1234 1 - 50 av 168 RefereraExporteraLänk till träfflistan Permanent länk Referera Referensformat • apa • ieee • modern-language-association-8th-edition • vancouver • Annat format Fler format Språk • de-DE • en-GB • en-US • fi-FI • nn-NO • nn-NB • sv-SE • Annat språk Fler språk Utmatningsformat • html • text • asciidoc • rtf Träffar per sida • 5 • 10 • 20 • 50 • 100 • 250 Sortering • Standard (Relevans) • Författare A-Ö • Författare Ö-A • Titel A-Ö • Titel Ö-A • Publikationstyp A-Ö • Publikationstyp Ö-A • Äldst först • Nyast först • Skapad (Äldst först) • Skapad (Nyast först) • Senast uppdaterad (Äldst först) • Senast uppdaterad (Nyast först) • Disputationsdatum (tidigaste först) • Disputationsdatum (senaste först) • Standard (Relevans) • Författare A-Ö • Författare Ö-A • Titel A-Ö • Titel Ö-A • Publikationstyp A-Ö • Publikationstyp Ö-A • Äldst först • Nyast först • Skapad (Äldst först) • Skapad (Nyast först) • Senast uppdaterad (Äldst först) • Senast uppdaterad (Nyast först) • Disputationsdatum (tidigaste först) • Disputationsdatum (senaste först) Markera Maxantalet träffar du kan exportera från sökgränssnittet är 250. Vid större uttag använd dig av utsökningar. • 1. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Black-Litterman Model: Practical Asset Allocation Model Beyond Traditional Mean-Variance2016Självständigt arbete på grundnivå (kandidatexamen), 10 poäng / 15 hpStudentuppsats (Examensarbete) This paper consolidates and compares the applicability and practicality of Black-Litterman model versus traditional Markowitz Mean-Variance model. Although well-known model such as Mean-Variance is academically sound and popular, it is rarely used among asset managers due to its deficiencies. 
To put the discussion into context we shed light on the improvement made by Fisher Black and Robert Litterman by putting the performance and practicality of both Black- Litterman and Markowitz Mean-Variance models into test. We will illustrate detailed mathematical derivations of how the models are constructed and bring clarity and profound understanding of the intuition behind the models. We generate two different portfolios, composing data from 10-Swedish equities over the course of 10-year period and respectively select 30-days Swedish Treasury Bill as a risk-free rate. The resulting portfolios orientate our discussion towards the better comparison of the performance and applicability of these two models and we will theoretically and geometrically illustrate the differences. Finally, based on extracted results of the performance of both models we demonstrate the superiority and practicality of Black-Litterman model, which in our particular case outperform traditional Mean- Variance model. • 2. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, College of Natural and Applied Sciences, University of Dar es Salaam,Tanzania. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, Makerere University, Kampala, Uganda. Department of Mathematics, Makerere University, Kampala, Uganda. Department of Mathematics, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
A Variant of Updating Page Rank in Evolving Tree graphs2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 31-49Konferensbidrag (Refereegranskat) PageRank update refers to the process of computing new PageRank values after change(s) (addition or removal of links/vertices) has occurred in real life networks. The purpose of the updating is to avoid recalculating the values from scratch. To efficiently carry out the update, we consider PageRank as the expected number of visits to target vertex if multiple random walks are performed, starting at each vertex once and weighing each of these walks by a weight value. Hence, it might be looked at as updating non-normalised PageRank. In the proposed approach, a scaled adjacency matrix is sequentially updated after every change and the levels of the vertices being updated as well. This enables sets of internal and sink vertices dependent on their roots or parents, thus vector-vector product can be performed sequentially since there are no infinite steps from one vertex to the other. • 3. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, College of Natural and Applied Sciences, University of Dar es Salaam,Tanzania. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. PageRank in evolving tree graphs2018Ingår i: Stochastic Processes and Applications: SPAS2017, Västerås and Stockholm, Sweden, October 4-6, 2017 / [ed] Sergei Silvestrov, Anatoliy Malyarenko, Milica Rančić, Springer, 2018, Vol. 271, s. 375-390Kapitel i bok, del av antologi (Refereegranskat) In this article, we study how PageRank can be updated in an evolving tree graph. We are interested in finding how ranks of the graph can be updated simultaneously and effectively using previous ranks without resorting to iterative methods such as the Jacobi or Power method. We demonstrate and discuss how PageRank can be updated when a leaf is added to a tree, at least one leaf is added to a vertex with at least one outgoing edge, an edge added to vertices at the same level and forward edge is added in a tree graph. The results of this paper provide new insights and applications of standard partitioning of vertices of the graph into levels using breadth-first search algorithm. Then, one determines PageRanks as the expected numbers of random walk starting from any vertex in the graph. We noted that time complexity of the proposed method is linear, which is quite good. Also, it is important to point out that the types of vertex play essential role in updating of PageRank. • 4. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, College of Natural and Applied Sciences, University of Dar es Salaam,Tanzania. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Stockholm University, Sweden. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, Makerere University, Kampala, Uganda. Department of Mathematics, Makerere University, Kampala, Uganda. Nonlinearly  Perturbed Markov Chains  and  Information Networks2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 51-79Konferensbidrag (Refereegranskat) The paper is devoted to studies of perturbed Markov chains commonly used for description of information networks. In such models, the matrix of transition probabilities for the corresponding Markov chain is usually regularised by adding  a special damping matrix multiplied by a small damping (perturbation) parameter ε. In this paper, we present results of the detailed perturbation analysis of Markov chains with damping component and numerical experiments supporting and illustrating the results of this perturbation analysis. • 5. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola. Mälardalens högskola. 
Asymptotics of Implied Volatility in the Gatheral Double Stochastic Volatility Model2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 81-90Konferensbidrag (Refereegranskat) The double-mean-reverting model by Gatheral [1] is motivated by empirical dynamics of the variance of the stock price. No closed-form solution for European option exists in the above model. We study the behaviour of the implied volatility with respect to the logarithmic strike price and maturity near expiry and at-the- money. Using the method by Pagliarani and Pascucci [6], we calculate explicitly the first few terms of the asymptotic expansion of the implied volatility within a parabolic region. • 6. WorldLight.com AB, Sweden. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. The mathematics of internet search engines2008Ingår i: Acta Applicandae Mathematicae - An International Survey Journal on Applying Mathematics and Mathematical Applications, ISSN 0167-8019, E-ISSN 1572-9036, Vol. 104, nr 2, s. 211-242Artikel i tidskrift (Refereegranskat) This article presents a survey of techniques for ranking results in search engines, with emphasis on link-based ranking methods and the PageRank algorithm. The problem of selecting, in relation to a user search query, the most relevant documents from an unstructured source such as the WWW is discussed in detail. The need for extending classical information retrieval techniques such as boolean searching and vector space models with link-based ranking methods is demonstrated. The PageRank algorithm is introduced, and its numerical and spectral properties are discussed. The article concludes with an alternative means of computing PageRank, along with some example applications of this new method. • 7. 
Riga Technical University, Latvia. Riga Technical University, Latvia. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Algorithms of the Copula Fit to the Nonlinear Processes in the Utility Industry2017Ingår i: Procedia Computer Science, ISSN 1877-0509, E-ISSN 1877-0509, Vol. 104, s. 572-577Artikel i tidskrift (Refereegranskat) Our research studies the construction and estimation of copula-based semi parametric Markov model for the processes, which involved in water flows in the hydro plants. As a rule analyzing the dependence structure of stationary time series regressive models defined by invariant marginal distributions and copula functions that capture the temporal dependence of the processes is considered. This permits to separate out the temporal dependence (such as tail dependence) from the marginal behavior (such as fat tails) of a time series. Dealing with utility company data we have found the best copula describing data - Gumbel copula. As a result constructed algorithm was used for an imitation of low probability events (in a hydro power industry) and predictions. • 8. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. A comparison of graph centrality measures based on random walks and their computation2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 
121-135Konferensbidrag (Refereegranskat) When working with a network it is often of interest to locate the "most important" nodes in the network. A common way to do this is using some graph centrality measures. Since what constitutes an important node is different between different networks or even applications on the same network there is a large amount of different centrality measures proposed in the literature. Due to the large amount of different centrality measures proposed in different fields, there is also a large amount of very similar or equivalent centrality measures in the sense that they give the same ranks. In this paper we will focus on centrality measures based on powers of the adjacency matrix or similar matrices and those based on random walk in order to show how some of these are related and can be calculated efficiently using the same or slightly altered algorithms. • 9. Faculty of Sciences, Dept of Mathematics and Computer Sciences, Eduardo Mondlane University, Mozambique. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Calibration of Multiscale Two-Factor Stochastic Volatility Models: A Second-Order Asymptotic Expansion Approach2018Ingår i: / [ed] Christos H Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2018Konferensbidrag (Refereegranskat) The development of financial markets imposes more complex models on the option pricing problems. 
On the previous papers by the authors, we consider a model under which the underlying asset is driven by two independent Heston-type stochastic volatility processes of multiscale (fast and slow) mean-reverting rates and we compute an approximate solution for the option pricing problem, using asymptotic expansion method. In the present paper, we aim to calibrate the model using the market prices of options on Euro Stoxx 50 index and an equity stock in the European market. Our approach is to use the market implied volatility surface for calibrating directly a set of new parameters required in our second-order asymptotic expansion pricing formula for European options. This secondorder asymptotic expansion formula provides a better approximation formula for European option prices than the first-order formula, as explained in an earlier work of the authors. • 10. Biffi, Elena Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Monte Carlo semi-Markov methods  for credit risk migration and Basel II rules II2008Ingår i: Journal of Numerical and Applied Mathematics, Vol. 96, s. 59-86Artikel i tidskrift (Refereegranskat) • 11. Biffi, Elena Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Monte Carlo semi-Markov methods  for credit risk migration and Basel II rules. I.2008Ingår i: Journal of Numerical and Applied Mathematics, ISSN 0868-6912, Vol. 1, s. 28-58Artikel i tidskrift (Refereegranskat) • 12. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, College of Natural and Applied Sciences, University of Dar es Salaam,Tanzania. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Department of Mathematics, School of Physical Sciences, Makerere University, Kampala, Uganda. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. PageRank and perturbed Markov chains2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 233-247Konferensbidrag (Refereegranskat) PageRank is a widely-used hyperlink-based algorithm to estimate the relative importance of nodes in networks [11]. Since many real world networks are large sparse networks, this makes efficient calculation of PageRank complicated. Moreover, one needs to escape from dangling effects in some cases as well as slow convergence of the transition matrix. Primitivity adjustment with a damping (perturbation) parameter $\varepsilon \in (0, \varepsilon_0]$ (for fixed $\varepsilon_0 \simeq 0.15$) is one of the essential procedures that is known to ensure convergence of the transition matrix [24]. If ε is large, the transition matrix loses information due to shift of information to the teleportation matrix [27]. In this paper, we formulate the PageRank problem as the first and second order Markov chains perturbation problem. Using numerical experiments, we compare convergence rates for the two problems for different values of ε on different graph structures and investigate the difference in ranks for the two problems. • 13. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Analysis of Key Comparisons with Two Reference Standards: Extended Random Effects Meta-AnalysisIngår i: Advanced Mathematical and Computational Tools in Metrology and Testing XI / [ed] Alistair B Forbes, Nien-Fan Zhang, Anna Chunovkina, Sascha Eichstädt, Singapore: World ScientificKapitel i bok, del av antologi (Refereegranskat) We propose a statistical method for analyzing key comparisons with two transfer standards measured in two petals. The new approach is based on an extension of the established random effects model. A full Bayesian analysis based on the reference prior is developed and analytic expressions for the results are derived. One benefit of the suggested approach is that it provides a comprehensive assessment of the laboratory biases in terms of their posterior distributions. Another advantage is that it can easily be applied in practice. The approach is illustrated for the CCM.M-K7 key comparison data. • 14. Physikalisch-Technische Bundesanstalt, Germany. Physikalisch-Technische Bundesanstalt, Germany. Assessment of Vague and Noninformative Priors for Bayesian Estimation of the Realized Random Effects in Random Effects Meta-Analysis2018Ingår i: AStA Advances in Statistical Analysis, ISSN 1863-8171, E-ISSN 1863-818X, Vol. 102, s. 1-20Artikel i tidskrift (Refereegranskat) Random-effects meta-analysis has become a well-established tool applied in many areas, for example, when combining the results of several clinical studies on a treatment effect. Typically, the inference aims at the common mean and the amount of heterogeneity. In some applications, the laboratory effects are of interest, for example, when assessing uncertainties quoted by laboratories participating in an interlaboratory comparison in metrology. We consider the Bayesian estimation of the realized random effects in random-effects meta-analysis. Several vague and noninformative priors are examined as well as a proposed novel one. 
Conditions are established that ensure propriety of the posteriors for the realized random effects. We present extensive simulation results that assess the inference in dependence on the choice of prior as well as mis-specifications in the statistical model. Overall good performance is observed for all priors with the novel prior showing the most promising results. Finally, the uncertainties reported by eleven national metrology institutes and universities for their measurements on the Newtonian constant of gravitation are assessed. • 15. Physikalisch-Technische Bundesanstalt, Germany. Physikalisch-Technische Bundesanstalt, Germany. Physikalisch-Technische Bundesanstalt, Germany. Robust Bayesian Linear Regression with Application to an Analysis of the CODATA Values for the Planck Constant2017Ingår i: Metrologia, ISSN 0026-1394, E-ISSN 1681-7575, Vol. 55, nr 1, s. 20-28Artikel i tidskrift (Refereegranskat) Weighted least-squares estimation is commonly applied in metrology to fit models to measurements that are accompanied with quoted uncertainties. The weights are chosen in dependence on the quoted uncertainties. However, when data and model are inconsistent in view of the quoted uncertainties, this procedure does not yield adequate results. When it can be assumed that all uncertainties ought to be rescaled by a common factor, weighted least-squares estimation may still be used, provided that a simple correction of the uncertainty obtained for the estimated model is applied. We show that these uncertainties and credible intervals are robust, as they do not rely on the assumption of a Gaussian distribution of the data. Hence, common software for weighted least-squares estimation may still safely be employed in such a case, followed by a simple modification of the uncertainties obtained by that software. We also provide means of checking the assumptions of such an approach. 
The Bayesian regression procedure is applied to analyze the CODATA values for the Planck constant published over the past decades in terms of three different models: a constant model, a straight line model and a spline model. Our results indicate that the CODATA values may not have yet stabilized • 16. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. National Institute of Standards and Technology, USA. Approximate Bayesian Evaluations of Measurement Uncertainty2018Konferensbidrag (Övrigt vetenskapligt) • 17. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. National Institute of Standards and Technology, USA. Approximate Bayesian evaluations of measurement uncertainty2018Ingår i: Metrologia, ISSN 0026-1394, E-ISSN 1681-7575, Vol. 55, s. 147-157Artikel i tidskrift (Refereegranskat) The Guide to the Expression of Uncertainty in Measurement (GUM) includes formulas that produce an estimate of a scalar output quantity that is a function of several input quantities, and an approximate evaluation of the associated standard uncertainty. This contribution presents approximate, Bayesian counterparts of those formulas for the case where the output quantity is a parameter of the joint probability distribution of the input quantities, also taking into account any information about the value of the output quantity available prior to measurement expressed in the form of a probability distribution on the set of possible values for the measurand. The approximate Bayesian estimates and uncertainty evaluations that we present have a long history and illustrious pedigree, and provide sufficiently accurate approximations in many applications, yet are very easy to implement in practice. 
Differently from exact Bayesian estimates, which involve either (analytical or numerical) integrations, or Markov Chain Monte Carlo sampling, the approximations that we describe involve only numerical optimization and simple algebra. Therefore, they make Bayesian methods widely accessible to metrologists. We illustrate the application of the proposed techniques in several instances of measurement: isotopic ratio of silver in a commercial silver nitrate; odds of cryptosporidiosis in AIDS patients; height of a manometer column; mass fraction of chromium in a reference material; and potential-difference in a Zener voltage standard. • 18. Boulougari, Andromachi Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Application of a power-exponential function-based model to mortality rates forecasting2019Ingår i: Communications in Statistics: Case Studies, Data Analysis and Applications, E-ISSN 2373-7484, Vol. 5, nr 1, s. 3-10Artikel i tidskrift (Refereegranskat) There are many models for mortality rates. A well-known problem that complicates modeling of human mortality rates is the “accident hump” occurring in early adulthood. Here, two models of mortality rate based on power-exponential functions are presented and compared to a few other models. The models will be fitted to known data of measured death rates from several different countries using numerical techniques for curve-fitting with the nonlinear least-squares method. The properties of the model with respect to forecasting with the Lee–Carter method will be discussed. • 19. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Numerical Studies on Asymptotics of European Option under Multiscale Stochastic Volatility2015Ingår i: ASMDA 2015 Proceedings: 16th Applied Stochastic Models and Data Analysis International Conference with 4th Demographics 2015 Workshop / [ed] Christos H Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2015, s. 53-66Konferensbidrag (Refereegranskat) Multiscale stochastic volatilities models relax the constant volatility assumption from Black-Scholes option pricing model. Such model can capture the smile and skew of volatilities and therefore describe more accurately the movements of the trading prices. Christoffersen et al. [3] presented a model where the underlying priceis governed by two volatility components, one changing fast and another changing slowly. Chiarella and Ziveyi [2] transformed Christoffersen’s model and computed an approximate formula for pricing American options. They used Duhamel’s principle to derive an integral form solution of the boundary value problem associated to the option price. Using method of characteristics, Fourier and Laplace transforms, they obtained with good accuracy the American options prices. In a previous research of the authors (Canhanga et al. [1]), a particular case of Chiarella and Ziveyi [2] model is used for pricing of European options. The novelty of this earlier work is to present an asymptotic expansion for the option price. 
The present paper provides experimental and numerical studies on investigating the accuracy of the approximation formulae given by this asymptotic expansion. We present also a procedure for calibrating the parameters produced by our first-order asymptotic approximation formulae. Our approximated option prices will be compared to the approximation obtained by Chiarella and Ziveyi [2]. 1. Canhanga B., Malyarenko, A., Ni, Y. and Silvestrov S. Perturbation methods for pricing European options in a model with two stochastic volatilities. 3rd SMTDA Conference Proceedings. 11-14 June 2014, Lisbon, Portugal, C. H. Skiadas (Ed.) 489-500 (2014). 2. Chiarella, C., and Ziveyi, J. American option pricing under two stochastic volatility processes. J. Appl. Math. Comput. 224:283–310 (2013). 3. Christoffersen, P.; Heston, S.; Jacobs, K. The shape and term structure of the index option smirk: why multifactor stochastic volatility models work so well. Manage. Sci. 55 (2) 1914-1932; (2009). • 20. DMI, Eduardo Mondlane University, Maputo, Mozambique. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Numerical Studies on Asymptotics of European Option Under Multiscale Stochastic Volatility2017Ingår i: Methodology and Computing in Applied Probability, ISSN 1387-5841, E-ISSN 1573-7713, Vol. 19, nr 4, s. 1075-1087Artikel i tidskrift (Refereegranskat) Multiscale stochastic volatilities models relax the constant volatility assumption from Black-Scholes option pricing model. Such models can capture the smile and skew of volatilities and therefore describe more accurately the movements of the trading prices. 
Christoffersen et al. Manag Sci 55(2):1914–1932 (2009) presented a model where the underlying price is governed by two volatility components, one changing fast and another changing slowly. Chiarella and Ziveyi Appl Math Comput 224:283–310 (2013) transformed Christoffersen’s model and computed an approximate formula for pricing American options. They used Duhamel’s principle to derive an integral form solution of the boundary value problem associated to the option price. Using method of characteristics, Fourier and Laplace transforms, they obtained with good accuracy the American option prices. In a previous research of the authors (Canhanga et al. 2014), a particular case of Chiarella and Ziveyi Appl Math Comput 224:283–310 (2013) model is used for pricing of European options. The novelty of this earlier work is to present an asymptotic expansion for the option price. The present paper provides experimental and numerical studies on investigating the accuracy of the approximation formulae given by this asymptotic expansion. We present also a procedure for calibrating the parameters produced by our first-order asymptotic approximation formulae. Our approximated option prices will be compared to the approximation obtained by Chiarella and Ziveyi Appl Math Comput 224:283–310 (2013). • 21. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Pricing European Options Under Stochastic Volatilities Models2016Ingår i: Engineering Mathematics I: Electromagnetics, Fluid Mechanics, Material Physics and Financial Engineering / [ed] Sergei Silvestrov; Milica Rancic, Springer, 2016, s. 
315-338Kapitel i bok, del av antologi (Refereegranskat) Interested by the volatility behavior, different models have been developed for option pricing. Starting from constant volatility model which did not succeed on capturing the effects of volatility smiles and skews; stochastic volatility models appearas a response to the weakness of the constant volatility models. Constant elasticity of volatility, Heston, Hull and White, Schöbel-Zhu, Schöbel-Zhu-Hull-Whiteand many others are examples of models where the volatility is itself a random process. Along the chapter we deal with this class of models and we present the techniques of pricing European options. Comparing single factor stochastic volatility models to constant factor volatility models it seems evident that the stochastic volatility models represent nicely the movement of the asset price and its relations with changes in the risk. However, these models fail to explain the large independent fluctuations in the volatility levels and slope. Christoffersen et al. in [4] proposed a model with two-factor stochastic volatilities where the correlation between the underlying asset price and the volatilities varies randomly. In the last section of this chapter we introduce a variation of Chiarella and Ziveyi model, which is a subclass of the model presented in [4] and we use the first order asymptotic expansion methods to determine the price of European options. • 22. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Faculty of Sciences, Department of Mathematics and Computer Sciences, Eduardo Mondlane University, Maputo, Mozambique. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Analytical and Numerical Studies on the Second Order Asymptotic Expansion Method for European Option Pricing under Two-factor Stochastic Volatilities2018Ingår i: Communications in Statistics - Theory and Methods, ISSN 0361-0926, E-ISSN 1532-415X, Vol. 47, nr 6, s. 1328-1349Artikel i tidskrift (Refereegranskat) The celebrated Black–Scholes model made the assumption of constant volatility but empirical studies on implied volatility and asset dynamics motivated the use of stochastic volatilities. Christoffersen in 2009 showed that multi-factor stochastic volatilities models capture the asset dynamics more realistically. Fouque in 2012 used it to price European options. In 2013 Chiarella and Ziveyi considered Christoffersen's ideas and introduced an asset dynamics where the two volatilities of the Heston type act separately and independently on the asset price, and using Fourier transform for the asset price process and double Laplace transform for the two volatilities processes, solved a pricing problem for American options. This paper considers the Chiarella and Ziveyi model and parameterizes it so that the volatilities revert to the long-run-mean with reversion rates that mimic fast(for example daily) and slow(for example seasonal) random effects. Applying asymptotic expansion method presented by Fouque in 2012, we make an extensive and detailed derivation of the approximation prices for European options. We also present numerical studies on the behavior and accuracy of our first and the second order asymptotic expansion formulas. • 23. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Perturbation Methods for Pricing European Options in a Model with Two Stochastic Volatilities2015Ingår i: New Trends in Stochastic Modelling and Data Analysis / [ed] Raimondo Manca, Sally McClean, Christos H Skiadas, ISAST , 2015, s. 199-210Kapitel i bok, del av antologi (Refereegranskat) Financial models have to reflect the characteristics of markets in which they are developed to be able to predict the future behavior of a financial system. The nature of most trading environments is characterized by uncertainties which are expressed in mathematical models in terms of volatilities. In contrast to the classical Black-Scholes model with constant volatility, our model includes one fast-changing and another slow-changing stochastic volatilities of mean-reversion type. The different changing frequencies of volatilities can be interpreted as the effects of weekends and effects of seasons of the year (summer and winter) on the asset price. We perform explicitly the transition from the real-world to the risk-neutral probability measure by introducing market prices of risk and applying Girsanov Theorem. To solve the boundary value problem for the partial differential equation that corresponds to the case of a European option, we perform both regular and singular multiscale expansions in fractional powers of the speed of mean-reversion factors. We then construct an approximate solution given by the two-dimensional Black-Scholes model plus some terms that expand the results obtained by Black and Scholes. • 24. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Numerical Methods on European Options Second Order Asymptotic Expansions for Multiscale Stochastic Volatility2017Ingår i: INCPAA 2016 Proceedings: 11th International Conference on Mathematical Problems in Engineering, Aerospace, and Sciences, ICNPAA 2016, La Rochelle, France, 4 - 8 July 2016. / [ed] S. Sivasundaram, 2017, Vol. 1798, s. 020035-1-020035-10, artikel-id 020035Konferensbidrag (Refereegranskat) After Black-Scholes proposed a model for pricing European Option in 1973, Cox, Ross and Rubinstein in 1979, and Heston in 1993, showed that the constant volatility assumption in the Black-Scholes model was one of the main reasons for the model to be unable to capture some market details. Instead of constant volatilities, they introduced non-constant volatilities to the asset dynamic modeling. In 2009, Christoffersen empirically showed "why multi-factor stochastic volatility models work so well". Four years later, Chiarella and Ziveyi solved the model proposed by Christoffersen. They considered an underlying asset whose price is governed by two factor stochastic volatilities of mean reversion type. Applying Fourier transforms, Laplace transforms and the method of characteristics they presented an approximate formula for pricing American option.The huge calculation involved in the Chiarella and Ziveyi approach motivated us to investigate another approach to compute European option prices on a Christoffersen type model. Using the first and second order asymptotic expansion method we presented a closed form solution for European option, and provided experimental and numerical studies on investigating the accuracy of the approximation formulae given by the first order asymptotic expansion. 
In the present chapter we will perform experimental and numerical studies for the second order asymptotic expansion and compare the obtained results with results presented by Chiarella and Ziveyi. • 25. Riga Technical University. Malyarenko, AnatoliyMälardalens högskola, Akademin för utbildning, kultur och kommunikation.Pärna, KalevUniversity of Tartu. Exploring the world of financial engineering2011Samlingsverk (redaktörskap) (Övrigt vetenskapligt) Mälardalen University (Sweden), Riga Technical University (Latvia) and University of Tartu (Estonia) organised courses “Exploring the world of financial engineering" for teachers and students of the above higher education institutions under financial support of the Nordplus Framework mobility project HE-2010_1a-21005. These courses take place in the city of Västerås (Sweden) on May 9–May 13, 2011. In this book, we present the material of the courses’ lectures. • 26. Mälardalens högskola, Institutionen för matematik och fysik. Weak Convergence of First-Rare-Event Times for Semi-Markov Processes2007Doktorsavhandling, monografi (Övrigt vetenskapligt) I denna avhandling studerar vi nödvändiga och tillräckliga villkor för svag konvergens av första-sällan-händelsetider för semi-Markovska processer. I introduktionen ger vi nödvändiga grundläggande definitioner och beskrivningar av modeller som betraktas i avhandlingen, samt ger några exempel på situationer i vilka metoder av första-sällan-händelsetider kan vara lämpliga att använda. Dessutom analyserar vi publicerade resultat om asymptotiska problem för stokastiska funktionaler som definieras på semi-Markovska processer. I artikel A betraktar vi första-sällan-händelsetider för semi-Markovska processer med en ändlig mängd av lägen. Vi ger också en sammanfattning av våra resultat om nödvändiga och tillräckliga villkor för svag konvergens, samt diskuterar möjliga tillämpningar inom aktuarie-området. 
I artikel B redovisar vi i detalj de resultat som annonseras i artikel A och bevisen för dem. Vi ger också nödvändiga och tillräckliga villkor för svag konvergens av första-sällan-händelsetider för semi-Markovska processer med en ändlig mängd av lägen i ett icke-triangulärt tillstånd. Dessutom beskriver vi med hjälp av Laplacetransformationen klassen av alla möjliga gränsfördelningar. I artikel C studerar vi villkor av svag konvergens av flöden av sällan-händelser i ett icke-triangulärt tillstånd. Vi formulerar nödvändiga och tillräckliga villkor för konvergens, och beskriver klassen av alla möjliga gränsflöden. Vi tillämpar också våra resultat i asymptotisk analys av icke-ruin-sannolikheten för störda riskprocesser. I artikel D ger vi nödvändiga och tillräckliga villkor för svag konvergens av första-sällan-händelsetider för semi-Markovska processer med en ändlig mängd av lägen i ett triangulärt tillstånd, samt beskriver klassen av alla möjliga gränsfördelningar. Resultaten utvidgar slutsatser från artikel B till att gälla för ett allmänt triangulärt tillstånd. I artikel E ger vi nödvändiga och tillräckliga villkor för svag konvergens av flöden av sällan-händelser för semi-Markovska processer i ett triangulärt tillstånd. Detta generaliserar resultaten från artikel C till att beskriva ett allmänt triangulärt tillstånd. Vidare ger vi tillämpningar av våra resultat på asymptotiska problem av störda riskprocesser och till kösystemen med snabb service. • 27. CNRS UMR 8163 STL, Université Lille 3, 59653 Villeneuve d’Ascq, France. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. LIM&BIO UFR SMBH Université Paris 13, France. CNRS UMR 8163 STL, Université Lille 3, 59653 Villeneuve d’Ascq, France. 
Comparison of Clustering Approaches through Their Application to Pharmacovigilance Terms2013Ingår i: Artificial Intelligence in Medicine. Lecture Notes in Computer Science, vol. 7885 / [ed] Niels Peek, Roque Marín Morales, Mor Peleg, Berlin Heidelberg: Springer, 2013, s. 58-67Kapitel i bok, del av antologi (Refereegranskat) In different applications (i.e., information retrieval, filtering or analysis), it is useful to detect similar terms and to provide the possibility to use them jointly. Clustering of terms is one of the methods which can be exploited for this. In our study, we propose to test three methods dedicated to the clustering of terms (hierarchical ascendant classification, Radius and maximum), to combine them with the semantic distance algorithms and to compare them through the results they provide when applied to terms from the pharmacovigilance area. The comparison indicates that the non disjoint clustering (Radius and maximum) outperform the disjoint clusters by 10 to up to 20 points in all the experiments. • 28. Stockholm University. Stockholm University. Coupling and explicit rates of convergence in Cramér-Lundberg approximation for reinsurance risk processes2011Ingår i: Communications in Statistics - Theory and Methods, ISSN 0361-0926, E-ISSN 1532-415X, Vol. 40, nr 19-20, s. 3524-3539Artikel i tidskrift (Refereegranskat) A classical result in risk theory is the Cramér-Lundberg approximation which says that under some general conditions the exponentially normalized ruin probability converges. In this article, we state an explicit rate of convergence for the Cramér-Lundberg approximation for ruin probabilities in the case where claims are bounded, which is realistic for, e.g., reinsurance models. The method, used to get the corresponding results, is based on renewal and coupling arguments. • 29. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Generalisation of the Damping Factor in PageRank for Weighted Networks2014Ingår i: Modern Problems in Insurance Mathematics / [ed] Silvestrov, Dmitrii; Martin-Löf, Anders, Springer International Publishing , 2014, s. 313-333Kapitel i bok, del av antologi (Refereegranskat) In this article we will look at the PageRank algorithm used to rank nodes in a network. While the method was originally used by Brin and Page to rank home pages in order of “importance”, since then many similar methods have been used for other networks such as financial or P2P networks. We will work with a non-normalised version of the usual PageRank definition which we will then generalise to enable better options, such as adapting the method or allowing more types of data. We will show what kind of effects the new options creates using examples as well as giving some thoughts on what it can be used for. We will also take a brief look at how adding new connections between otherwise unconnected networks can change the ranking. • 30. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. PageRank, a Look at Small Changes in a Line of Nodes and the Complete Graph2016Ingår i: Engineering Mathematics II: Algebraic, Stochastic and Analysis Structures for Networks, Data Classification and Optimization / [ed] Sergei Silvestrov; Milica Rancic, Springer, 2016, s. 223-247Kapitel i bok, del av antologi (Refereegranskat) • 31. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. 
PageRank, Connecting a Line of Nodes with a Complete Graph2016Ingår i: Engineering Mathematics II: Algebraic, Stochastic and Analysis Structures for Networks, Data Classification and Optimization / [ed] Sergei Silvestrov; Milica Rancic, Springer, 2016Kapitel i bok, del av antologi (Refereegranskat) The focus of this article is the PageRank algorithm originally defined by S. Brin and L. Page as the stationary distribution of a certain random walk on a graph used to rank homepages on the Internet. We will attempt to get a better understanding of how PageRank changes after you make some changes to the graph such as adding or removing edge between otherwise disjoint subgraphs. In particular we will take a look at link structures consisting of a line of nodes or a complete graph where every node links to all others and different ways to combine the two. Both the ordinary normalized version of PageRank as well as a non-normalized version of PageRank found by solving corresponding linear system will be considered. We will see that it is possible to find explicit formulas for the PageRank in some simple link structures and using these formulas take a more in-depth look at the behavior of the ranking as the system changes. • 32. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. PageRank for networks, graphs and Markov chains2017Ingår i: Theory of Probability and Mathematical Statistics, ISSN 0868-6904, Vol. 96, s. 61-83Artikel i tidskrift (Refereegranskat) In this work it is described how a partitioning of a graph into components can be used to calculate PageRank in a large network and how such a partitioning can be used to re-calculate PageRank as the network changes. 
Although considered problem is that of calculating PageRank, it is worth to note that the same partitioning method could be used when working with Markov chains in general or solving linear systems as long as the method used for solving a single component is chosen appropriately. An algorithm for calculating PageRank using a modified partitioning of the graph into strongly connected components is described. Moreover, the paper focuses also on the calculation of PageRank in a changing graph from two different perspectives, by considering specific types of changes in the graph and calculating the difference in rank before and after certain types of edge additions or removals between components. Moreover, some common specific types of graphs for which it is possible to find analytic expressions for PageRank are considered, and in particular the complete bipartite graph and how PageRank can be calculated for such a graph. Finally, several open directions and problems are described. • 33. Mälardalens högskola, Institutionen för matematik och fysik. Statistical and combinatorial aspects of comparative genomics2004Ingår i: Scandinavian Journal of Statistics, ISSN 0303-6898, Vol. 31, nr 2, s. 203-216Artikel, forskningsöversikt (Refereegranskat) This document presents a survey of the statistical and combinatorial aspects of four areas of comparative genomics: gene order based measures of evolutionary distances between species, construction of phylogenetic trees, detection of horizontal transfer of genes, and detection of ancient whole genome duplications. • 34. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Stockholm Univ, Stockholm, Sweden. Stockholm Univ, Stockholm, Sweden. Stockholm Univ, Stockholm, Sweden. Social learning may lead to population level conformity without individual level frequency bias2017Ingår i: Scientific Reports, ISSN 2045-2322, E-ISSN 2045-2322, Vol. 
7, artikel-id 17341Artikel i tidskrift (Refereegranskat) A requirement of culture, whether animal or human, is some degree of conformity of behavior within populations. Researchers of gene-culture coevolution have suggested that population level conformity may result from frequency-biased social learning: individuals sampling multiple role models and preferentially adopting the majority behavior in the sample. When learning from a single role model, frequency-bias is not possible. We show why a population-level trend, either conformist or anticonformist, may nonetheless be almost inevitable in a population of individuals that learn through social enhancement, that is, using observations of others' behavior to update their own probability of using a behavior in the future. The exact specification of individuals' updating rule determines the direction of the trend. These results offer a new interpretation of previous findings from simulations of social enhancement in combination with reinforcement learning, and demonstrate how results of dynamical models may strongly depend on seemingly innocuous choices of model specifications, and how important it is to obtain empirical data on which to base such choices. • 35. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Stockholm University. Chalmers University of Technology, Sweden. Lord's Paradox in a Continuous Setting and a Regression Artifact in Numerical Cognition Research.2014Ingår i: PLoS ONE, ISSN 1932-6203, E-ISSN 1932-6203, Vol. 9, nr 4, s. e95949-e95949Artikel i tidskrift (Refereegranskat) In this paper we review, and elaborate on, the literature on a regression artifact related to Lord's paradox in a continuous setting. Specifically, the question is whether a continuous property of individuals predicts improvement from training between a pretest and a posttest. 
If the pretest score is included as a covariate, regression to the mean will lead to biased results if two critical conditions are satisfied: (1) the property is correlated with pretest scores and (2) pretest scores include random errors. We discuss how these conditions apply to the analysis in a published experimental study, the authors of which concluded that linearity of children's estimations of numerical magnitudes predicts arithmetic learning from a training program. However, the two critical conditions were clearly met in that study. In a reanalysis we find that the bias in the method can fully account for the effect found in the original study. In other words, data are consistent with the null hypothesis that numerical magnitude estimations are unrelated to arithmetic learning. • 36. Pulchowk Campus, Institute of Engineering, Tribhuvan University, Nepal. Pulchowk Campus, Institute of Engineering, Tribhuvan University, Nepal. Department of Mathematical Sciences, School of Science, Kathmandu University, Nepal. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. A Survey on Queueing Systems with Mathematical Models and Applications2017Ingår i: American Journal of Operational Research, ISSN 2324-6537, E-ISSN 2324-6545, ISSN 2324-6537, Vol. 7, nr 1, s. 1-14Artikel i tidskrift (Refereegranskat) Queuing systems consist of one or more servers that provide some sort of services to arriving customers. Almost everyone has some experience of tedious time being in a queue during several daily life activities. It is reasonable to accept that service should be provided to the one who arrives first in the queue. But this rule always may not work. Sometimes the last comer or the customer in the high priority gets service earlier than the one who is waiting in the queue for a long time. All these characteristics are the interesting areas of research in the queueing theory. 
In this paper, we present some of the previous works of various researchers with brief explanations. We then carry out some of the mathematical expressions which represent the different queueing behaviors. In almost all the literatures, these queueing behaviors are examined with the help of mathematical simulations. Based on the previous contributions of researchers, our specific point of attraction is to study the finite capacity queueing models in which limited number of customers are served by a single or multiple number of servers and the batch queueing models where arrival or service or both occur in a bulk. Furthermore, we present some performance measure equations of some queueing models together with necessary components used in the queueing theory. Finally, we report some applications of queueing systems in supply chain management pointing out some areas of research as further works. • 37. Department of Informatics, University of Electro-communications, Tokyo, Japan. Department of Informatics, University of Electro-communications, Tokyo, Japan. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. American Option Pricing under Markovian Regime Switching Model2019Ingår i: Proceedings of 18th Applied Stochastic Models and Data Analysis International Conference with the Demographics 2019 Workshop, Florence, Italy: 11-14 June, 2019 / [ed] Christos H. Skiadas, ISAST: International Society for the Advancement of Science and Technology , 2019, s. 515-522Konferensbidrag (Refereegranskat) In this research, we consider the pricing of American options when the price dynamics of the underlying risky assets is governed by Markovian regime switching process. We assume that the price dynamics depends on the economy, the state of which transits based on a discrete-time Markov chain. 
The underlying economy cannot be known directly but can be partially observed by receiving a signal stochastically related to the real state of economy. The pricing procedure and optimal stopping problem are formulated using partially observable Markov decision process, and some structural properties of the resulting optimal expected payoff functions are derived under certain assumptions. These properties establish the existence of a monotonic policy with respect to the holding time, asset price, and economic conditions. • 38. Jonsson, Markus An exponential limit shape of random q-proportion Bulgarian solitaireManuskript (preprint) (Övrigt vetenskapligt) We introduce $p_n$-random $q_n$-proportion Bulgarian solitaire ($0 < p_n \le 1$, $0 < q_n \le 1$), played on $n$ cards distributed in piles. In each pile, a number of cards equal to the proportion $q_n$ of the pile size rounded upward to the closest integer are candidates to be picked. Each candidate card is picked with probability $p_n$, independently of other candidate cards. This generalizes Popov's random Bulgarian solitaire, in which there is a single candidate card in each pile. Popov showed that a triangular limit shape is obtained for a fixed $p$ as $n$ tends to infinity. Here we let both $p_n$ and $q_n$ vary with $n$. We show that under the conditions $q_n^2 p_n n/{\log n}\rightarrow \infty$ and $p_n q_n \rightarrow 0$ as $n\to\infty$, the $p_n$-random $q_n$-proportion Bulgarian solitaire has an exponential limit shape. • 39. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Optimal Linear Combinations of Portfolios Subject to Estimation Risk2015Självständigt arbete på avancerad nivå (masterexamen), 20 poäng / 30 hpStudentuppsats (Examensarbete) The combination of two or more portfolio rules is theoretically convex in return-risk space, which provides for a new class of portfolio rules that gives purpose to the Mean-Variance framework out-of-sample. 
The author investigates the performance loss from estimation risk between the unconstrained Mean-Variance portfolio and the out-of-sample Global Minimum Variance portfolio. A new two-fund rule is developed in a specific class of combined rules, between the equally weighted portfolio and a mean-variance portfolio with the covariance matrix being estimated by linear shrinkage. The study shows that this rule performs well out-of-sample when covariance estimation error and bias are balanced. The rule is performing at least as good as its peer group in this class of combined rules. • 40. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Momentum Investment Strategies with Portfolio Optimization: A Study on Nasdaq OMX Stockholm Large Cap2014Självständigt arbete på grundnivå (kandidatexamen), 10 poäng / 15 hpStudentuppsats (Examensarbete) This report covers a study testing the possibility of adding portfolio optimization by mean-variance analysis as a tool to extend the concept of momentum strategies in contrast to naive allocation formed by Jegadeesh & Titman (1993). Further these active investment strategies are compared with a passive benchmark as well as a randomly selected portfolio over the entire study-period. The study showed that the naive allocation model outperformed the mean-variance model both economically as well as statistically. No indication where obtained for a lagged return effect when letting a mean-variance model choose weights for a quarterly holding period and the resulting investment recommendation is to follow a naive investment strategy within a momentum framework. • 41. Cardiff University, UK. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Matérn Class Tensor-Valued Random Fields and Beyond2017Ingår i: Journal of statistical physics, ISSN 0022-4715, E-ISSN 1572-9613, Vol. 168, s. 
1276-1301Artikel i tidskrift (Refereegranskat) We construct classes of homogeneous random fields on a three-dimensional Euclidean space that take values in linear spaces of tensors of a fixed rank and are isotropic with respect to a fixed orthogonal representation of the group of 3 × 3 orthogonal matrices. The constructed classes depend on finitely many isotropic spectral densities. We say that such a field belongs to either the Matérn or the dual Matérn class if all of the above densities are Matérn or dual Matérn. Several examples are considered. • 42. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation. Market illiquidity and market excess return: Cross-section and time-series effects: A study of the Shanghai stock exchange2013Självständigt arbete på avancerad nivå (masterexamen), 20 poäng / 30 hpStudentuppsats (Examensarbete) The purpose of the current paper is to explore the cross-sectional relationship between market illiquidity and market excess return on stocks traded in the Shanghai Stock Exchange (SSE) over time; using data from monthly and yearly databases of CSMAR (China Securities Market and Accounting Research) and statistics annual Shanghai Stock Exchange from 2001.1-2012.12. We believe that the empirical tests on the stocks traded in the New York Stock Exchange (NYSE) of the well-established paper by Amihud (2002) would be potentially useful to be tested in a different setting, the SSE; in doing so, we apply the same illiquidity measure and estimating models to examine the hypotheses of the current study. In consideration of the aim of the current study, an illiquidity measure proposed by a Chinese scholar Huang (2009) is also applied in the empirical tests. 
Due to that Chinese stock market is still young and under development, any outcomes from the current study that are dissimilar to the ones appeared in Amihud(2002) in the sense of the effectiveness of market illiquidity have nothing to do with the utility of illiquidity theory; rather, different market characteristics should be taken into account, such as the unpredictability of frequent policy interventions on a Chinese stock market, following Wang Fang, Han Dong and Jiang Xianglin (2002). • 43. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Generalized Vandermonde matrices and determinants in electromagnetic compatibility2017Licentiatavhandling, sammanläggning (Övrigt vetenskapligt) Matrices whose rows (or columns) consists of monomials of sequential powers are called Vandermonde matrices and can be used to describe several useful concepts and have properties that can be helpful for solving many kinds of problems. In this thesis we will discuss this matrix and some of its properties as well as a generalization of it and how it can be applied to curve fitting discharge current for the purpose of ensuring electromagnetic compatibility. In the first chapter the basic theory for later chapters is introduced. This includes the Vandermonde matrix and some of its properties, history, applications and generalizations, interpolation and regression problems, optimal experiment design and modelling of electrostatic discharge currents with the purpose to ensure electromagnetic compatibility. The second chapter focuses on finding the extreme points for the determinant for the Vandermonde matrix on various surfaces including spheres, ellipsoids, cylinders and tori. The extreme points are analysed in three dimensions or more. 
The third chapter discusses fitting a particular model called the p-peaked Analytically Extended Function (AEF) to data taken either from a standard for electromagnetic compatibility or experimental measurements. More specifically the AEF will be fitted to discharge currents from the IEC 62305-1 and IEC 61000-4-2 standards for lightning protection and electrostatic discharge immunity as well as some experimentally measured data of similar phenomena. • 44. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Kenya. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Kenya. Construction of moment-matching multinomial lattices using Vandermonde matrices and Gröbner bases2017Ingår i: AIP Conference Proceedings / [ed] Sivasundaram, S, American Institute of Physics (AIP), 2017, Vol. 1798, s. 020094-1-020094-7, artikel-id 020094Konferensbidrag (Refereegranskat) In order to describe and analyze the quantitative behavior of stochastic processes, such as the process followed by a financial asset, various discretization methods are used. One such set of methods are lattice models where a time interval is divided into equal time steps and the rate of change for the process is restricted to a particular set of values in each time step. The well-known binomial- and trinomial models are the most commonly used in applications, although several kinds of higher order models have also been examined. Here we will examine various ways of designing higher order lattice schemes with different node placements in order to guarantee moment-matching with the process. • 45. 
Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Nairobi, Kenya. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Nairobi, Kenya. Asian Options, Jump-Diffusion Processes on a Lattice, and Vandermonde Matrices2014Ingår i: Modern Problems in Insurance Mathematics / [ed] Silvestrov, Dmitrii, Martin-Löf, Anders, Springer International Publishing , 2014, s. 335-363Kapitel i bok, del av antologi (Refereegranskat) Asian options are options whose value depends on the average asset price during its lifetime. They are useful because they are less subject to price manipulations. We consider Asian option pricing on a lattice where the underlying asset follows the Merton–Bates jump-diffusion model. We describe the construction of the lattice using the moment matching technique which results in an equation system described by a Vandermonde matrix. Using some properties of Vandermonde matrices we calculate the jump probabilities of the resulting system. Some conditions on the possible jump sizes in the lattice are also given. • 46. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Nairobi, Kenya. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nairobi, Nairobi, Kenya. 
Moment-matching multinomial lattices using Vandermonde matrices for option pricing2016Ingår i: Stochastic and Data Analysis Methods and Applications in Statistics and Demography: Book 2 / [ed] James R. Bozeman, Teresa Oliveira and Christos H. Skiadas, ISAST , 2016, Vol. 2, s. 15-29Konferensbidrag (Refereegranskat) Lattice models are discretization methods that divide the life of a financial option into time steps of equal length and model the underlying asset movement at each time step. A financial option of American or European style can be evaluated conveniently via backward induction using a lattice model. The most common lattice models are the well-known binomial- and trinomial lattice models, although several kinds of higher order models have also been examined in the literature. In the present paper we present an explicit scheme for creating a lattice model of arbitrary order and use the Vandermonde matrix to determine suitable parameters. Some selected models created using this scheme are examined with regard to their suitability for option pricing. • 47. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. University of Nis, Faculty of Electronic Eng., Serbia. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Novel Approach to Modelling of Lightning Current Derivative2017Ingår i: Facta Universitatis Series: Electronics and Energetics, ISSN 0353-3670, E-ISSN 2217-5997, Vol. 30, nr 2, s. 245-256Artikel i tidskrift (Refereegranskat) A new approach to mathematical modelling of lightning current derivative is proposed in this paper. It builds on the methodology, previously developed by the authors, for representing lightning currents and electrostatic discharge (ESD) currents waveshapes. 
It considers usage of a multi-peaked form of the analytically extended function (AEF) for approximation of current derivative waveshapes. The AEF function parameters are estimated using the Marquardt least-squares method (MLSM) and the framework for fitting the multi-peaked AEF to a waveshape with an arbitrary number of peaks is briefly described. This procedure is validated performing a few numerical experiments, including fitting the AEF to single- and multi-peaked waveshapes corresponding to measured current derivatives. • 48. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Mälardalens högskola, Akademin för utbildning, kultur och kommunikation, Utbildningsvetenskap och Matematik. Modelling mortality rates using the powerexponential function2017Ingår i: Booklet of abstracts of SPAS2017 - International Conference on Stochastic Processes and Algebraic Structures– From Theory Towards Applications, 2017Konferensbidrag (Refereegranskat) There are many models for the mortality rates of humans and other organisms. A phenomenon that complicates the modelling of human mortality rates is a rapid increase in mortality rate for young adults (in western Europe this is especially pronounced at the age of 25). We will examine models for mortality rates based on power-exponential functions, compare them to empirical data for mortality rates and other models. • 49. Mälardalens högskola, Institutionen för matematik och fysik. Optimal Stopping Domains for Discrete Time Knock Out American Options2007Ingår i: Recent Advances in Stochastic Modelling and Data Analysis: Chania, Greece, 29 May - 1 June 2007, World Scientific , 2007, s. 613-620Kapitel i bok, del av antologi (Refereegranskat) • 50. Mälardalens högskola, Institutionen för matematik och fysik. 
Structure of Optimal Stopping Domains for American Options with Knock out Domains2007Ingår i: Theory of Stochastic Processes, ISSN 0321-3900, Vol. 13(29), nr 4, s. 98-129Artikel i tidskrift (Refereegranskat) American options give us the possibility to exercise them at any moment of time up to maturity. An optimal stopping domain for American type options is a domain that, if the underlying price process enters we should exercise the option. A knock out option is a American barrier option of knock out type, but with more general shape structure of the knock out domain. An algorithm for generating the optimal stopping domain for American type knock out options is constructed. Monte Carlo simulation is used to determine the structure of the optimal stopping domain. Results of the structural, and stability of studies are presented for different models of payoff functions and knock out domains. 1234 1 - 50 av 168 RefereraExporteraLänk till träfflistan Permanent länk Referera Referensformat • apa • ieee • modern-language-association-8th-edition • vancouver • Annat format Fler format Språk • de-DE • en-GB • en-US • fi-FI • nn-NO • nn-NB • sv-SE • Annat språk Fler språk Utmatningsformat • html • text • asciidoc • rtf v. 2.35.9 | |
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 14, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.7467643618583679, "perplexity": 6140.848308900665}, "config": {"markdown_headings": false, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-10/segments/1581875146562.94/warc/CC-MAIN-20200226211749-20200227001749-00347.warc.gz"}
https://www.spruceid.dev/treeldr/treeldr-basics/properties
SpruceID Search… ⌃K # Properties There exists two ways of defining a property. One is to embed the property definition inside a type definition: base <https://example.com/>; type MyType { myProperty: Type // embedded property definition. } This will define the `https://example.com/MyType/myProperty` property. Note how the base IRI changes inside the braces to match the IRI of the type. The `myProperty` relative IRI is resolved into `https://example.com/MyType/myProperty` and not `https://example.com/myProperty`. For this reason, one may prefer to define properties independently. This can be done using the `property` keyword: base <https://example.com/>; property myProperty: Type; // independent property definition. It can then be referred to using an absolute, relative, or compact IRI: use <https://example.com/> as ex; type MyType { <https://example.com/myProperty>, <../myProperty>, // same as above ex:myProperty // same as above } As showed in this example, when a property is defined outside the type definition, it is not required to specify its type again.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9361906051635742, "perplexity": 2948.384688312572}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2023-06/segments/1674764500028.12/warc/CC-MAIN-20230202133541-20230202163541-00069.warc.gz"}
https://www.nextgurukul.in/wiki/concept/cbse/class-12/chemistry/the-p-block-elements/group-17-atomic-properties/3961983
Notes On Group 17: Atomic Properties - CBSE Class 12 Chemistry Trends of some of the atomic properties of group seventeen elements: Atomic properties include atomic and ionic radii, ionisation enthalpy, electron gain enthalpy and electro-negativity. Trend of atomic and ionic radii: As we move down the group, the atomic radii and ionic radii increase due to the addition of a new principal energy level in each successive element. These elements have the least atomic radii when compared to other elements in the corresponding periods. This is because of maximum effective nuclear charge. Ionisation enthalpy: These elements show very high values of ionisation enthalpy. As a consequence, the atoms of these elements have little tendency to lose electrons and form positive ions. As we move down the group, the value of ionisation energy decreases. This is due to the gradual increase in the atomic size, which reduces the force of attraction between the valence electrons and the nucleus. The ionisation enthalpy of fluorine is appreciably higher than any other halogen, which is attributed to its small size. Electron gain enthalpy: Halogens have the maximum negative electron gain enthalpy in the respective periods. The electron gain enthalpy becomes less negative on descending the group. Fluorine has less negative electron gain enthalpy than chlorine. I.e. chlorine has the maximum negative electron gain enthalpy among all the elements. It is because of the small size and compact 2p sub-shell of the fluorine atom. Owing to the small size of the fluorine atom, the incoming electron experiences a greater amount of repulsion from the electrons that are already present. The electron-electron repulsions between the incoming electron and the electrons already present .outweigh the attraction between the added electron and the nucleus Electro-negativity: The halogens have very high electro-negativity values. 
You can see from the values of electro-negativity in the table that the electro-negativity decreases gradually on moving down the group from fluorine to iodine due to the corresponding increase in the atomic radii. Fluorine is the most electronegative element in the periodic table. #### Summary Trends of some of the atomic properties of group seventeen elements: Atomic properties include atomic and ionic radii, ionisation enthalpy, electron gain enthalpy and electro-negativity. Trend of atomic and ionic radii: As we move down the group, the atomic radii and ionic radii increase due to the addition of a new principal energy level in each successive element. These elements have the least atomic radii when compared to other elements in the corresponding periods. This is because of maximum effective nuclear charge. Ionisation enthalpy: These elements show very high values of ionisation enthalpy. As a consequence, the atoms of these elements have little tendency to lose electrons and form positive ions. As we move down the group, the value of ionisation energy decreases. This is due to the gradual increase in the atomic size, which reduces the force of attraction between the valence electrons and the nucleus. The ionisation enthalpy of fluorine is appreciably higher than any other halogen, which is attributed to its small size. Electron gain enthalpy: Halogens have the maximum negative electron gain enthalpy in the respective periods. The electron gain enthalpy becomes less negative on descending the group. Fluorine has less negative electron gain enthalpy than chlorine. I.e. chlorine has the maximum negative electron gain enthalpy among all the elements. It is because of the small size and compact 2p sub-shell of the fluorine atom. Owing to the small size of the fluorine atom, the incoming electron experiences a greater amount of repulsion from the electrons that are already present. 
The electron-electron repulsions between the incoming electron and the electrons already present .outweigh the attraction between the added electron and the nucleus Electro-negativity: The halogens have very high electro-negativity values. You can see from the values of electro-negativity in the table that the electro-negativity decreases gradually on moving down the group from fluorine to iodine due to the corresponding increase in the atomic radii. Fluorine is the most electronegative element in the periodic table. Previous Next
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 2, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.6176928281784058, "perplexity": 957.118356965772}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-45/segments/1603107878921.41/warc/CC-MAIN-20201022053410-20201022083410-00534.warc.gz"}
https://www.mathjax.org/mathjax-v2-7-3-now-available/
# MathJax v2.7.3 now available We are happy to officially release MathJax v2.7.3 today. This is mostly a bug-fix release, with a few enhancements as well. The primary enhancement is the addition of version 2.3 of the Speech-Rule Engine that underlies the MathJax accessibility tools. This includes performance enhancements as well as a Spanish localization that is tied to the MathJax localization menu. In addition, the Explorer menu in the Assistive submenu has been slimmed down to remove unneeded options. For details on all bug fixes and enhancements, please see below. This release should be available on all CDN providers, e.g., https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/MathJax.js which you can load it in place of the version you are currently using (or load latest.js instead of Mathjax.js to get the latest version 2.x, whatever it is, but note that this loads asynchronously, so the MathJax global variable may not be available immediately). Alternatively, you can get a ZIP archive or access the branch on GitHub. Thanks for your continuing interest in MathJax. We hope that this release makes your MathJax experience even better. The MathJax Team. ## New in MathJax v2.7.3 ### Input • AsciiMath has been updated to include new features that have been added in the official AsciiMathML.js file since v2.7.2 was released. 
• TeX: Remove balanceBraces option from tex2jax, which was never implemented (#1871) • TeX: Make HTML id’s used in \tag handling more robust (#1899) • TeX: Make \DeclareMathOperator and \Newextarrow localizable by begingroup (#1876) • TeX: Have \bigg and friends to trim spaces from their arguments (#1819) • TeX: Don’t produce unwanted mrows with \left…\right (#1829) ### Output • HTML-CSS: Improve detection of web fonts (#517) • Improve line breaking past the container width when no break is found within it (#1883) • SVG: Don’t lose pre-spacing in elements containing line breaks (#1915) • CommonHTML: Fix width of roots containing line breaks (#1882) • SVG: Measure sizes of annotation-xml elements properly (#1870) • Handle default border width properly in SVG and HTML-CSS (#1855) • CommonHTML: Reset character width if a reset occurs while an equation is being processed (#1837) • CommonHTML: Properly scale widths in line breaking algorithm (#1881) • HTML-CSS: Fix position of rightmost glyph in multi-glyph horizontal stretchy characters (#1896) • MathML: Don’t add duplicate xmlns attribute when original is empty (#1862) ### Interface • Decode hash URI component so it works with special characters (#1843)
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.15503211319446564, "perplexity": 13945.677398219126}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2018-26/segments/1529267863119.34/warc/CC-MAIN-20180619193031-20180619213031-00227.warc.gz"}
https://uwaterloo.ca/astereae-lab/research/goldenrods/classification-and-illustrations/solidago-ludoviciana
# Solidago ludoviciana ## Louisiana Goldenrod Solidago ludoviciana (A. Gray) Small is native to dry open woods, edges of woods near roads, and railroad embankments in the western portion of the southeastern U.S.  It is rhizomatous (slender stoloniferous and deep seated) the leaves have tapering bases and the upper stem leaves are quickly reduced, ascending to appressed (Semple & Cook 2006 FNA).  It shares the rhizome features with S. tarda.  It is a member of the S. arguta complex (S. subsect. Argutae; Semple & Beck 2021) and was included in the multivariate analysis of the complex (Semple et al. 2021). Both diploids and tetraploids occur in S. ludoviciana. Nesom (2009) stated that S. dispersa Small was the correct name for this species, but G.H. Morton annotated the type of Solidago dispersa as possibly being S. arguta introgressed with S. ulmifolia or S. speciosa.  The inflorescence of the holotype is not the same as that of the many collections of S. ludoviciana I have seen.  I agree with Gary Morton that S. dispersa is not the same as S. ludoviciana and that the latter name is the correct one to used for this species.
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.9143243432044983, "perplexity": 9873.442666976362}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.3, "absolute_threshold": 20, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2022-40/segments/1664030335469.40/warc/CC-MAIN-20220930113830-20220930143830-00771.warc.gz"}
https://www.originlab.com/doc/LabVIEW/Custom-VIs/OA_Col-Setting
# 1.1.8 OA_Col-Setting ## Contents ### Description Set Column data types and labels. Minimum Version Required: Origin 8.5 SR0 ### Controls and Indicators Origin.Column is a reference to Origin.Column. Type is the designation of the column. It must be one of the following values: COLTYPE_NO_CHANGE = -1 COLTYPE_Y = 0 COLTYPE_NONE = 1 COLTYPE_ERROR = 2 COLTYPE_X = 3 COLTYPE_LABEL = 4 COLTYPE_Z = 5 COLTYPE_X_ERROR = 6 COLTYPE_GROUP = 7 COLTYPE_SUBJECT = 8 The default value is COLTYPE_NO_CHANGE. DataFormat is the data type of the column. It must be one of the following values: DF_NO_CHANGE =-1 DF_DOUBLE =0 DF_TEXT =1 DF_TIME = 2 DF_DATE = 3 DF_TEXT_NUMERIC = 9 DF_FLOAT = 32 DF_SHORT = 33 DF_LONG =34 DF_CHAR =35 DF_BYTE = 38 DF_USHORT = 39 DF_ULONG =40 DF_COMPLEX = 41 The default value is DF_NO_CHANGE. LongName is a string of LongName set to the Column. Units is a string of Unit set to the Column. Comments is a string of comment set to the Column. Error In contains error information that occur before this VI or function runs. Origin.Column is a reference to Origin.Column. Error Out contains error information.
{"extraction_info": {"found_math": true, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 1, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.4223271608352661, "perplexity": 6512.4310410862345}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2020-16/segments/1585371799447.70/warc/CC-MAIN-20200407121105-20200407151605-00417.warc.gz"}
https://bird.bcamath.org/handle/20.500.11824/572?show=full
dc.contributor.author Haspot, B. dc.date.accessioned 2017-02-21T08:18:20Z dc.date.available 2017-02-21T08:18:20Z dc.date.issued 2012-12-31 dc.identifier.issn 1674-7283 dc.identifier.uri http://hdl.handle.net/20.500.11824/572 dc.description.abstract This paper is dedicated to the study of viscous compressible barotropic fluids in dimension N ≥ 2. We address the question of well-posedness for large data having critical Besov regularity. Our result improves the analysis of Danchin and of the author inasmuch as we may take initial density in B N/p p,1 with 1 ≤ p < +∞. Our result relies on a new a priori estimate for the velocity, where we introduce a new unknown called effective velocity to weaken one of the couplings between the density and the velocity. In particular, our result is the first in which we obtain uniqueness without imposing hypothesis on the gradient of the density. dc.format application/pdf dc.language.iso eng en_US dc.rights Reconocimiento-NoComercial-CompartirIgual 3.0 España en_US dc.rights.uri http://creativecommons.org/licenses/by-nc-sa/3.0/es/ en_US dc.subject fluid mechanics dc.subject harmonic analysis dc.subject partial differential equation dc.title Existence of strong solutions in critical spaces for barotropic viscous fluids in larger spaces dc.type info:eu-repo/semantics/article en_US dc.identifier.doi 10.1007/s11425-012-4360-8 dc.relation.publisherversion https://www.scopus.com/inward/record.uri?eid=2-s2.0-84856722537&doi=10.1007%2fs11425-012-4360-8&partnerID=40&md5=83fd79576e3812cebe67fdfdf9bef20b dc.rights.accessRights info:eu-repo/semantics/openAccess en_US dc.type.hasVersion info:eu-repo/semantics/publishedVersion en_US dc.journal.title Science China Mathematics en_US  ### This item appears in the following Collection(s) Except where otherwise noted, this item's license is described as Reconocimiento-NoComercial-CompartirIgual 3.0 España
{"extraction_info": {"found_math": false, "script_math_tex": 0, "script_math_asciimath": 0, "math_annotations": 0, "math_alttext": 0, "mathml": 0, "mathjax_tag": 0, "mathjax_inline_tex": 0, "mathjax_display_tex": 0, "mathjax_asciimath": 0, "img_math": 0, "codecogs_latex": 0, "wp_latex": 0, "mimetex.cgi": 0, "/images/math/codecogs": 0, "mathtex.cgi": 0, "katex": 0, "math-container": 0, "wp-katex-eq": 0, "align": 0, "equation": 0, "x-ck12": 0, "texerror": 0, "math_score": 0.8723323941230774, "perplexity": 5276.311478584193}, "config": {"markdown_headings": true, "markdown_code": true, "boilerplate_config": {"ratio_threshold": 0.18, "absolute_threshold": 10, "end_threshold": 15, "enable": true}, "remove_buttons": true, "remove_image_figures": true, "remove_link_clusters": true, "table_config": {"min_rows": 2, "min_cols": 3, "format": "plain"}, "remove_chinese": true, "remove_edit_buttons": true, "extract_latex": true}, "warc_path": "s3://commoncrawl/crawl-data/CC-MAIN-2021-49/segments/1637964362230.18/warc/CC-MAIN-20211202145130-20211202175130-00595.warc.gz"}