in_source_id
stringlengths
13
58
issue
stringlengths
3
241k
before_files
listlengths
0
3
after_files
listlengths
0
3
pr_diff
stringlengths
109
107M
Cloud-CV__EvalAI-1233
Change the font everywhere to Roboto We are planning to switch the font to Roboto for the whole web application. Please let me know if there are concerns/issues.
[]
[]
diff --git a/frontend/src/css/modules/base.scss b/frontend/src/css/modules/base.scss index bda19c41e4..5918f25e84 100644 --- a/frontend/src/css/modules/base.scss +++ b/frontend/src/css/modules/base.scss @@ -9,44 +9,6 @@ display: none !important; } -/*needed fonts*/ - -@font-face { - font-family: "geo"; - src: url("../fonts/Geometos.ttf"); -} - -@font-face { - font-family: "mons-lig"; - src: url("../fonts/Montserrat-Light.ttf"); -} - -@font-face { - font-family: "mons-med"; - src: url("../fonts/Montserrat-Medium.ttf"); -} - -@font-face { - font-family: "mons-reg"; - src: url("../fonts/Montserrat-Regular.ttf"); -} - -.geo-font { - font-family: 'geo'; -} - -.mons-lig-font { - font-family: 'mons-lig'; -} - -.mons-med-font { - font-family: 'mons-med'; -} - -.mons-reg-font { - font-family: 'mons-reg'; -} - .display-large { display: block; } @@ -91,7 +53,7 @@ input[type="radio"].selectTeam + label { body, html { - font-family: 'mons-reg'; + font-family: 'Roboto'; } a { @@ -452,7 +414,7 @@ nav { .btn { text-transform: none; - font-family: 'mons-lig'; + font-family: 'Roboto'; i { font-size: 14px; } diff --git a/frontend/src/views/web/landing.html b/frontend/src/views/web/landing.html index de5f4c6571..147e65b7a8 100644 --- a/frontend/src/views/web/landing.html +++ b/frontend/src/views/web/landing.html @@ -40,7 +40,7 @@ <h4 class="text-dark-black"><strong>EvalAI</strong></h4> <div class="grad-container text-med-black "> <div class="row"> <div class="col s12 m6"> - <h4 class="mons-med-font"> + <h4> Featured Challenge </h4> <div class="display-large"> @@ -60,22 +60,22 @@ <h4 class="grad-span"> </h4> </div> <p> - <span class="w-500 text-light-black mons-med-font">Organized by: </span> <span class="w-500 mons-med-font">{{main.featuredChallenge.creator.team_name}}</span> + <span class="w-500 text-light-black">Organized by: </span> <span class="w-500">{{main.featuredChallenge.creator.team_name}}</span> </p> <p> - <span class="text-dark-black mons-reg-font">{{main.featuredChallenge.short_description}}<span ng-if="main.isMore">...</span></span> + <span class="text-dark-black">{{main.featuredChallenge.short_description}}<span ng-if="main.isMore">...</span></span> </p> <p> - <span class="w-500 text-light-black mons-med-font">Status: </span> <span class="w-500 mons-med-font"> In Progress</span> + <span class="w-500 text-light-black">Status: </span> <span class="w-500"> In Progress</span> <span class="right"> - <a ng-show="isAuth" class="view-more mons-reg-font" ui-sref="web.challenge-main.challenge-page({challengeId:main.featured-challenge/Challenge.id})">view more <i class="fa fa-external-link"></i></a> - <a ng-show="!isAuth" class="view-more mons-reg-font" ui-sref="featured-challenge-page({challengeId:main.featuredChallenge.id})">view more <i class="fa fa-external-link"></i></a> + <a ng-show="isAuth" class="view-more" ui-sref="web.challenge-main.challenge-page({challengeId:main.featured-challenge/Challenge.id})">view more <i class="fa fa-external-link"></i></a> + <a ng-show="!isAuth" class="view-more" ui-sref="featured-challenge-page({challengeId:main.featuredChallenge.id})">view more <i class="fa fa-external-link"></i></a> </span> </p> </div> <div ng-if="!main.isChallenge"> <p> - <span class="text-dark-black mons-reg-font">Sorry! there are no featured challenges. Please check again after a while.</span> + <span class="text-dark-black">Sorry! there are no featured challenges. Please check again after a while.</span> </p> </div> <div class="display-small"> @@ -95,8 +95,8 @@ <h4 class="grad-span"> <div class="row"> <div class="col s12"> <div class="align-center"> - <h4 class="mons-med-font"><div>What is EvalAI all about?</h4> - <p><span class="text-dark-black w-500 mons-light-font">EvalAI is an open-source web platform for organizing and participating in competitions to push the state of the art on AI tasks.</span> + <h4><div>What is EvalAI all about?</h4> + <p><span class="text-dark-black w-500">EvalAI is an open-source web platform for organizing and participating in competitions to push the state of the art on AI tasks.</span> <!-- <ul class="text-med-black"> <li> EvailAI details one performance on Artificial Intelligence challenges @@ -123,8 +123,8 @@ <h4 class="mons-med-font"><div>What is EvalAI all about?</h4> <div class="row"> <div class="col s12"> <div class="align-center"> - <h4 class="mons-med-font"><div>Who uses EvalAI?</div></h4> - <p><span class="text-med-black mons-reg-font">Several Research Organizations have already started hosting their challenges on EvalAI. Some of them are</span> + <h4><div>Who uses EvalAI?</div></h4> + <p><span class="text-med-black">Several Research Organizations have already started hosting their challenges on EvalAI. Some of them are</span> </p> <div class="row"> <div class="col s12 l2"> diff --git a/settings/dev.sample.py b/settings/dev.sample.py old mode 100755 new mode 100644
python__python-docs-es-1269
Translate 'whatsnew/2.3.po' This needs to reach 100% translated. The rendered version of this file will be available at https://docs.python.org/es/3.8/whatsnew/2.3.html once translated. Meanwhile, the English version is shown. Current stats for `whatsnew/2.3.po`: * Fuzzy: 0 * Percent translated: 0.0% * Entries: 0 / 324 * Untranslated: 324 Please, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it. Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
[]
[]
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0ec9648d0f..f970c8bc1b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/JulienPalard/powrap - rev: main + rev: v0.4.0 hooks: - id: powrap - repo: local @@ -11,7 +11,7 @@ repos: language: python # This one requires package ``hunspell-es_es`` in Archlinux - repo: https://github.com/AFPy/pospell - rev: v1.0.11 + rev: v1.0.12 hooks: - id: pospell args: ['--personal-dict', 'dict.txt', '--language', 'es_ES', '--language', 'es_AR'] diff --git a/dictionaries/whatsnew_2.3.txt b/dictionaries/whatsnew_2.3.txt index 2116c7051e..c10acf94e0 100644 --- a/dictionaries/whatsnew_2.3.txt +++ b/dictionaries/whatsnew_2.3.txt @@ -1,41 +1,110 @@ +Oberkirch Age +Altis Bauer Brunning Chermside Chris +Christopher +Cliff +Connor +Craig Dalke Daniels +Denis Detlef Drake +Dörwald Francesco Fraser +Geert Gerber +Getopt +Gilfix Hans +Hetland Hindle +Hisao +Hodgson Hudson Hurd +Hylton +Icon Jansen Jeff +Jeremy +Jones Jr +Karatsuba Kelly +Labs Lalo Lambert +Lange +Language Lannert +Lauder Löwis +Magnus +Malley +Marangozov +Martelli Martin Martins +Mick +Montanaro +Moore Neal Netzer +Nicholas Niemeyer Norwitz Nowak +Ondrej +Optik +Orendorff +Otkidach +Overview +Palkovsky +Pedroni +Piers +Programming Reifschneider Ricciardi Richie +Robert Roman +Samuele +Simionato Simon +Skip Suzi +Suzuki Tishler Travis +Trent +Walter +Ward +Wells +What +Wilson +benchmark +deserializadas +desescalado +dev +empaquetarlo +enumerate frame +idna +llamables ports +pystone +reanudables +recompilara +reelaborado +reescríbalas +reformateada +sobreescrituras +topológica +xmlrpclib +Åstrand diff --git a/scripts/completed_files.py b/scripts/completed_files.py old mode 100644 new mode 100755 diff --git a/scripts/print_percentage.py b/scripts/print_percentage.py old mode 100644 new mode 100755 diff --git a/whatsnew/2.3.po b/whatsnew/2.3.po index bdd2425c3a..9b0e113686 100644 --- a/whatsnew/2.3.po +++ b/whatsnew/2.3.po @@ -9,15 +9,15 @@ msgstr "" "Project-Id-Version: Python 3.8\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2019-05-06 11:59-0400\n" -"PO-Revision-Date: 2021-04-17 07:53-0500\n" +"PO-Revision-Date: 2021-09-25 10:30+0100\n" "Language-Team: python-doc-es\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Last-Translator: \n" +"Last-Translator: Claudia Millan <[email protected]>\n" "Language: es\n" -"X-Generator: Poedit 2.2.1\n" +"X-Generator: Poedit 3.0\n" #: ../Doc/whatsnew/2.3.rst:3 msgid "What's New in Python 2.3" @@ -97,7 +97,7 @@ msgstr "" #: ../Doc/whatsnew/2.3.rst:41 msgid "PEP 218: A Standard Set Datatype" -msgstr "PEP 218: A Standard Set Datatype" +msgstr "PEP 218: Un tipo de datos de conjunto estándar" #: ../Doc/whatsnew/2.3.rst:43 msgid "" @@ -159,17 +159,19 @@ msgstr "" #: ../Doc/whatsnew/2.3.rst:117 msgid ":pep:`218` - Adding a Built-In Set Object Type" -msgstr ":pep:`218` - Adding a Built-In Set Object Type" +msgstr ":pep:`218` - Añadiendo un tipo de objeto de conjunto incorporado" #: ../Doc/whatsnew/2.3.rst:117 msgid "" "PEP written by Greg V. Wilson. Implemented by Greg V. Wilson, Alex Martelli, " "and GvR." msgstr "" +"PEP escrito por Greg V. Wilson. Implementado por Greg V. Wilson, Alex " +"Martelli y GvR." #: ../Doc/whatsnew/2.3.rst:126 msgid "PEP 255: Simple Generators" -msgstr "" +msgstr "PEP 255: Generadores simples" #: ../Doc/whatsnew/2.3.rst:128 msgid "" @@ -181,6 +183,14 @@ msgid "" "2.2\" document; if you read it back when Python 2.2 came out, you can skip " "the rest of this section." msgstr "" +"En Python 2.2, los generadores se añadieron como una característica " +"opcional, que se activaba mediante una directiva ``from __future__ import " +"generators``. En 2.3 los generadores ya no necesitan ser habilitados " +"especialmente, y ahora están siempre presentes; esto significa que :keyword:" +"`yield` es ahora siempre una palabra clave. El resto de esta sección es una " +"copia de la descripción de los generadores del documento \"What's New in " +"Python 2.2\"; si lo leíste cuando salió Python 2.2, puedes saltarte el resto " +"de esta sección." #: ../Doc/whatsnew/2.3.rst:136 msgid "" @@ -194,10 +204,20 @@ msgid "" "This is what generators provide; they can be thought of as resumable " "functions." msgstr "" +"Sin duda estás familiarizado con cómo funcionan las llamadas a funciones en " +"Python o C. Cuando llamas a una función, ésta obtiene un espacio de nombres " +"privado donde se crean sus variables locales. Cuando la función llega a una " +"declaración :keyword:`return`, las variables locales se destruyen y el valor " +"resultante se retorna a quien la llamó. Una llamada posterior a la misma " +"función obtendrá un nuevo conjunto de variables locales. Pero, ¿qué pasaría " +"si las variables locales no se tiraran al salir de una función? ¿Qué pasaría " +"si pudieras reanudar la función donde la dejaste? Esto es lo que " +"proporcionan los generadores; se puede pensar en ellos como funciones " +"reanudables." #: ../Doc/whatsnew/2.3.rst:145 msgid "Here's the simplest example of a generator function::" -msgstr "" +msgstr "Este es el ejemplo más sencillo de una función generadora::" #: ../Doc/whatsnew/2.3.rst:151 msgid "" @@ -206,6 +226,10 @@ msgid "" "this is detected by Python's bytecode compiler which compiles the function " "specially as a result." msgstr "" +"Se ha introducido una nueva palabra clave, :keyword:`yield`, para los " +"generadores. Cualquier función que contenga una declaración :keyword:`!" +"yield` es una función generadora; esto es detectado por el compilador de " +"código de bits de Python que compila la función especialmente como resultado." #: ../Doc/whatsnew/2.3.rst:156 msgid "" @@ -222,16 +246,31 @@ msgid "" "try`...\\ :keyword:`!finally` statement; read :pep:`255` for a full " "explanation of the interaction between :keyword:`!yield` and exceptions.)" msgstr "" +"Cuando se llama a una función generadora, ésta no retorna un único valor, " +"sino que retorna un objeto generador que soporta el protocolo de los " +"iteradores. Al ejecutar la sentencia :keyword:`yield`, el generador retorna " +"el valor de ``i``, de forma similar a una sentencia :keyword:`return`. La " +"gran diferencia entre :keyword:`!yield` y una sentencia :keyword:`!return` " +"es que al llegar a una sentencia :keyword:`!yield` se suspende el estado de " +"ejecución del generador y se conservan las variables locales. En la " +"siguiente llamada al método ``.next()`` del generador, la función se " +"reanudará la ejecución inmediatamente después de la sentencia :keyword:`!" +"yield`. (Por razones complicadas, la sentencia :keyword:`!yield` no está " +"permitida dentro del bloque :keyword:`try` de una sentencia :keyword:`!" +"try`...`; lea :pep:`255` para una explicación completa de la interacción " +"entre :keyword:`!yield` y las excepciones)" #: ../Doc/whatsnew/2.3.rst:169 msgid "Here's a sample usage of the :func:`generate_ints` generator::" -msgstr "" +msgstr "Este es un ejemplo de uso del generador :func:`generate_ints`::" #: ../Doc/whatsnew/2.3.rst:186 msgid "" "You could equally write ``for i in generate_ints(5)``, or ``a,b,c = " "generate_ints(3)``." msgstr "" +"También podrías escribir ``for i in generate_ints(5)``, o ``a,b,c = " +"generate_ints(3)``." #: ../Doc/whatsnew/2.3.rst:189 msgid "" @@ -243,6 +282,13 @@ msgid "" "indicated by raising :exc:`StopIteration` manually, or by just letting the " "flow of execution fall off the bottom of the function." msgstr "" +"Dentro de una función generadora, la expresión :keyword:`return` sólo puede " +"usarse sin un valor, y señala el final de la procesión de valores; después " +"el generador no puede retornar más valores. :keyword:`!return` con un valor, " +"como ``return 5``, es un error de sintaxis dentro de una función " +"generadora. El final de los resultados del generador también puede " +"indicarse levantando manualmente :exc:`StopIteration`, o simplemente dejando " +"que el flujo de ejecución caiga en el fondo de la función." #: ../Doc/whatsnew/2.3.rst:197 msgid "" @@ -256,6 +302,15 @@ msgid "" "The simplest one implements an in-order traversal of a tree using generators " "recursively. ::" msgstr "" +"Puedes conseguir el efecto de los generadores manualmente escribiendo tu " +"propia clase y almacenando todas las variables locales del generador como " +"variables de instancia. Por ejemplo, la devolución de una lista de enteros " +"podría hacerse estableciendo ``self.count`` a 0, y haciendo que el método :" +"meth:`next` incremente ``self.count`` y lo retorne. Sin embargo, para un " +"generador medianamente complicado, escribir la clase correspondiente sería " +"mucho más complicado. :file:`Lib/test/test_generators.py` contiene varios " +"ejemplos más interesantes. El más sencillo implementa un recorrido en orden " +"de un árbol utilizando generadores de forma recursiva ::" #: ../Doc/whatsnew/2.3.rst:215 msgid "" @@ -265,6 +320,11 @@ msgid "" "knight to every square of an $NxN$ chessboard without visiting any square " "twice)." msgstr "" +"Otros dos ejemplos en :file:`Lib/test/test_generators.py` producen " +"soluciones para el problema de las N reinas (colocar $N$ reinas en un " +"tablero de ajedrez $NxN$ de forma que ninguna reina amenace a otra) y el " +"recorrido del caballero (una ruta que lleva a un caballo a cada casilla de " +"un tablero de ajedrez $NxN$ sin visitar ninguna casilla dos veces)." #: ../Doc/whatsnew/2.3.rst:220 msgid "" @@ -275,6 +335,12 @@ msgid "" "\" at https://www.cs.arizona.edu/icon/docs/ipd266.htm gives an idea of what " "this looks like::" msgstr "" +"La idea de los generadores proviene de otros lenguajes de programación, " +"especialmente de Icon (https://www.cs.arizona.edu/icon/), donde la idea de " +"los generadores es fundamental. En Icon, cada expresión y llamada a una " +"función se comporta como un generador. Un ejemplo de \"*An Overview of the " +"Icon Programming Language*\" en https://www.cs.arizona.edu/icon/docs/ipd266." +"htm da una idea de cómo es esto::" #: ../Doc/whatsnew/2.3.rst:230 msgid "" @@ -284,6 +350,12 @@ msgid "" "Icon retries it with the second value of 23. 23 is greater than 5, so the " "comparison now succeeds, and the code prints the value 23 to the screen." msgstr "" +"En Icon la función :func:`find` retorna los índices en los que se encuentra " +"la subcadena \"o\": 3, 23, 33. En la expresión :keyword:`if`, a ``i`` se le " +"asigna primero un valor de 3, pero 3 es menor que 5, por lo que la " +"comparación falla, e Icon la reintenta con el segundo valor de 23. 23 es " +"mayor que 5, por lo que la comparación ahora tiene éxito, y el código " +"imprime el valor 23 en la pantalla." #: ../Doc/whatsnew/2.3.rst:236 msgid "" @@ -295,10 +367,17 @@ msgid "" "as a concrete object (the iterator) that can be passed around to other " "functions or stored in a data structure." msgstr "" +"Python no va tan lejos como Icon en la adopción de generadores como concepto " +"central. Los generadores se consideran parte del núcleo del lenguaje " +"Python, pero aprenderlos o utilizarlos no es obligatorio; si no resuelven " +"ningún problema que tengas, siéntete libre de ignorarlos. Una característica " +"novedosa de la interfaz de Python en comparación con la de Icon es que el " +"estado de un generador se representa como un objeto concreto (el iterador) " +"que puede pasarse a otras funciones o almacenarse en una estructura de datos." #: ../Doc/whatsnew/2.3.rst:248 msgid ":pep:`255` - Simple Generators" -msgstr "" +msgstr ":pep:`255` - Generadores simples" #: ../Doc/whatsnew/2.3.rst:248 msgid "" @@ -306,10 +385,13 @@ msgid "" "mostly by Neil Schemenauer and Tim Peters, with other fixes from the Python " "Labs crew." msgstr "" +"Escrito por Neil Schemenauer, Tim Peters, Magnus Lie Hetland. Implementado " +"principalmente por Neil Schemenauer y Tim Peters, con otras correcciones del " +"equipo de Python Labs." #: ../Doc/whatsnew/2.3.rst:257 msgid "PEP 263: Source Code Encodings" -msgstr "" +msgstr "PEP 263: Codificación del código fuente" #: ../Doc/whatsnew/2.3.rst:259 msgid "" @@ -318,6 +400,10 @@ msgid "" "comment in the first or second line of the source file. For example, a " "UTF-8 file can be declared with::" msgstr "" +"Los archivos fuente de Python ahora pueden declararse con diferentes " +"codificaciones de conjuntos de caracteres. Las codificaciones se declaran " +"incluyendo un comentario con formato especial en la primera o segunda línea " +"del archivo fuente. Por ejemplo, un archivo UTF-8 puede declararse con::" #: ../Doc/whatsnew/2.3.rst:267 msgid "" @@ -327,6 +413,11 @@ msgid "" "`DeprecationWarning` being signalled by Python 2.3; in 2.4 this will be a " "syntax error." msgstr "" +"Sin esta declaración de codificación, la codificación por defecto utilizada " +"es ASCII de 7 bits. Ejecutar o importar módulos que contengan literales de " +"cadena con caracteres de 8 bits y que no tengan una declaración de " +"codificación dará lugar a un :exc:`DeprecationWarning` señalado por Python " +"2.3; en 2.4 será un error de sintaxis." #: ../Doc/whatsnew/2.3.rst:273 msgid "" @@ -335,20 +426,28 @@ msgid "" "identifiers are still restricted to ASCII characters, so you can't have " "variable names that use characters outside of the usual alphanumerics." msgstr "" +"La declaración de codificación sólo afecta a los literales de cadena " +"Unicode, que se convertirán a Unicode utilizando la codificación " +"especificada. Ten en cuenta que los identificadores de Python siguen " +"restringidos a caracteres ASCII, por lo que no puedes tener nombres de " +"variables que utilicen caracteres fuera de los alfanuméricos habituales." #: ../Doc/whatsnew/2.3.rst:282 msgid ":pep:`263` - Defining Python Source Code Encodings" msgstr "" +":pep:`263` - Definición de las codificaciones del código fuente de Python" #: ../Doc/whatsnew/2.3.rst:282 msgid "" "Written by Marc-André Lemburg and Martin von Löwis; implemented by Suzuki " "Hisao and Martin von Löwis." msgstr "" +"Escrito por Marc-André Lemburg y Martin von Löwis; realizado por Suzuki " +"Hisao y Martin von Löwis." #: ../Doc/whatsnew/2.3.rst:289 msgid "PEP 273: Importing Modules from ZIP Archives" -msgstr "" +msgstr "PEP 273: Importar módulos desde archivos ZIP" #: ../Doc/whatsnew/2.3.rst:291 msgid "" @@ -357,6 +456,10 @@ msgid "" "be automatically imported if a ZIP archive's filename is added to ``sys." "path``. For example:" msgstr "" +"El nuevo módulo :mod:`zipimport` añade soporte para importar módulos desde " +"un archivo en formato ZIP. No es necesario importar el módulo " +"explícitamente; se importará automáticamente si se añade el nombre de un " +"archivo ZIP a ``sys.path``. Por ejemplo:" #: ../Doc/whatsnew/2.3.rst:314 msgid "" @@ -367,6 +470,13 @@ msgid "" "by adding the corresponding :file:`\\*.pyc` file, meaning that if a ZIP " "archive doesn't contain :file:`\\*.pyc` files, importing may be rather slow." msgstr "" +"Una entrada en ``sys.path`` puede ser ahora el nombre de un archivo ZIP. El " +"archivo ZIP puede contener cualquier tipo de ficheros, pero sólo se pueden " +"importar los ficheros llamados :file:`\\*.py`, :file:`\\*.pyc`, o :file:`\\*." +"pyo`. Si un archivo sólo contiene ficheros :file:`\\*.py`, Python no " +"intentará modificar el archivo añadiendo el correspondiente fichero :file:`" +"\\*.pyc`, lo que significa que si un archivo ZIP no contiene ficheros :file:`" +"\\*.pyc`, la importación puede ser bastante lenta." #: ../Doc/whatsnew/2.3.rst:321 msgid "" @@ -374,10 +484,13 @@ msgid "" "subdirectory; for example, the path :file:`/tmp/example.zip/lib/` would only " "import from the :file:`lib/` subdirectory within the archive." msgstr "" +"También se puede especificar una ruta dentro del archivo para importar sólo " +"de un subdirectorio; por ejemplo, la ruta :file:`/tmp/example.zip/lib/` sólo " +"importaría del subdirectorio :file:`lib/` dentro del archivo." #: ../Doc/whatsnew/2.3.rst:331 msgid ":pep:`273` - Import Modules from Zip Archives" -msgstr "" +msgstr ":pep:`273` - Importación de módulos desde archivos Zip" #: ../Doc/whatsnew/2.3.rst:329 msgid "" @@ -387,10 +500,15 @@ msgid "" "`302`. See section :ref:`section-pep302` for a description of the new import " "hooks." msgstr "" +"Escrito por James C. Ahlstrom, que también proporcionó una implementación. " +"Python 2.3 sigue la especificación en :pep:`273`, pero utiliza una " +"implementación escrita por Just van Rossum que utiliza los ganchos de " +"importación descritos en :pep:`302`. Vea la sección :ref:`section-pep302` " +"para una descripción de los nuevos ganchos de importación." #: ../Doc/whatsnew/2.3.rst:338 msgid "PEP 277: Unicode file name support for Windows NT" -msgstr "" +msgstr "PEP 277: Soporte de nombres de archivo Unicode para Windows NT" #: ../Doc/whatsnew/2.3.rst:340 msgid "" @@ -398,6 +516,10 @@ msgid "" "strings. Traditionally, Python has represented file names as byte strings, " "which is inadequate because it renders some file names inaccessible." msgstr "" +"En Windows NT, 2000 y XP, el sistema almacena los nombres de archivo como " +"cadenas Unicode. Tradicionalmente, Python ha representado los nombres de " +"archivo como cadenas de bytes, lo cual es inadecuado porque hace que algunos " +"nombres de archivo sean inaccesibles." #: ../Doc/whatsnew/2.3.rst:344 msgid "" @@ -407,12 +529,21 @@ msgid "" "listdir`, Python now returns a list of Unicode strings. A new function, :" "func:`os.getcwdu`, returns the current directory as a Unicode string." msgstr "" +"Python permite ahora utilizar cadenas Unicode arbitrarias (dentro de las " +"limitaciones del sistema de archivos) para todas las funciones que esperan " +"nombres de archivos, sobre todo la función incorporada :func:`open`. Si se " +"pasa una cadena Unicode a :func:`os.listdir`, Python retorna ahora una lista " +"de cadenas Unicode. Una nueva función, :func:`os.getcwdu`, retorna el " +"directorio actual como una cadena Unicode." #: ../Doc/whatsnew/2.3.rst:350 msgid "" "Byte strings still work as file names, and on Windows Python will " "transparently convert them to Unicode using the ``mbcs`` encoding." msgstr "" +"Las cadenas de bytes siguen funcionando como nombres de archivo, y en " +"Windows Python las convertirá de forma transparente a Unicode utilizando la " +"codificación ``mbcs``." #: ../Doc/whatsnew/2.3.rst:353 msgid "" @@ -422,24 +553,32 @@ msgid "" "strings are supported as file names by checking :attr:`os.path." "supports_unicode_filenames`, a Boolean value." msgstr "" +"Otros sistemas también permiten cadenas Unicode como nombres de archivo, " +"pero las convierten en cadenas de bytes antes de pasarlas al sistema, lo que " +"puede provocar un :exc:`UnicodeError`. Las aplicaciones pueden comprobar si " +"se admiten cadenas Unicode arbitrarias como nombres de archivo comprobando :" +"attr:`os.path.supports_unicode_filenames`, un valor booleano." #: ../Doc/whatsnew/2.3.rst:359 msgid "Under MacOS, :func:`os.listdir` may now return Unicode filenames." msgstr "" +"En MacOS, :func:`os.listdir` ahora puede retornar nombres de archivo Unicode." #: ../Doc/whatsnew/2.3.rst:365 msgid ":pep:`277` - Unicode file name support for Windows NT" -msgstr "" +msgstr ":pep:`277` - Soporte de nombres de archivo Unicode para Windows NT" #: ../Doc/whatsnew/2.3.rst:365 msgid "" "Written by Neil Hodgson; implemented by Neil Hodgson, Martin von Löwis, and " "Mark Hammond." msgstr "" +"Escrito por Neil Hodgson; realizado por Neil Hodgson, Martin von Löwis y " +"Mark Hammond." #: ../Doc/whatsnew/2.3.rst:375 msgid "PEP 278: Universal Newline Support" -msgstr "" +msgstr "PEP 278: Soporte universal de nuevas líneas" #: ../Doc/whatsnew/2.3.rst:377 msgid "" @@ -450,6 +589,14 @@ msgid "" "character 10), MacOS uses the carriage return (ASCII character 13), and " "Windows uses a two-character sequence of a carriage return plus a newline." msgstr "" +"Los tres principales sistemas operativos que se utilizan hoy en día son " +"Microsoft Windows, el sistema operativo Macintosh de Apple y los diversos " +"derivados de Unix. Una pequeña molestia del trabajo entre plataformas es " +"que estas tres plataformas utilizan diferentes caracteres para marcar el " +"final de las líneas en los archivos de texto. Unix utiliza el salto de " +"línea (carácter ASCII 10), MacOS utiliza el retorno de carro (carácter ASCII " +"13), y Windows utiliza una secuencia de dos caracteres de un retorno de " +"carro más una nueva línea." #: ../Doc/whatsnew/2.3.rst:384 msgid "" @@ -460,6 +607,13 @@ msgid "" "translated to a ``'\\n'`` in the strings returned by the various file " "methods such as :meth:`read` and :meth:`readline`." msgstr "" +"Los objetos de archivo de Python pueden ahora soportar convenciones de fin " +"de línea distintas de la que sigue la plataforma en la que se ejecuta " +"Python. Al abrir un archivo con el modo ``'U`` o ``'rU`` se abrirá un " +"archivo para su lectura en modo :term:`universal newlines`. Las tres " +"convenciones de final de línea se traducirán a un ``'\\n'`` en las cadenas " +"retornadas por los distintos métodos de archivo como :meth:`read` y :meth:" +"`readline`." #: ../Doc/whatsnew/2.3.rst:391 msgid "" @@ -468,6 +622,10 @@ msgid "" "modules can be shared between all three operating systems without needing to " "convert the line-endings." msgstr "" +"El soporte universal de nuevas líneas también se utiliza al importar módulos " +"y al ejecutar un archivo con la función :func:`execfile`. Esto significa " +"que los módulos de Python pueden ser compartidos entre los tres sistemas " +"operativos sin necesidad de convertir los finales de línea." #: ../Doc/whatsnew/2.3.rst:396 msgid "" @@ -475,18 +633,21 @@ msgid "" "`!--without-universal-newlines` switch when running Python's :program:" "`configure` script." msgstr "" +"Esta función puede desactivarse al compilar Python especificando la opción :" +"option:`!--without-universal-newlines` al ejecutar el script :program:" +"`configure` de Python." #: ../Doc/whatsnew/2.3.rst:403 msgid ":pep:`278` - Universal Newline Support" -msgstr "" +msgstr ":pep:`278` - Soporte universal de nuevas líneas" #: ../Doc/whatsnew/2.3.rst:404 msgid "Written and implemented by Jack Jansen." -msgstr "" +msgstr "Escrito y ejecutado por Jack Jansen." #: ../Doc/whatsnew/2.3.rst:412 msgid "PEP 279: enumerate()" -msgstr "" +msgstr "PEP 279: enumerate()" #: ../Doc/whatsnew/2.3.rst:414 msgid "" @@ -495,26 +656,32 @@ msgid "" "sequence, returns an iterator that will return ``(0, thing[0])``, ``(1, " "thing[1])``, ``(2, thing[2])``, and so forth." msgstr "" +"Una nueva función incorporada, :func:`enumerate`, hará que ciertos bucles " +"sean un poco más claros. ``enumerate(cosa)``, donde *cosa* es un iterador o " +"una secuencia, retorna un iterador que retornará ``(0, cosa[0])``, ``(1, " +"cosa[1])``, ``(2, cosa[2])``, y así sucesivamente." #: ../Doc/whatsnew/2.3.rst:419 msgid "A common idiom to change every element of a list looks like this::" msgstr "" +"Un modismo común para cambiar cada elemento de una lista tiene el siguiente " +"aspecto::" #: ../Doc/whatsnew/2.3.rst:426 msgid "This can be rewritten using :func:`enumerate` as::" -msgstr "" +msgstr "Esto se puede reescribir usando :func:`enumerate` como::" #: ../Doc/whatsnew/2.3.rst:435 msgid ":pep:`279` - The enumerate() built-in function" -msgstr "" +msgstr ":pep:`279` - La función incorporada enumerate()" #: ../Doc/whatsnew/2.3.rst:436 msgid "Written and implemented by Raymond D. Hettinger." -msgstr "" +msgstr "Escrito y ejecutado por Raymond D. Hettinger." #: ../Doc/whatsnew/2.3.rst:442 msgid "PEP 282: The logging Package" -msgstr "" +msgstr "PEP 282: El paquete de registro" #: ../Doc/whatsnew/2.3.rst:444 msgid "" @@ -527,6 +694,15 @@ msgid "" "log, or even e-mail them to a particular address; of course, it's also " "possible to write your own handler classes." msgstr "" +"Se ha añadido a Python 2.3 un paquete estándar para escribir registros, :mod:" +"`logging`. Proporciona un mecanismo potente y flexible para generar salidas " +"de registro que pueden ser filtradas y procesadas de varias maneras. Se " +"puede utilizar un archivo de configuración escrito en un formato estándar " +"para controlar el comportamiento de registro de un programa. Python incluye " +"manejadores que escribirán los registros en el error estándar o en un " +"archivo o socket, los enviarán al registro del sistema, o incluso los " +"enviarán por correo electrónico a una dirección particular; por supuesto, " +"también es posible escribir tus propias clases de manejadores." #: ../Doc/whatsnew/2.3.rst:453 msgid "" @@ -542,16 +718,30 @@ msgid "" "auth`` and ``server.network``. There's also a root :class:`Logger` that's " "the parent of all other loggers." msgstr "" +"La clase :class:`Logger` es la clase principal. La mayoría del código de la " +"aplicación tratará con uno o más objetos :class:`Logger`, cada uno utilizado " +"por un subsistema particular de la aplicación. Cada :class:`Logger` se " +"identifica con un nombre, y los nombres se organizan en una jerarquía " +"utilizando ``.`` como separador de componentes. Por ejemplo, puedes tener " +"instancias de :class:`Logger` llamadas ``servidor``, ``servidor.auth`` y " +"``servidor.network``. Estas dos últimas instancias están por debajo de " +"``servidor`` en la jerarquía. Esto significa que si aumentas la verbosidad " +"de ``servidor`` o diriges los mensajes de ``servidor`` a un gestor " +"diferente, los cambios también se aplicarán a los registros de ``servidor." +"auth`` y ``servidor.network``. También hay un :class:`Logger` raíz que es el " +"padre de todos los demás loggers." #: ../Doc/whatsnew/2.3.rst:464 msgid "" "For simple uses, the :mod:`logging` package contains some convenience " "functions that always use the root log::" msgstr "" +"Para usos sencillos, el paquete :mod:`logging` contiene algunas funciones de " +"conveniencia que siempre utilizan la raíz log::" #: ../Doc/whatsnew/2.3.rst:475 ../Doc/whatsnew/2.3.rst:500 msgid "This produces the following output::" -msgstr "" +msgstr "Esto produce la siguiente salida::" #: ../Doc/whatsnew/2.3.rst:481 msgid "" @@ -560,6 +750,10 @@ msgid "" "display of informational and debugging messages by calling the :meth:" "`setLevel` method on the root logger." msgstr "" +"En la configuración por defecto, los mensajes informativos y de depuración " +"se suprimen y la salida se envía al error estándar. Puede habilitar la " +"visualización de mensajes informativos y de depuración llamando al método :" +"meth:`setLevel` del registrador raíz." #: ../Doc/whatsnew/2.3.rst:486 msgid "" @@ -567,6 +761,10 @@ msgid "" "the functions for logging messages take the arguments ``(msg, arg1, " "arg2, ...)`` and log the string resulting from ``msg % (arg1, arg2, ...)``." msgstr "" +"Observe que la llamada :func:`warning` utiliza operadores de formato de " +"cadena; todas las funciones para el registro de mensajes toman los " +"argumentos ``(msg, arg1, arg2, ...)`` y registran la cadena resultante de " +"``msg % (arg1, arg2, ...)``." #: ../Doc/whatsnew/2.3.rst:490 msgid "" @@ -574,6 +772,10 @@ msgid "" "traceback. Any of the other functions will also record the traceback if you " "specify a true value for the keyword argument *exc_info*. ::" msgstr "" +"También hay una función :func:`exception` que registra el rastro más " +"reciente. Cualquiera de las otras funciones también registrará el rastro si " +"se especifica un valor verdadero para el argumento de la palabra clave " +"*exc_info*. ::" #: ../Doc/whatsnew/2.3.rst:508 msgid "" @@ -582,6 +784,10 @@ msgid "" "creating it if it doesn't exist yet. ``getLogger(None)`` returns the root " "logger. ::" msgstr "" +"Los programas un poco más avanzados utilizarán un logger distinto del logger " +"raíz. La función ``getLogger(nombre)`` se utiliza para obtener un registro " +"en particular, creándolo si aún no existe. ``getLogger(None)`` retorna el " +"logger raíz. ::" #: ../Doc/whatsnew/2.3.rst:519 msgid "" @@ -590,6 +796,10 @@ msgid "" "`Logger` can prevent this by setting its :attr:`propagate` attribute to :" "const:`False`." msgstr "" +"Los registros se suelen propagar hacia arriba en la jerarquía, por lo que un " +"mensaje registrado en ``servidor.auth`` también es visto por ``servidor`` y " +"``root``, pero un :class:`Logger` puede evitar esto estableciendo su " +"atributo :attr:`propagate` a :const:`False`." #: ../Doc/whatsnew/2.3.rst:523 msgid "" @@ -603,6 +813,16 @@ msgid "" "by a :class:`Formatter` class. All of these classes can be replaced by your " "own specially-written classes." msgstr "" +"Hay más clases proporcionadas por el paquete :mod:`logging` que pueden ser " +"personalizadas. Cuando una instancia de :class:`Logger` recibe la orden de " +"registrar un mensaje, crea una instancia de :class:`LogRecord` que se envía " +"a cualquier número de instancias de :class:`Handler` diferentes. Los " +"loggers y handlers también pueden tener una lista adjunta de filtros, y cada " +"filtro puede hacer que el :class:`LogRecord` sea ignorado o puede modificar " +"el registro antes de pasarlo. Cuando finalmente se emiten, las instancias " +"de :class:`LogRecord` se convierten en texto mediante una clase :class:" +"`Formatter`. Todas estas clases pueden ser reemplazadas por tus propias " +"clases especialmente escritas." #: ../Doc/whatsnew/2.3.rst:533 msgid "" @@ -612,18 +832,23 @@ msgid "" "documentation for all of the details. Reading :pep:`282` will also be " "helpful." msgstr "" +"Con todas estas características, el paquete :mod:`logging` debería " +"proporcionar suficiente flexibilidad incluso para las aplicaciones más " +"complicadas. Esto es sólo un resumen incompleto de sus características, así " +"que por favor consulte la documentación de referencia del paquete para todos " +"los detalles. La lectura de :pep:`282` también será útil." #: ../Doc/whatsnew/2.3.rst:541 msgid ":pep:`282` - A Logging System" -msgstr "" +msgstr ":pep:`282` - Un sistema de registro" #: ../Doc/whatsnew/2.3.rst:542 msgid "Written by Vinay Sajip and Trent Mick; implemented by Vinay Sajip." -msgstr "" +msgstr "Escrito por Vinay Sajip y Trent Mick; implementado por Vinay Sajip." #: ../Doc/whatsnew/2.3.rst:550 msgid "PEP 285: A Boolean Type" -msgstr "" +msgstr "PEP 285: Un tipo booleano" #: ../Doc/whatsnew/2.3.rst:552 msgid "" @@ -633,6 +858,11 @@ msgid "" "2.2.1, but the 2.2.1 versions are simply set to integer values of 1 and 0 " "and aren't a different type.)" msgstr "" +"Se ha añadido un tipo booleano a Python 2.3. Se añadieron dos nuevas " +"constantes al módulo :mod:`__builtin__`, :const:`True` y :const:`False`. " +"(Las constantes :const:`True` y :const:`False` se añadieron a los módulos " +"incorporados en Python 2.2.1, pero las versiones de 2.2.1 se ajustan " +"simplemente a valores enteros de 1 y 0 y no son un tipo diferente)" #: ../Doc/whatsnew/2.3.rst:558 msgid "" @@ -640,12 +870,17 @@ msgid "" "for it takes any Python value and converts it to :const:`True` or :const:" "`False`. ::" msgstr "" +"El objeto de tipo para este nuevo tipo se denomina :class:`bool`; su " +"constructor toma cualquier valor de Python y lo convierte en :const:`True` " +"o :const:`False`. ::" #: ../Doc/whatsnew/2.3.rst:570 msgid "" "Most of the standard library modules and built-in functions have been " "changed to return Booleans. ::" msgstr "" +"La mayoría de los módulos de la biblioteca estándar y las funciones " +"incorporadas se han modificado para retornar booleanos. ::" #: ../Doc/whatsnew/2.3.rst:581 msgid "" @@ -656,6 +891,12 @@ msgid "" "the statement is ``return True``, however, the meaning of the return value " "is quite clear." msgstr "" +"Los booleanos de Python se añadieron con el objetivo principal de hacer el " +"código más claro. Por ejemplo, si estás leyendo una función y te encuentras " +"con la sentencia ``return 1``, podrías preguntarte si el ``1`` representa un " +"valor de verdad booleano, un índice o un coeficiente que multiplica alguna " +"otra cantidad. Sin embargo, si la sentencia es ``return True``, el " +"significado del valor de retorno es bastante claro." #: ../Doc/whatsnew/2.3.rst:587 msgid "" @@ -669,6 +910,15 @@ msgid "" "a subclass of the :class:`int` class so that arithmetic using a Boolean " "still works. ::" msgstr "" +"Los booleanos de Python *no* se añadieron en aras de una comprobación de " +"tipos estricta. Un lenguaje muy estricto como Pascal también le impediría " +"realizar aritmética con booleanos, y requeriría que la expresión en una " +"declaración :keyword:`if` siempre se evaluara a un resultado booleano. " +"Python no es tan estricto y nunca lo será, como dice explícitamente :pep:" +"`285`. Esto significa que puede utilizar cualquier expresión en una " +"sentencia :keyword:`!if`, incluso las que se evalúan a una lista o tupla o " +"algún objeto aleatorio. El tipo Booleano es una subclase de la clase :class:" +"`int` por lo que la aritmética que utiliza un Booleano sigue funcionando. ::" #: ../Doc/whatsnew/2.3.rst:605 msgid "" @@ -677,18 +927,22 @@ msgid "" "difference that :func:`str` and :func:`repr` return the strings ``'True'`` " "and ``'False'`` instead of ``'1'`` and ``'0'``." msgstr "" +"Para resumir :const:`True` and :const:`False` en una frase: son formas " +"alternativas de deletrear los valores enteros 1 y 0, con la única diferencia " +"de que :func:`str` y :func:`repr` retornan las cadenas ``Verdadero`` y " +"``Falso`` en lugar de ``1`` y ``0``." #: ../Doc/whatsnew/2.3.rst:613 msgid ":pep:`285` - Adding a bool type" -msgstr "" +msgstr ":pep:`285` - Añadir un tipo booleano" #: ../Doc/whatsnew/2.3.rst:614 msgid "Written and implemented by GvR." -msgstr "" +msgstr "Escrito y ejecutado por GvR." #: ../Doc/whatsnew/2.3.rst:620 msgid "PEP 293: Codec Error Handling Callbacks" -msgstr "" +msgstr "PEP 293: Llamadas de retorno para el manejo de errores del códec" #: ../Doc/whatsnew/2.3.rst:622 msgid "" @@ -701,6 +955,14 @@ msgid "" "inserting an XML character reference or HTML entity reference into the " "converted string." msgstr "" +"Al codificar una cadena Unicode en una cadena de bytes, pueden encontrarse " +"caracteres no codificables. Hasta ahora, Python ha permitido especificar el " +"procesamiento del error como \"estricto\" (lanzando :exc:`UnicodeError`), " +"\"ignorar\" (saltando el carácter), o \"reemplazar\" (usando un signo de " +"interrogación en la cadena de salida), siendo \"estricto\" el comportamiento " +"por defecto. Puede ser deseable especificar un procesamiento alternativo de " +"tales errores, como insertar una referencia de carácter XML o una referencia " +"de entidad HTML en la cadena convertida." #: ../Doc/whatsnew/2.3.rst:630 msgid "" @@ -713,6 +975,15 @@ msgid "" "target encoding. The handler can then either raise an exception or return a " "replacement string." msgstr "" +"Python tiene ahora un marco flexible para añadir diferentes estrategias de " +"procesamiento. Se pueden añadir nuevos manejadores de errores con :func:" +"`codecs.register_error`, y los códecs pueden acceder al manejador de errores " +"con :func:`codecs.lookup_error`. Se ha añadido una API en C equivalente para " +"los códecs escritos en C. El gestor de errores obtiene la información de " +"estado necesaria, como la cadena que se está convirtiendo, la posición en la " +"cadena donde se ha detectado el error y la codificación de destino. El " +"controlador puede entonces lanzar una excepción o retornar una cadena de " +"reemplazo." #: ../Doc/whatsnew/2.3.rst:638 msgid "" @@ -720,24 +991,30 @@ msgid "" "\"backslashreplace\" uses Python backslash quoting to represent unencodable " "characters and \"xmlcharrefreplace\" emits XML character references." msgstr "" +"Se han implementado dos manejadores de error adicionales utilizando este " +"marco: \"backslashreplace\" utiliza las comillas de barra invertida de " +"Python para representar los caracteres no codificables y \"xmlcharrefreplace" +"\" emite referencias de caracteres XML." #: ../Doc/whatsnew/2.3.rst:645 msgid ":pep:`293` - Codec Error Handling Callbacks" -msgstr "" +msgstr ":pep:`293` - Retrollamadas de manejo de errores del códec" #: ../Doc/whatsnew/2.3.rst:646 msgid "Written and implemented by Walter Dörwald." -msgstr "" +msgstr "Escrito y ejecutado por Walter Dörwald." #: ../Doc/whatsnew/2.3.rst:654 msgid "PEP 301: Package Index and Metadata for Distutils" -msgstr "" +msgstr "PEP 301: Índice de paquetes y metadatos para Distutils" #: ../Doc/whatsnew/2.3.rst:656 msgid "" "Support for the long-requested Python catalog makes its first appearance in " "2.3." msgstr "" +"La compatibilidad con el catálogo de Python, largamente solicitada, hace su " +"primera aparición en 2.3." #: ../Doc/whatsnew/2.3.rst:658 msgid "" @@ -747,6 +1024,11 @@ msgid "" "it to a central catalog server. The resulting catalog is available from " "https://pypi.org." msgstr "" +"El corazón del catálogo es el nuevo comando :command:`register` de " +"Distutils. Ejecutando ``python setup.py register`` se recogen los metadatos " +"que describen un paquete, como su nombre, versión, mantenedor, descripción, " +"etc., y se envían a un servidor de catálogo central. El catálogo resultante " +"está disponible en https://pypi.org." #: ../Doc/whatsnew/2.3.rst:664 msgid "" @@ -755,30 +1037,38 @@ msgid "" "`Trove <http://catb.org/~esr/trove/>`_-style strings can be supplied to help " "classify the software." msgstr "" +"Para hacer el catálogo un poco más útil, se ha añadido un nuevo argumento " +"opcional de palabra clave *clasificadores* a la función Distutils :func:" +"`setup`. Se puede suministrar una lista de cadenas de estilo `Trove <http://" +"catb.org/~esr/trove/>`_ para ayudar a clasificar el software." #: ../Doc/whatsnew/2.3.rst:669 msgid "" "Here's an example :file:`setup.py` with classifiers, written to be " "compatible with older versions of the Distutils::" msgstr "" +"Aquí hay un ejemplo :file:`setup.py` con clasificadores, escrito para que " +"sea compatible con las versiones más antiguas de Distutils::" #: ../Doc/whatsnew/2.3.rst:688 msgid "" "The full list of classifiers can be obtained by running ``python setup.py " "register --list-classifiers``." msgstr "" +"La lista completa de clasificadores se puede obtener ejecutando ``python " +"setup.py register --list-classifiers``." #: ../Doc/whatsnew/2.3.rst:694 msgid ":pep:`301` - Package Index and Metadata for Distutils" -msgstr "" +msgstr ":pep:`301` - Índice de paquetes y metadatos para Distutils" #: ../Doc/whatsnew/2.3.rst:695 msgid "Written and implemented by Richard Jones." -msgstr "" +msgstr "Escrito y ejecutado por Richard Jones." #: ../Doc/whatsnew/2.3.rst:703 msgid "PEP 302: New Import Hooks" -msgstr "" +msgstr "PEP 302: Nuevos ganchos de importación" #: ../Doc/whatsnew/2.3.rst:705 msgid "" @@ -789,6 +1079,12 @@ msgid "" "and :mod:`iu` modules, but none of them has ever gained much acceptance, and " "none of them were easily usable from C code." msgstr "" +"Aunque ha sido posible escribir ganchos de importación personalizados desde " +"que se introdujo el módulo :mod:`ihooks` en Python 1.3, nadie ha estado " +"nunca realmente contento con él porque escribir nuevos ganchos de " +"importación es difícil y complicado. Se han propuesto varias alternativas, " +"como los módulos :mod:`imputil` y :mod:`iu`, pero ninguno de ellos ha tenido " +"mucha aceptación, y ninguno era fácilmente utilizable desde el código C." #: ../Doc/whatsnew/2.3.rst:712 msgid "" @@ -796,6 +1092,9 @@ msgid "" "McMillan's :mod:`iu` module. Three new items are added to the :mod:`sys` " "module:" msgstr "" +":pep:`302` toma prestadas ideas de sus predecesores, especialmente del " +"módulo :mod:`iu` de Gordon McMillan. Se añaden tres nuevos elementos al " +"módulo :mod:`sys`:" #: ../Doc/whatsnew/2.3.rst:716 msgid "" @@ -804,12 +1103,19 @@ msgid "" "an importer object that will handle imports from this path or raises an :exc:" "`ImportError` exception if it can't handle this path." msgstr "" +"``sys.path_hooks`` es una lista de objetos invocables; la mayoría de las " +"veces serán clases. Cada llamada toma una cadena que contiene una ruta y " +"retorna un objeto importador que manejará las importaciones desde esta ruta " +"o lanza una excepción :exc:`ImportError` si no puede manejar esta ruta." #: ../Doc/whatsnew/2.3.rst:721 msgid "" "``sys.path_importer_cache`` caches importer objects for each path, so ``sys." "path_hooks`` will only need to be traversed once for each path." msgstr "" +"``sys.path_importer_cache`` almacena en caché los objetos del importador " +"para cada ruta, por lo que ``sys.path_hooks`` sólo tendrá que ser recorrido " +"una vez para cada ruta." #: ../Doc/whatsnew/2.3.rst:724 msgid "" @@ -818,6 +1124,11 @@ msgid "" "can add objects to it. Additional built-in and frozen modules can be " "imported by an object added to this list." msgstr "" +"``sys.meta_path`` es una lista de objetos importadores que se recorrerán " +"antes de comprobar ``sys.path``. Esta lista está inicialmente vacía, pero " +"el código de usuario puede añadir objetos a ella. Los módulos adicionales " +"incorporados y congelados pueden ser importados por un objeto añadido a esta " +"lista." #: ../Doc/whatsnew/2.3.rst:729 msgid "" @@ -827,25 +1138,35 @@ msgid "" "has a single method, ``load_module(fullname)``, that creates and returns the " "corresponding module object." msgstr "" +"Los objetos importadores deben tener un único método, " +"``find_module(fullname, path=None)``. *fullname* será un nombre de módulo o " +"paquete, por ejemplo ``string`` o ``distutils.core``. :meth:`find_module` " +"debe retornar un objeto cargador que tenga un único método, " +"``load_module(fullname)``, que cree y retorne el objeto módulo " +"correspondiente." #: ../Doc/whatsnew/2.3.rst:735 msgid "" "Pseudo-code for Python's new import logic, therefore, looks something like " "this (simplified a bit; see :pep:`302` for the full details)::" msgstr "" +"Por lo tanto, el pseudocódigo de la nueva lógica de importación de Python es " +"algo así (simplificado un poco; véase :pep:`302` para los detalles " +"completos)::" #: ../Doc/whatsnew/2.3.rst:760 msgid ":pep:`302` - New Import Hooks" -msgstr "" +msgstr ":pep:`302` - Nuevos ganchos de importación" #: ../Doc/whatsnew/2.3.rst:761 msgid "" "Written by Just van Rossum and Paul Moore. Implemented by Just van Rossum." msgstr "" +"Escrito por Just van Rossum y Paul Moore. Implementado por Just van Rossum." #: ../Doc/whatsnew/2.3.rst:769 msgid "PEP 305: Comma-separated Files" -msgstr "" +msgstr "PEP 305: Archivos separados por comas" #: ../Doc/whatsnew/2.3.rst:771 msgid "" @@ -853,22 +1174,31 @@ msgid "" "databases and spreadsheets. Python 2.3 adds a parser for comma-separated " "files." msgstr "" +"Los archivos separados por comas son un formato frecuentemente utilizado " +"para exportar datos de bases de datos y hojas de cálculo. Python 2.3 añade " +"un analizador de archivos separados por comas." #: ../Doc/whatsnew/2.3.rst:774 msgid "Comma-separated format is deceptively simple at first glance::" msgstr "" +"El formato separado por comas es engañosamente sencillo a primera vista::" #: ../Doc/whatsnew/2.3.rst:778 msgid "" "Read a line and call ``line.split(',')``: what could be simpler? But toss in " "string data that can contain commas, and things get more complicated::" msgstr "" +"Leer una línea y llamar a ``line.split(',')``: ¿qué puede ser más sencillo? " +"Pero si se añaden datos de cadena que pueden contener comas, las cosas se " +"complican::" #: ../Doc/whatsnew/2.3.rst:783 msgid "" "A big ugly regular expression can parse this, but using the new :mod:`csv` " "package is much simpler::" msgstr "" +"Una expresión regular grande y fea puede analizar esto, pero usar el nuevo " +"paquete :mod:`csv` es mucho más sencillo::" #: ../Doc/whatsnew/2.3.rst:793 msgid "" @@ -876,6 +1206,9 @@ msgid "" "separator isn't limited to the comma and can be changed to any character, " "and so can the quoting and line-ending characters." msgstr "" +"La función :func:`reader` admite varias opciones. El separador de campos no " +"se limita a la coma y puede cambiarse por cualquier carácter, al igual que " +"las comillas y el final de línea." #: ../Doc/whatsnew/2.3.rst:797 msgid "" @@ -884,20 +1217,27 @@ msgid "" "class:`csv.writer` class will generate comma-separated files from a " "succession of tuples or lists, quoting strings that contain the delimiter." msgstr "" +"Se pueden definir y registrar diferentes dialectos de archivos separados por " +"comas; actualmente hay dos dialectos, ambos utilizados por Microsoft Excel. " +"Una clase :class:`csv.writer` independiente generará archivos separados por " +"comas a partir de una sucesión de tuplas o listas, citando cadenas que " +"contengan el delimitador." #: ../Doc/whatsnew/2.3.rst:806 msgid ":pep:`305` - CSV File API" -msgstr "" +msgstr ":pep:`305` - API de archivos CSV" #: ../Doc/whatsnew/2.3.rst:806 msgid "" "Written and implemented by Kevin Altis, Dave Cole, Andrew McNamara, Skip " "Montanaro, Cliff Wells." msgstr "" +"Escrito y realizado por Kevin Altis, Dave Cole, Andrew McNamara, Skip " +"Montanaro, Cliff Wells." #: ../Doc/whatsnew/2.3.rst:815 msgid "PEP 307: Pickle Enhancements" -msgstr "" +msgstr "PEP 307: Mejoras en Pickle" #: ../Doc/whatsnew/2.3.rst:817 msgid "" @@ -907,6 +1247,12 @@ msgid "" "quotes a trivial example where a new-style class results in a pickled string " "three times longer than that for a classic class." msgstr "" +"Los módulos :mod:`pickle` y :mod:`cPickle` recibieron cierta atención " +"durante el ciclo de desarrollo de la 2.3. En 2.2, las clases de estilo " +"nuevo podían ser desempaquetadas sin dificultad, pero no se desempaquetaba " +"de forma muy compacta; :pep:`307` cita un ejemplo trivial en el que una " +"clase de estilo nuevo da lugar a una cadena desempaquetada tres veces más " +"larga que la de una clase clásica." #: ../Doc/whatsnew/2.3.rst:823 msgid "" @@ -917,6 +1263,13 @@ msgid "" "format. A new constant, :const:`pickle.HIGHEST_PROTOCOL`, can be used to " "select the fanciest protocol available." msgstr "" +"La solución fue inventar un nuevo protocolo pickle. La función :func:" +"`pickle.dumps` soporta desde hace tiempo una bandera de texto o binario. En " +"la versión 2.3, esta bandera se ha redefinido, pasando de ser un booleano a " +"un entero: 0 es el antiguo formato pickle en modo texto, 1 es el antiguo " +"formato binario, y ahora 2 es un nuevo formato específico de 2.3. Una nueva " +"constante, :const:`pickle.HIGHEST_PROTOCOL`, puede utilizarse para " +"seleccionar el protocolo más elegante disponible." #: ../Doc/whatsnew/2.3.rst:830 msgid "" @@ -926,6 +1279,12 @@ msgid "" "this code was ever audited and therefore it's all been ripped out in 2.3. " "You should not unpickle untrusted data in any version of Python." msgstr "" +"El unpickling ya no se considera una operación segura. El :mod:`pickle` de " +"la versión 2.2 proporcionaba ganchos para tratar de evitar que las clases no " +"seguras fueran deserializadas (específicamente, un atributo :attr:" +"`__safe_for_unpickling__`), pero nada de este código fue nunca auditado y " +"por lo tanto todo ha sido eliminado en la versión 2.3. No se debe " +"deserializar datos no confiables en ninguna versión de Python." #: ../Doc/whatsnew/2.3.rst:836 msgid "" @@ -934,6 +1293,11 @@ msgid "" "`__getstate__`, :meth:`__setstate__`, and :meth:`__getnewargs__`. Consult :" "pep:`307` for the full semantics of these methods." msgstr "" +"Para reducir la sobrecarga de pickling de las clases de estilo nuevo, se ha " +"añadido una nueva interfaz para personalizar el pickling mediante tres " +"métodos especiales: :meth:`__getstate__`, :meth:`__setstate__`, y :meth:" +"`__getnewargs__`. Consulte :pep:`307` para conocer la semántica completa de " +"estos métodos." #: ../Doc/whatsnew/2.3.rst:841 msgid "" @@ -942,18 +1306,23 @@ msgid "" "Software Foundation will maintain a list of standardized codes; there's also " "a range of codes for private use. Currently no codes have been specified." msgstr "" +"Como forma de comprimir aún más los pickles, ahora es posible utilizar " +"códigos enteros en lugar de cadenas largas para identificar las clases " +"serializadas. La Python Software Foundation mantendrá una lista de códigos " +"estandarizados; también hay una gama de códigos para uso privado. " +"Actualmente no se ha especificado ningún código." #: ../Doc/whatsnew/2.3.rst:849 msgid ":pep:`307` - Extensions to the pickle protocol" -msgstr "" +msgstr ":pep:`307` - Extensiones del protocolo pickle" #: ../Doc/whatsnew/2.3.rst:850 msgid "Written and implemented by Guido van Rossum and Tim Peters." -msgstr "" +msgstr "Escrito y ejecutado por Guido van Rossum y Tim Peters." #: ../Doc/whatsnew/2.3.rst:858 msgid "Extended Slices" -msgstr "" +msgstr "Rebanadas ampliadas" #: ../Doc/whatsnew/2.3.rst:860 msgid "" @@ -965,21 +1334,33 @@ msgid "" "sequence types have never supported this feature, raising a :exc:`TypeError` " "if you tried it. Michael Hudson contributed a patch to fix this shortcoming." msgstr "" +"Desde la versión 1.4 de Python, la sintaxis de corte admite un tercer " +"argumento opcional \"paso\" o \"zancada\". Por ejemplo, estas son todas las " +"sintaxis legales de Python: ``L[1:10:2]``, ``L[:-1:1]``, ``L[::-1]``. Esto " +"se añadió a Python a petición de los desarrolladores de Numerical Python, " +"que utiliza ampliamente el tercer argumento. Sin embargo, los tipos de " +"secuencias de listas, tuplas y cadenas incorporados en Python nunca han " +"soportado esta característica, y lanzan un :exc:`TypeError` si lo intentas. " +"Michael Hudson ha contribuido con un parche para solucionar este problema." #: ../Doc/whatsnew/2.3.rst:868 msgid "" "For example, you can now easily extract the elements of a list that have " "even indexes::" msgstr "" +"Por ejemplo, ahora puede extraer fácilmente los elementos de una lista que " +"tengan índices pares::" #: ../Doc/whatsnew/2.3.rst:875 msgid "" "Negative values also work to make a copy of the same list in reverse order::" msgstr "" +"Los valores negativos también sirven para hacer una copia de la misma lista " +"en orden inverso::" #: ../Doc/whatsnew/2.3.rst:880 msgid "This also works for tuples, arrays, and strings::" -msgstr "" +msgstr "Esto también funciona para tuplas, arrays y cadenas::" #: ../Doc/whatsnew/2.3.rst:888 msgid "" @@ -988,6 +1369,10 @@ msgid "" "assignment to extended and regular slices. Assignment to a regular slice " "can be used to change the length of the sequence::" msgstr "" +"Si tienes una secuencia mutable, como una lista o un array, puedes asignar o " +"eliminar una rebanada extendida, pero hay algunas diferencias entre la " +"asignación a rebanadas extendidas y regulares. La asignación a una rebanada " +"regular se puede utilizar para cambiar la longitud de la secuencia::" #: ../Doc/whatsnew/2.3.rst:900 msgid "" @@ -995,20 +1380,25 @@ msgid "" "the list on the right hand side of the statement must contain the same " "number of items as the slice it is replacing::" msgstr "" +"Las rebanadas extendidas no son tan flexibles. Cuando se asigna a una " +"rebanada extendida, la lista a la derecha de la declaración debe contener el " +"mismo número de elementos que la rebanada que está reemplazando::" #: ../Doc/whatsnew/2.3.rst:917 msgid "Deletion is more straightforward::" -msgstr "" +msgstr "La eliminación es más sencilla::" #: ../Doc/whatsnew/2.3.rst:928 msgid "" "One can also now pass slice objects to the :meth:`__getitem__` methods of " "the built-in sequences::" msgstr "" +"Ahora también se pueden pasar objetos slice a los métodos :meth:" +"`__getitem__` de las secuencias incorporadas::" #: ../Doc/whatsnew/2.3.rst:934 msgid "Or use slice objects directly in subscripts::" -msgstr "" +msgstr "O utilizar los objetos de corte directamente en los subíndices::" #: ../Doc/whatsnew/2.3.rst:939 msgid "" @@ -1020,6 +1410,13 @@ msgid "" "phrase hides a welter of confusing details!). The method is intended to be " "used like this::" msgstr "" +"Para simplificar la implementación de secuencias que soportan el corte " +"extendido, los objetos slice tienen ahora un método ``indices(length)`` que, " +"dada la longitud de una secuencia, retorna una tupla ``(start, stop, step)`` " +"que puede pasarse directamente a :func:`range`. :meth:`indices` maneja los " +"índices omitidos y los que están fuera de los límites de una manera " +"consistente con los slices regulares (¡y esta frase inocua esconde un montón " +"de detalles confusos!). El método está pensado para ser utilizado así::" #: ../Doc/whatsnew/2.3.rst:957 msgid "" @@ -1028,28 +1425,38 @@ msgid "" "This is consistent with Python 2.2, where :class:`int`, :class:`str`, etc., " "underwent the same change." msgstr "" +"En este ejemplo también se puede ver que el objeto incorporado :class:" +"`slice` es ahora el objeto tipo para el tipo slice, y ya no es una función. " +"Esto es consistente con Python 2.2, donde :class:`int`, :class:`str`, etc., " +"sufrieron el mismo cambio." #: ../Doc/whatsnew/2.3.rst:966 msgid "Other Language Changes" -msgstr "" +msgstr "Otros cambios en el lenguaje" #: ../Doc/whatsnew/2.3.rst:968 msgid "" "Here are all of the changes that Python 2.3 makes to the core Python " "language." msgstr "" +"Estos son todos los cambios que Python 2.3 introduce en el núcleo del " +"lenguaje Python." #: ../Doc/whatsnew/2.3.rst:970 msgid "" "The :keyword:`yield` statement is now always a keyword, as described in " "section :ref:`section-generators` of this document." msgstr "" +"La expresión :keyword:`yield` es ahora siempre una palabra clave, como se " +"describe en la sección :ref:`section-generators` de este documento." #: ../Doc/whatsnew/2.3.rst:973 msgid "" "A new built-in function :func:`enumerate` was added, as described in " "section :ref:`section-enumerate` of this document." msgstr "" +"Se ha añadido una nueva función incorporada :func:`enumerate`, como se " +"describe en la sección :ref:`section-enumerate` de este documento." #: ../Doc/whatsnew/2.3.rst:976 msgid "" @@ -1057,6 +1464,9 @@ msgid "" "the built-in :class:`bool` type, as described in section :ref:`section-bool` " "of this document." msgstr "" +"Se han añadido dos nuevas constantes, :const:`True` y :const:`False` junto " +"con el tipo incorporado :class:`bool`, como se describe en la sección :ref:" +"`section-bool` de este documento." #: ../Doc/whatsnew/2.3.rst:980 msgid "" @@ -1066,12 +1476,19 @@ msgid "" "that ``isinstance(int(expression), int)`` is false, but that seems unlikely " "to cause problems in practice." msgstr "" +"El constructor de tipo :func:`int` ahora retornará un entero largo en lugar " +"de lanzar un :exc:`OverflowError` cuando una cadena o un número de punto " +"flotante es demasiado grande para caber en un entero. Esto puede llevar al " +"resultado paradójico de que ``isinstance(int(expresión), int)`` sea falso, " +"pero parece poco probable que cause problemas en la práctica." #: ../Doc/whatsnew/2.3.rst:986 msgid "" "Built-in types now support the extended slicing syntax, as described in " "section :ref:`section-slices` of this document." msgstr "" +"Los tipos incorporados ahora soportan la sintaxis de rebanado extendida, " +"como se describe en la sección :ref:`section-slices` de este documento." #: ../Doc/whatsnew/2.3.rst:989 msgid "" @@ -1080,6 +1497,10 @@ msgid "" "accepts numbers, meaning that you can't use it to concatenate a bunch of " "strings. (Contributed by Alex Martelli.)" msgstr "" +"Una nueva función incorporada, ``suma(iterable, start=0)``, suma los " +"elementos numéricos en el objeto iterable y retorna su suma. :func:`suma` " +"sólo acepta números, lo que significa que no se puede utilizar para " +"concatenar un montón de cadenas. (Contribución de Alex Martelli)" #: ../Doc/whatsnew/2.3.rst:994 msgid "" @@ -1088,6 +1509,10 @@ msgid "" "consistent with slice indexing, so when *pos* is -1 the value will be " "inserted before the last element, and so forth." msgstr "" +"``list.insert(pos, valor)`` solía insertar *valor* al principio de la lista " +"cuando *pos* era negativo. El comportamiento ha sido cambiado para ser " +"consistente con la indexación de las rebanadas, así que cuando *pos* es -1 " +"el valor será insertado antes del último elemento, y así sucesivamente." #: ../Doc/whatsnew/2.3.rst:999 msgid "" @@ -1095,6 +1520,9 @@ msgid "" "returns its index, now takes optional *start* and *stop* arguments to limit " "the search to only part of the list." msgstr "" +"``list.index(value)``, que busca *valor* dentro de la lista y retorna su " +"índice, ahora toma los argumentos opcionales *start* y *stop* para limitar " +"la búsqueda sólo a una parte de la lista." #: ../Doc/whatsnew/2.3.rst:1003 msgid "" @@ -1103,6 +1531,11 @@ msgid "" "dictionary. If the requested key isn't present in the dictionary, *default* " "is returned if it's specified and :exc:`KeyError` raised if it isn't. ::" msgstr "" +"Los diccionarios tienen un nuevo método, ``pop(key[, *default*])``, que " +"retorna el valor correspondiente a *key* y elimina ese par clave/valor del " +"diccionario. Si la clave solicitada no está presente en el diccionario, se " +"retorna *default* si está especificada y se lanza :exc:`KeyError` si no lo " +"está:" #: ../Doc/whatsnew/2.3.rst:1025 msgid "" @@ -1110,20 +1543,26 @@ msgid "" "creates a dictionary with keys taken from the supplied iterator *iterable* " "and all values set to *value*, defaulting to ``None``." msgstr "" +"También hay un nuevo método de clase, ``dict.fromkeys(iterable, value)``, " +"que crea un diccionario con claves tomadas del iterador *iterable* " +"suministrado y todos los valores establecidos a *value*, por defecto a " +"``None``." #: ../Doc/whatsnew/2.3.rst:1029 msgid "(Patches contributed by Raymond Hettinger.)" -msgstr "" +msgstr "(Parches aportados por Raymond Hettinger)" #: ../Doc/whatsnew/2.3.rst:1031 msgid "" "Also, the :func:`dict` constructor now accepts keyword arguments to simplify " "creating small dictionaries::" msgstr "" +"Además, el constructor :func:`dict` ahora acepta argumentos de palabras " +"clave para simplificar la creación de pequeños diccionarios::" #: ../Doc/whatsnew/2.3.rst:1037 msgid "(Contributed by Just van Rossum.)" -msgstr "" +msgstr "(Contribución de Just van Rossum.)" #: ../Doc/whatsnew/2.3.rst:1039 msgid "" @@ -1132,6 +1571,10 @@ msgid "" "Python with the :option:`-O` switch will still generate code that doesn't " "execute any assertions." msgstr "" +"La expresión :keyword:`assert` ya no comprueba la bandera ``debug__``, por " +"lo que ya no se pueden desactivar las aserciones asignando a ``__debug__``. " +"Ejecutar Python con la opción :option:`-O` seguirá generando código que no " +"ejecute ninguna aserción." #: ../Doc/whatsnew/2.3.rst:1044 msgid "" @@ -1141,6 +1584,12 @@ msgid "" "now use the type objects available in the :mod:`types` module.) For example, " "you can create a new module object with the following code:" msgstr "" +"La mayoría de los objetos de tipo son ahora invocables, por lo que puedes " +"usarlos para crear nuevos objetos como funciones, clases y módulos. (Esto " +"significa que el módulo :mod:`new` puede quedar obsoleto en una futura " +"versión de Python, porque ahora puedes utilizar los objetos de tipo " +"disponibles en el módulo :mod:`types`) Por ejemplo, puede crear un nuevo " +"objeto de módulo con el siguiente código:" #: ../Doc/whatsnew/2.3.rst:1059 msgid "" @@ -1151,6 +1600,12 @@ msgid "" "PendingDeprecationWarning:: <-W>` on the command line or use :func:`warnings." "filterwarnings`." msgstr "" +"Se ha añadido una nueva advertencia, :exc:`PendingDeprecationWarning` para " +"indicar las características que están en proceso de ser obsoletas. La " +"advertencia no se imprimirá por defecto. Para comprobar el uso de funciones " +"que quedarán obsoletas en el futuro, proporcione :option:`-Walways::" +"PendingDeprecationWarning:: <-W>` en la línea de comandos o utilice :func:" +"`warnings.filterwarnings`." #: ../Doc/whatsnew/2.3.rst:1065 msgid "" @@ -1158,6 +1613,9 @@ msgid "" "occurred\"``, has begun. Raising a string will now trigger :exc:" "`PendingDeprecationWarning`." msgstr "" +"Ha comenzado el proceso de desaprobación de las excepciones basadas en " +"cadenas, como en ``lanzamiento de \"Error ocurred”``. Al lanzar una cadena, " +"ahora se activará :exc:`PendingDeprecationWarning`." #: ../Doc/whatsnew/2.3.rst:1069 msgid "" @@ -1165,6 +1623,9 @@ msgid "" "warning. In a future version of Python, ``None`` may finally become a " "keyword." msgstr "" +"El uso de ``None`` como nombre de una variable ahora resultará en una " +"advertencia :exc:`SyntaxWarning`. En una futura versión de Python, ``None`` " +"podría convertirse en una palabra clave." #: ../Doc/whatsnew/2.3.rst:1072 msgid "" @@ -1176,6 +1637,15 @@ msgid "" "encoding used by the file; Unicode strings written to the file will be " "automatically converted to bytes using the given encoding." msgstr "" +"El método :meth:`xreadlines` de los objetos archivo, introducido en Python " +"2.1, ya no es necesario porque los archivos se comportan ahora como su " +"propio iterador. :meth:`xreadlines` se introdujo originalmente como una " +"forma más rápida de recorrer todas las líneas de un archivo, pero ahora se " +"puede escribir simplemente ``for line in file_obj``. Los objetos archivo " +"también tienen un nuevo atributo :attr:`encoding` de sólo lectura que " +"proporciona la codificación utilizada por el archivo; las cadenas Unicode " +"escritas en el archivo se convertirán automáticamente a bytes utilizando la " +"codificación dada." #: ../Doc/whatsnew/2.3.rst:1080 msgid "" @@ -1192,6 +1662,19 @@ msgid "" "pipermail/python-dev/2002-October/029035.html. Samuele Pedroni first pointed " "out the problem and also implemented the fix by coding the C3 algorithm." msgstr "" +"El orden de resolución de los métodos utilizados por las clases del nuevo " +"estilo ha cambiado, aunque sólo notarás la diferencia si tienes una " +"jerarquía de herencia realmente complicada. Las clases clásicas no se ven " +"afectadas por este cambio. Python 2.2 originalmente utilizaba una " +"ordenación topológica de los ancestros de una clase, pero 2.3 ahora utiliza " +"el algoritmo C3 como se describe en el artículo `\"A Monotonic Superclass " +"Linearization for Dylan\" <http://citeseerx.ist.psu.edu/viewdoc/summary?" +"doi=10.1.1.19.3910>`_. Para entender la motivación de este cambio, lea el " +"artículo de Michele Simionato `\"Python 2.3 Method Resolution Order\" " +"<http://www.phyast.pitt.edu/~micheles/mro.html>`_, o lea el hilo en python-" +"dev que comienza con el mensaje en https://mail.python.org/pipermail/python-" +"dev/2002-October/029035.html. Samuele Pedroni fue el primero en señalar el " +"problema y también implementó la solución codificando el algoritmo C3." #: ../Doc/whatsnew/2.3.rst:1093 msgid "" @@ -1203,6 +1686,13 @@ msgid "" "number using ``sys.setcheckinterval(N)``. The limit can be retrieved with " "the new :func:`sys.getcheckinterval` function." msgstr "" +"Python ejecuta programas multihilo cambiando entre hilos después de ejecutar " +"N bytecodes. El valor por defecto de N se ha incrementado de 10 a 100 " +"bytecodes, acelerando las aplicaciones de un solo hilo al reducir la " +"sobrecarga de cambio. Algunas aplicaciones multihilo pueden sufrir un " +"tiempo de respuesta más lento, pero eso se arregla fácilmente estableciendo " +"el límite a un número menor usando ``sys.setcheckinterval(N)``. El límite " +"puede recuperarse con la nueva función :func:`sys.getcheckinterval`." #: ../Doc/whatsnew/2.3.rst:1101 msgid "" @@ -1211,10 +1701,15 @@ msgid "" "``'.'`` in front of the type name. For example, in Python 2.2, if you " "created a socket and printed its :attr:`__class__`, you'd get this output::" msgstr "" +"Un cambio menor pero de gran alcance es que los nombres de los tipos de " +"extensión definidos por los módulos incluidos con Python ahora contienen el " +"módulo y un ``.'`` delante del nombre del tipo. Por ejemplo, en Python 2.2, " +"si creabas un socket e imprimías su :attr:`__class__`, obtendrías esta " +"salida::" #: ../Doc/whatsnew/2.3.rst:1110 msgid "In 2.3, you get this::" -msgstr "" +msgstr "En 2.3, se obtiene esto::" #: ../Doc/whatsnew/2.3.rst:1115 msgid "" @@ -1225,10 +1720,16 @@ msgid "" "lines of those relating to assigning to an instance's :attr:`~instance." "__class__` attribute." msgstr "" +"Se ha eliminado una de las incompatibilidades señaladas entre las clases de " +"estilo antiguo y las de estilo nuevo: ahora se pueden asignar a los " +"atributos :attr:`~definición.__name__` y :attr:`~clase.__bases__` de las " +"clases de estilo nuevo. Hay algunas restricciones sobre lo que se puede " +"asignar a :attr:`~class.__bases__` en la línea de las relacionadas con la " +"asignación al atributo :attr:`~instance.__class__` de una instancia." #: ../Doc/whatsnew/2.3.rst:1125 msgid "String Changes" -msgstr "" +msgstr "Cambios en las cadenas de texto" #: ../Doc/whatsnew/2.3.rst:1127 msgid "" @@ -1238,12 +1739,20 @@ msgid "" "and ``X in Y`` will return :const:`True` if *X* is a substring of *Y*. If " "*X* is the empty string, the result is always :const:`True`. ::" msgstr "" +"El operador :keyword:`in` ahora funciona de forma diferente para las " +"cadenas. Antes, cuando se evaluaba ``X en Y`` donde *X* y *Y* eran cadenas, " +"*X* sólo podía ser un único carácter. Esto ha cambiado; *X* puede ser una " +"cadena de cualquier longitud, y ``X en Y`` retornará :const:`True` si *X* es " +"una subcadena de *Y*. Si *X* es una cadena vacía, el resultado es siempre :" +"const:`True`. ::" #: ../Doc/whatsnew/2.3.rst:1140 msgid "" "Note that this doesn't tell you where the substring starts; if you need that " "information, use the :meth:`find` string method." msgstr "" +"Tenga en cuenta que esto no le dice dónde empieza la subcadena; si necesita " +"esa información, utilice el método :meth:`find` string." #: ../Doc/whatsnew/2.3.rst:1143 msgid "" @@ -1251,16 +1760,22 @@ msgid "" "have an optional argument for specifying the characters to strip. The " "default is still to remove all whitespace characters::" msgstr "" +"Los métodos de cadena :meth:`strip`, :meth:`lstrip` y :meth:`rstrip` tienen " +"ahora un argumento opcional para especificar los caracteres a eliminar. El " +"valor por defecto sigue siendo eliminar todos los caracteres de espacio en " +"blanco::" #: ../Doc/whatsnew/2.3.rst:1157 msgid "(Suggested by Simon Brunning and implemented by Walter Dörwald.)" -msgstr "" +msgstr "(Sugerido por Simon Brunning y aplicado por Walter Dörwald)" #: ../Doc/whatsnew/2.3.rst:1159 msgid "" "The :meth:`startswith` and :meth:`endswith` string methods now accept " "negative numbers for the *start* and *end* parameters." msgstr "" +"Los métodos de cadena :meth:`startswith` y :meth:`endswith` ahora aceptan " +"números negativos para los parámetros *start* y *end*." #: ../Doc/whatsnew/2.3.rst:1162 msgid "" @@ -1269,10 +1784,14 @@ msgid "" "left until it's the specified width. Note that the ``%`` operator is still " "more flexible and powerful than :meth:`zfill`. ::" msgstr "" +"Otro nuevo método de cadena es :meth:`zfill`, originalmente una función del " +"módulo :mod:`string`. :meth:`zfill` rellena una cadena numérica con ceros a " +"la izquierda hasta que tenga el ancho especificado. Tenga en cuenta que el " +"operador ``%`` sigue siendo más flexible y potente que :meth:`zfill`. ::" #: ../Doc/whatsnew/2.3.rst:1174 msgid "(Contributed by Walter Dörwald.)" -msgstr "" +msgstr "(Contribución de Walter Dörwald.)" #: ../Doc/whatsnew/2.3.rst:1176 msgid "" @@ -1281,6 +1800,11 @@ msgid "" "basestring)`` will return :const:`True` for either kind of string. It's a " "completely abstract type, so you can't create :class:`basestring` instances." msgstr "" +"Se ha añadido un nuevo tipo de objeto, :class:`basestring`. Tanto las " +"cadenas de 8 bits como las cadenas Unicode heredan de este tipo, por lo que " +"``isinstance(obj, basestring)`` retornará :const:`True` para cualquier tipo " +"de cadena. Es un tipo completamente abstracto, por lo que no se pueden " +"crear instancias de :class:`basestring`." #: ../Doc/whatsnew/2.3.rst:1181 msgid "" @@ -1288,22 +1812,30 @@ msgid "" "the usual way when the only reference to them is from the internal " "dictionary of interned strings. (Implemented by Oren Tirosh.)" msgstr "" +"Las cadenas internas ya no son inmortales y ahora serán recolectadas de la " +"forma habitual cuando la única referencia a ellas sea desde el diccionario " +"interno de cadenas internas. (Implementado por Oren Tirosh)" #: ../Doc/whatsnew/2.3.rst:1189 msgid "Optimizations" -msgstr "" +msgstr "Optimizaciones" #: ../Doc/whatsnew/2.3.rst:1191 msgid "" "The creation of new-style class instances has been made much faster; they're " "now faster than classic classes!" msgstr "" +"La creación de instancias de clases de estilo nuevo se ha hecho mucho más " +"rápida; ¡ahora son más rápidas que las clases clásicas!" #: ../Doc/whatsnew/2.3.rst:1194 msgid "" "The :meth:`sort` method of list objects has been extensively rewritten by " "Tim Peters, and the implementation is significantly faster." msgstr "" +"El método :meth:`sort` de los objetos de la lista ha sido ampliamente " +"reescrito por Tim Peters, y la implementación es significativamente más " +"rápida." #: ../Doc/whatsnew/2.3.rst:1197 msgid "" @@ -1313,6 +1845,11 @@ msgid "" "(Original patch by Christopher A. Craig, and significantly reworked by Tim " "Peters.)" msgstr "" +"La multiplicación de enteros largos es ahora mucho más rápida gracias a una " +"implementación de la multiplicación Karatsuba, un algoritmo que escala mejor " +"que el O(n\\*n) requerido para el algoritmo de multiplicación de la escuela " +"primaria. (Parche original de Christopher A. Craig, y reelaborado " +"significativamente por Tim Peters)" #: ../Doc/whatsnew/2.3.rst:1202 msgid "" @@ -1320,6 +1857,10 @@ msgid "" "increase, depending on your compiler's idiosyncrasies. See section :ref:" "`23section-other` for a longer explanation. (Removed by Michael Hudson.)" msgstr "" +"El opcode ``SET_LINENO`` ha desaparecido. Esto puede proporcionar un " +"pequeño aumento de velocidad, dependiendo de la idiosincrasia de su " +"compilador. Vea la sección :ref:`23section-other` para una explicación más " +"larga. (Eliminado por Michael Hudson)" #: ../Doc/whatsnew/2.3.rst:1206 msgid "" @@ -1327,6 +1868,9 @@ msgid "" "xrange(n)`` slightly faster than ``for i in range(n)``. (Patch by Raymond " "Hettinger.)" msgstr "" +"Los objetos :func:`xrange` tienen ahora su propio iterador, haciendo que " +"``for i in xrange(n)`` sea ligeramente más rápido que ``for i in " +"range(n)``. (Parche de Raymond Hettinger)" #: ../Doc/whatsnew/2.3.rst:1210 msgid "" @@ -1335,16 +1879,22 @@ msgid "" "(Implemented mostly by GvR, but lots of people have contributed single " "changes.)" msgstr "" +"Se han realizado una serie de pequeños reajustes en varios puntos " +"conflictivos para mejorar el rendimiento, como por ejemplo alinear una " +"función o eliminar algo de código. (Implementado principalmente por GvR, " +"pero mucha gente ha contribuido con cambios individuales)" #: ../Doc/whatsnew/2.3.rst:1214 msgid "" "The net result of the 2.3 optimizations is that Python 2.3 runs the pystone " "benchmark around 25% faster than Python 2.2." msgstr "" +"El resultado neto de las optimizaciones de la versión 2.3 es que Python 2.3 " +"ejecuta el benchmark pystone alrededor de un 25% f más rápido que Python 2.2." #: ../Doc/whatsnew/2.3.rst:1221 msgid "New, Improved, and Deprecated Modules" -msgstr "" +msgstr "Módulos nuevos, mejorados y obsoletos" #: ../Doc/whatsnew/2.3.rst:1223 msgid "" @@ -1354,6 +1904,12 @@ msgid "" "source tree for a more complete list of changes, or look through the CVS " "logs for all the details." msgstr "" +"Como es habitual, la biblioteca estándar de Python ha recibido una serie de " +"mejoras y correcciones de errores. Aquí hay una lista parcial de los " +"cambios más notables, ordenados alfabéticamente por nombre de módulo. " +"Consulte el archivo :file:`Misc/NEWS` en el árbol de fuentes para obtener " +"una lista más completa de los cambios, o busque en los registros de CVS para " +"obtener todos los detalles." #: ../Doc/whatsnew/2.3.rst:1228 msgid "" @@ -1362,6 +1918,11 @@ msgid "" "assignment operator to add another array's contents, and the ``*=`` " "assignment operator to repeat an array. (Contributed by Jason Orendorff.)" msgstr "" +"El módulo :mod:`array` soporta ahora matrices de caracteres Unicode que " +"utilizan el carácter de formato ``'u``. Las matrices también soportan ahora " +"el uso del operador de asignación ``+=`` para añadir el contenido de otra " +"matriz, y el operador de asignación ``*=`` para repetir una matriz. " +"(Contribución de Jason Orendorff)" #: ../Doc/whatsnew/2.3.rst:1233 msgid "" @@ -1369,6 +1930,10 @@ msgid "" "<http://pybsddb.sourceforge.net>`_ package, providing a more complete " "interface to the transactional features of the BerkeleyDB library." msgstr "" +"El módulo :mod:`bsddb` ha sido reemplazado por la versión 4.1.6 del paquete " +"`PyBSDDB <http://pybsddb.sourceforge.net>`_, proporcionando una interfaz más " +"completa para las características transaccionales de la biblioteca " +"BerkeleyDB." #: ../Doc/whatsnew/2.3.rst:1237 msgid "" @@ -1385,6 +1950,19 @@ msgid "" "importing it as :mod:`bsddb3`, you will have to change your ``import`` " "statements to import it as :mod:`bsddb`." msgstr "" +"La antigua versión del módulo ha sido renombrada como :mod:`bsddb185` y ya " +"no se construye automáticamente; tendrás que editar :file:`Modules/Setup` " +"para activarlo. Ten en cuenta que el nuevo paquete :mod:`bsddb` está " +"pensado para ser compatible con el módulo antiguo, así que asegúrate de " +"enviar errores si descubres alguna incompatibilidad. Al actualizar a Python " +"2.3, si el nuevo intérprete se compila con una nueva versión de la " +"biblioteca BerkeleyDB subyacente, es casi seguro que tendrá que convertir " +"sus archivos de base de datos a la nueva versión. Puede hacerlo fácilmente " +"con los nuevos scripts :file:`db2pickle.py` y :file:`pickle2db.py` que " +"encontrará en el directorio :file:`Tools/scripts` de la distribución. Si ya " +"ha estado utilizando el paquete PyBSDDB e importándolo como :mod:`bsddb3`, " +"tendrá que cambiar sus sentencias ``import`` para importarlo como :mod:" +"`bsddb`." #: ../Doc/whatsnew/2.3.rst:1249 msgid "" @@ -1392,12 +1970,19 @@ msgid "" "library. bz2-compressed data is usually smaller than corresponding :mod:" "`zlib`\\ -compressed data. (Contributed by Gustavo Niemeyer.)" msgstr "" +"El nuevo módulo :mod:`bz2` es una interfaz para la biblioteca de compresión " +"de datos bz2. Los datos comprimidos con bz2 suelen ser más pequeños que los " +"correspondientes datos comprimidos con :mod:`zlib`. (Contribución de Gustavo " +"Niemeyer)" #: ../Doc/whatsnew/2.3.rst:1253 msgid "" "A set of standard date/time types has been added in the new :mod:`datetime` " "module. See the following section for more details." msgstr "" +"Se ha añadido un conjunto de tipos de fecha/hora estándar en el nuevo " +"módulo :mod:`datetime`. Consulte la siguiente sección para obtener más " +"detalles." #: ../Doc/whatsnew/2.3.rst:1256 msgid "" @@ -1408,12 +1993,20 @@ msgid "" "includes the header file :file:`sample.h`, you would create the :class:" "`Extension` object like this::" msgstr "" +"La clase Distutils :class:`Extension` soporta ahora un argumento constructor " +"extra llamado *depends* para listar archivos fuente adicionales de los que " +"depende una extensión. Esto permite a Distutils recompilar el módulo si se " +"modifica alguno de los archivos de dependencia. Por ejemplo, si :file:" +"`sampmodule.c` incluye el fichero de cabecera :file:`sample.h`, se crearía " +"el objeto :class:`Extension` así::" #: ../Doc/whatsnew/2.3.rst:1267 msgid "" "Modifying :file:`sample.h` would then cause the module to be recompiled. " "(Contributed by Jeremy Hylton.)" msgstr "" +"La modificación de :file:`sample.h` haría que el módulo se recompilara. " +"(Contribución de Jeremy Hylton)" #: ../Doc/whatsnew/2.3.rst:1270 msgid "" @@ -1422,6 +2015,10 @@ msgid "" "environment variables, using them to override the settings in Python's " "configuration (contributed by Robert Weber)." msgstr "" +"Otros cambios menores en Distutils: ahora comprueba las variables de " +"entorno :envvar:`CC`, :envvar:`CFLAGS`, :envvar:`CPP`, :envvar:`LDFLAGS` y :" +"envvar:`CPPFLAGS`, utilizándolas para anular los ajustes de la configuración " +"de Python (contribución de Robert Weber)." #: ../Doc/whatsnew/2.3.rst:1275 msgid "" @@ -1430,12 +2027,18 @@ msgid "" "private ones as well. The :func:`DocTestSuite` function creates a :class:" "`unittest.TestSuite` object from a set of :mod:`doctest` tests." msgstr "" +"Anteriormente el módulo :mod:`doctest` sólo buscaba casos de prueba en los " +"docstrings de los métodos y funciones públicos, pero ahora también examina " +"los privados. La función :func:`DocTestSuite` crea un objeto :class:" +"`unittest.TestSuite` a partir de un conjunto de pruebas :mod:`doctest`." #: ../Doc/whatsnew/2.3.rst:1280 msgid "" "The new ``gc.get_referents(object)`` function returns a list of all the " "objects referenced by *object*." msgstr "" +"La nueva función ``gc.get_referents(object)`` retorna una lista de todos los " +"objetos referenciados por *object*." #: ../Doc/whatsnew/2.3.rst:1283 msgid "" @@ -1446,20 +2049,29 @@ msgid "" "mode processing continues, meaning that options and arguments can be mixed. " "For example::" msgstr "" +"El módulo :mod:`getopt` ha ganado una nueva función, :func:`gnu_getopt`, que " +"admite los mismos argumentos que la función :func:`getopt` existente, pero " +"utiliza el modo de exploración al estilo GNU. La función :func:`getopt` " +"existente deja de procesar las opciones tan pronto como se encuentra un " +"argumento que no es una opción, pero en el modo GNU el procesamiento " +"continúa, lo que significa que las opciones y los argumentos pueden " +"mezclarse. Por ejemplo::" #: ../Doc/whatsnew/2.3.rst:1294 msgid "(Contributed by Peter Åstrand.)" -msgstr "" +msgstr "(Contribución de Peter Åstrand.)" #: ../Doc/whatsnew/2.3.rst:1296 msgid "" "The :mod:`grp`, :mod:`pwd`, and :mod:`resource` modules now return enhanced " "tuples::" msgstr "" +"Los módulos :mod:`grp`, :mod:`pwd` y :mod:`resource` retornan ahora tuplas " +"mejoradas::" #: ../Doc/whatsnew/2.3.rst:1304 msgid "The :mod:`gzip` module can now handle files exceeding 2 GiB." -msgstr "" +msgstr "El módulo :mod:`gzip` ahora puede manejar archivos de más de 2 GiB." #: ../Doc/whatsnew/2.3.rst:1306 msgid "" @@ -1471,6 +2083,14 @@ msgid "" "is O(lg n). (See https://xlinux.nist.gov/dads//HTML/priorityque.html for " "more information about the priority queue data structure.)" msgstr "" +"El nuevo módulo :mod:`heapq` contiene una implementación de un algoritmo de " +"colas de montón. Un montón es una estructura de datos similar a un array " +"que mantiene los elementos en un orden parcialmente ordenado de forma que, " +"para cada índice *k*, ``heap[k] <= heap[2*k+1]`` y ``heap[k] <= heap[2*k" +"+2]``. Esto hace que sea rápido eliminar el elemento más pequeño, y la " +"inserción de un nuevo elemento manteniendo la propiedad del montón es *O(lg " +"n)*. (Véase https://xlinux.nist.gov/dads//HTML/priorityque.html para más " +"información sobre la estructura de datos de la cola de prioridad)" #: ../Doc/whatsnew/2.3.rst:1314 msgid "" @@ -1479,10 +2099,14 @@ msgid "" "on top of some other mutable Python sequence type. Here's an example that " "uses a Python list::" msgstr "" +"El módulo :mod:`heapq` proporciona las funciones :func:`heappush` y :func:" +"`heappop` para añadir y eliminar elementos manteniendo la propiedad del " +"montón sobre algún otro tipo de secuencia mutable de Python. Aquí hay un " +"ejemplo que utiliza una lista de Python::" #: ../Doc/whatsnew/2.3.rst:1332 msgid "(Contributed by Kevin O'Connor.)" -msgstr "" +msgstr "(Contribución de Kevin O'Connor.)" #: ../Doc/whatsnew/2.3.rst:1334 msgid "" @@ -1493,1019 +2117,9 @@ msgid "" "operations. IDLE's core code has been incorporated into the standard library " "as the :mod:`idlelib` package." msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1340 -msgid "" -"The :mod:`imaplib` module now supports IMAP over SSL. (Contributed by Piers " -"Lauder and Tino Lange.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1343 -msgid "" -"The :mod:`itertools` contains a number of useful functions for use with " -"iterators, inspired by various functions provided by the ML and Haskell " -"languages. For example, ``itertools.ifilter(predicate, iterator)`` returns " -"all elements in the iterator for which the function :func:`predicate` " -"returns :const:`True`, and ``itertools.repeat(obj, N)`` returns ``obj`` *N* " -"times. There are a number of other functions in the module; see the " -"package's reference documentation for details. (Contributed by Raymond " -"Hettinger.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1352 -msgid "" -"Two new functions in the :mod:`math` module, ``degrees(rads)`` and " -"``radians(degs)``, convert between radians and degrees. Other functions in " -"the :mod:`math` module such as :func:`math.sin` and :func:`math.cos` have " -"always required input values measured in radians. Also, an optional *base* " -"argument was added to :func:`math.log` to make it easier to compute " -"logarithms for bases other than ``e`` and ``10``. (Contributed by Raymond " -"Hettinger.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1359 -msgid "" -"Several new POSIX functions (:func:`getpgid`, :func:`killpg`, :func:" -"`lchown`, :func:`loadavg`, :func:`major`, :func:`makedev`, :func:`minor`, " -"and :func:`mknod`) were added to the :mod:`posix` module that underlies the :" -"mod:`os` module. (Contributed by Gustavo Niemeyer, Geert Jansen, and Denis " -"S. Otkidach.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1365 -msgid "" -"In the :mod:`os` module, the :func:`\\*stat` family of functions can now " -"report fractions of a second in a timestamp. Such time stamps are " -"represented as floats, similar to the value returned by :func:`time.time`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1369 -msgid "" -"During testing, it was found that some applications will break if time " -"stamps are floats. For compatibility, when using the tuple interface of " -"the :class:`stat_result` time stamps will be represented as integers. When " -"using named fields (a feature first introduced in Python 2.2), time stamps " -"are still represented as integers, unless :func:`os.stat_float_times` is " -"invoked to enable float return values::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1382 -msgid "In Python 2.4, the default will change to always returning floats." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1384 -msgid "" -"Application developers should enable this feature only if all their " -"libraries work properly when confronted with floating point time stamps, or " -"if they use the tuple API. If used, the feature should be activated on an " -"application level instead of trying to enable it on a per-use basis." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1389 -msgid "" -"The :mod:`optparse` module contains a new parser for command-line arguments " -"that can convert option values to a particular Python type and will " -"automatically generate a usage message. See the following section for more " -"details." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1394 -msgid "" -"The old and never-documented :mod:`linuxaudiodev` module has been " -"deprecated, and a new version named :mod:`ossaudiodev` has been added. The " -"module was renamed because the OSS sound drivers can be used on platforms " -"other than Linux, and the interface has also been tidied and brought up to " -"date in various ways. (Contributed by Greg Ward and Nicholas FitzRoy-Dale.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1400 -msgid "" -"The new :mod:`platform` module contains a number of functions that try to " -"determine various properties of the platform you're running on. There are " -"functions for getting the architecture, CPU type, the Windows OS version, " -"and even the Linux distribution version. (Contributed by Marc-André Lemburg.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1405 -msgid "" -"The parser objects provided by the :mod:`pyexpat` module can now optionally " -"buffer character data, resulting in fewer calls to your character data " -"handler and therefore faster performance. Setting the parser object's :attr:" -"`buffer_text` attribute to :const:`True` will enable buffering." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1410 -msgid "" -"The ``sample(population, k)`` function was added to the :mod:`random` " -"module. *population* is a sequence or :class:`xrange` object containing the " -"elements of a population, and :func:`sample` chooses *k* elements from the " -"population without replacing chosen elements. *k* can be any value up to " -"``len(population)``. For example::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1432 -msgid "" -"The :mod:`random` module now uses a new algorithm, the Mersenne Twister, " -"implemented in C. It's faster and more extensively studied than the " -"previous algorithm." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1436 -msgid "(All changes contributed by Raymond Hettinger.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1438 -msgid "" -"The :mod:`readline` module also gained a number of new functions: :func:" -"`get_history_item`, :func:`get_current_history_length`, and :func:" -"`redisplay`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1442 -msgid "" -"The :mod:`rexec` and :mod:`Bastion` modules have been declared dead, and " -"attempts to import them will fail with a :exc:`RuntimeError`. New-style " -"classes provide new ways to break out of the restricted execution " -"environment provided by :mod:`rexec`, and no one has interest in fixing them " -"or time to do so. If you have applications using :mod:`rexec`, rewrite them " -"to use something else." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1448 -msgid "" -"(Sticking with Python 2.2 or 2.1 will not make your applications any safer " -"because there are known bugs in the :mod:`rexec` module in those versions. " -"To repeat: if you're using :mod:`rexec`, stop using it immediately.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1452 -msgid "" -"The :mod:`rotor` module has been deprecated because the algorithm it uses " -"for encryption is not believed to be secure. If you need encryption, use " -"one of the several AES Python modules that are available separately." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1456 -msgid "" -"The :mod:`shutil` module gained a ``move(src, dest)`` function that " -"recursively moves a file or directory to a new location." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1459 -msgid "" -"Support for more advanced POSIX signal handling was added to the :mod:" -"`signal` but then removed again as it proved impossible to make it work " -"reliably across platforms." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1463 -msgid "" -"The :mod:`socket` module now supports timeouts. You can call the " -"``settimeout(t)`` method on a socket object to set a timeout of *t* seconds. " -"Subsequent socket operations that take longer than *t* seconds to complete " -"will abort and raise a :exc:`socket.timeout` exception." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1468 -msgid "" -"The original timeout implementation was by Tim O'Malley. Michael Gilfix " -"integrated it into the Python :mod:`socket` module and shepherded it through " -"a lengthy review. After the code was checked in, Guido van Rossum rewrote " -"parts of it. (This is a good example of a collaborative development process " -"in action.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1474 -msgid "" -"On Windows, the :mod:`socket` module now ships with Secure Sockets Layer " -"(SSL) support." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1477 -msgid "" -"The value of the C :const:`PYTHON_API_VERSION` macro is now exposed at the " -"Python level as ``sys.api_version``. The current exception can be cleared " -"by calling the new :func:`sys.exc_clear` function." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1481 -msgid "" -"The new :mod:`tarfile` module allows reading from and writing to :program:" -"`tar`\\ -format archive files. (Contributed by Lars Gustäbel.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1484 -msgid "" -"The new :mod:`textwrap` module contains functions for wrapping strings " -"containing paragraphs of text. The ``wrap(text, width)`` function takes a " -"string and returns a list containing the text split into lines of no more " -"than the chosen width. The ``fill(text, width)`` function returns a single " -"string, reformatted to fit into lines no longer than the chosen width. (As " -"you can guess, :func:`fill` is built on top of :func:`wrap`. For example::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1506 -msgid "" -"The module also contains a :class:`TextWrapper` class that actually " -"implements the text wrapping strategy. Both the :class:`TextWrapper` class " -"and the :func:`wrap` and :func:`fill` functions support a number of " -"additional keyword arguments for fine-tuning the formatting; consult the " -"module's documentation for details. (Contributed by Greg Ward.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1512 -msgid "" -"The :mod:`thread` and :mod:`threading` modules now have companion modules, :" -"mod:`dummy_thread` and :mod:`dummy_threading`, that provide a do-nothing " -"implementation of the :mod:`thread` module's interface for platforms where " -"threads are not supported. The intention is to simplify thread-aware " -"modules (ones that *don't* rely on threads to run) by putting the following " -"code at the top::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1524 -msgid "" -"In this example, :mod:`_threading` is used as the module name to make it " -"clear that the module being used is not necessarily the actual :mod:" -"`threading` module. Code can call functions and use classes in :mod:" -"`_threading` whether or not threads are supported, avoiding an :keyword:`if` " -"statement and making the code slightly clearer. This module will not " -"magically make multithreaded code run without threads; code that waits for " -"another thread to return or to do something will simply hang forever." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1532 -msgid "" -"The :mod:`time` module's :func:`strptime` function has long been an " -"annoyance because it uses the platform C library's :func:`strptime` " -"implementation, and different platforms sometimes have odd bugs. Brett " -"Cannon contributed a portable implementation that's written in pure Python " -"and should behave identically on all platforms." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1538 -msgid "" -"The new :mod:`timeit` module helps measure how long snippets of Python code " -"take to execute. The :file:`timeit.py` file can be run directly from the " -"command line, or the module's :class:`Timer` class can be imported and used " -"directly. Here's a short example that figures out whether it's faster to " -"convert an 8-bit string to Unicode by appending an empty Unicode string to " -"it or by using the :func:`unicode` function::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1558 -msgid "" -"The :mod:`Tix` module has received various bug fixes and updates for the " -"current version of the Tix package." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1561 -msgid "" -"The :mod:`Tkinter` module now works with a thread-enabled version of Tcl. " -"Tcl's threading model requires that widgets only be accessed from the thread " -"in which they're created; accesses from another thread can cause Tcl to " -"panic. For certain Tcl interfaces, :mod:`Tkinter` will now automatically " -"avoid this when a widget is accessed from a different thread by marshalling " -"a command, passing it to the correct thread, and waiting for the results. " -"Other interfaces can't be handled automatically but :mod:`Tkinter` will now " -"raise an exception on such an access so that you can at least find out about " -"the problem. See https://mail.python.org/pipermail/python-dev/2002-" -"December/031107.html for a more detailed explanation of this change. " -"(Implemented by Martin von Löwis.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1572 -msgid "" -"Calling Tcl methods through :mod:`_tkinter` no longer returns only strings. " -"Instead, if Tcl returns other objects those objects are converted to their " -"Python equivalent, if one exists, or wrapped with a :class:`_tkinter." -"Tcl_Obj` object if no Python equivalent exists. This behavior can be " -"controlled through the :meth:`wantobjects` method of :class:`tkapp` objects." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1578 -msgid "" -"When using :mod:`_tkinter` through the :mod:`Tkinter` module (as most " -"Tkinter applications will), this feature is always activated. It should not " -"cause compatibility problems, since Tkinter would always convert string " -"results to Python types where possible." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1583 -msgid "" -"If any incompatibilities are found, the old behavior can be restored by " -"setting the :attr:`wantobjects` variable in the :mod:`Tkinter` module to " -"false before creating the first :class:`tkapp` object. ::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1590 -msgid "Any breakage caused by this change should be reported as a bug." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1592 -msgid "" -"The :mod:`UserDict` module has a new :class:`DictMixin` class which defines " -"all dictionary methods for classes that already have a minimum mapping " -"interface. This greatly simplifies writing classes that need to be " -"substitutable for dictionaries, such as the classes in the :mod:`shelve` " -"module." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1598 -msgid "" -"Adding the mix-in as a superclass provides the full dictionary interface " -"whenever the class defines :meth:`__getitem__`, :meth:`__setitem__`, :meth:" -"`__delitem__`, and :meth:`keys`. For example::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1639 -msgid "(Contributed by Raymond Hettinger.)" -msgstr "(Contribución de Raymond Hettinger.)" - -#: ../Doc/whatsnew/2.3.rst:1641 -msgid "" -"The DOM implementation in :mod:`xml.dom.minidom` can now generate XML output " -"in a particular encoding by providing an optional encoding argument to the :" -"meth:`toxml` and :meth:`toprettyxml` methods of DOM nodes." -msgstr "" -"La implementación de DOM en :mod:`xml.dom.minidom` puede ahora generar la " -"salida XML en una codificación particular proporcionando un argumento de " -"codificación opcional a los métodos :meth:`toxml` y :meth:`toprettyxml` de " -"los nodos DOM." - -#: ../Doc/whatsnew/2.3.rst:1645 -msgid "" -"The :mod:`xmlrpclib` module now supports an XML-RPC extension for handling " -"nil data values such as Python's ``None``. Nil values are always supported " -"on unmarshalling an XML-RPC response. To generate requests containing " -"``None``, you must supply a true value for the *allow_none* parameter when " -"creating a :class:`Marshaller` instance." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1651 -msgid "" -"The new :mod:`DocXMLRPCServer` module allows writing self-documenting XML-" -"RPC servers. Run it in demo mode (as a program) to see it in action. " -"Pointing the Web browser to the RPC server produces pydoc-style " -"documentation; pointing xmlrpclib to the server allows invoking the actual " -"methods. (Contributed by Brian Quinlan.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1657 -msgid "" -"Support for internationalized domain names (RFCs 3454, 3490, 3491, and 3492) " -"has been added. The \"idna\" encoding can be used to convert between a " -"Unicode domain name and the ASCII-compatible encoding (ACE) of that name. ::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1664 -msgid "" -"The :mod:`socket` module has also been extended to transparently convert " -"Unicode hostnames to the ACE version before passing them to the C library. " -"Modules that deal with hostnames such as :mod:`httplib` and :mod:`ftplib`) " -"also support Unicode host names; :mod:`httplib` also sends HTTP ``Host`` " -"headers using the ACE version of the domain name. :mod:`urllib` supports " -"Unicode URLs with non-ASCII host names as long as the ``path`` part of the " -"URL is ASCII only." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1672 -msgid "" -"To implement this change, the :mod:`stringprep` module, the " -"``mkstringprep`` tool and the ``punycode`` encoding have been added." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1679 -msgid "Date/Time Type" -msgstr "Tipo de fecha/hora" - -#: ../Doc/whatsnew/2.3.rst:1681 -msgid "" -"Date and time types suitable for expressing timestamps were added as the :" -"mod:`datetime` module. The types don't support different calendars or many " -"fancy features, and just stick to the basics of representing time." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1685 -msgid "" -"The three primary types are: :class:`date`, representing a day, month, and " -"year; :class:`~datetime.time`, consisting of hour, minute, and second; and :" -"class:`~datetime.datetime`, which contains all the attributes of both :class:" -"`date` and :class:`~datetime.time`. There's also a :class:`timedelta` class " -"representing differences between two points in time, and time zone logic is " -"implemented by classes inheriting from the abstract :class:`tzinfo` class." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1692 -msgid "" -"You can create instances of :class:`date` and :class:`~datetime.time` by " -"either supplying keyword arguments to the appropriate constructor, e.g. " -"``datetime.date(year=1972, month=10, day=15)``, or by using one of a number " -"of class methods. For example, the :meth:`date.today` class method returns " -"the current local date." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1698 -msgid "" -"Once created, instances of the date/time classes are all immutable. There " -"are a number of methods for producing formatted strings from objects::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1710 -msgid "" -"The :meth:`replace` method allows modifying one or more fields of a :class:" -"`date` or :class:`~datetime.datetime` instance, returning a new instance::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1720 -msgid "" -"Instances can be compared, hashed, and converted to strings (the result is " -"the same as that of :meth:`isoformat`). :class:`date` and :class:`~datetime." -"datetime` instances can be subtracted from each other, and added to :class:" -"`timedelta` instances. The largest missing feature is that there's no " -"standard library support for parsing strings and getting back a :class:" -"`date` or :class:`~datetime.datetime`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1727 -msgid "" -"For more information, refer to the module's reference documentation. " -"(Contributed by Tim Peters.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1734 -msgid "The optparse Module" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1736 -msgid "" -"The :mod:`getopt` module provides simple parsing of command-line arguments. " -"The new :mod:`optparse` module (originally named Optik) provides more " -"elaborate command-line parsing that follows the Unix conventions, " -"automatically creates the output for :option:`!--help`, and can perform " -"different actions for different options." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1742 -msgid "" -"You start by creating an instance of :class:`OptionParser` and telling it " -"what your program's options are. ::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1756 -msgid "" -"Parsing a command line is then done by calling the :meth:`parse_args` " -"method. ::" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1762 -msgid "" -"This returns an object containing all of the option values, and a list of " -"strings containing the remaining arguments." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1765 -msgid "" -"Invoking the script with the various arguments now works as you'd expect it " -"to. Note that the length argument is automatically converted to an integer." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1778 -msgid "The help message is automatically generated for you:" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1793 -msgid "See the module's documentation for more details." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1796 -msgid "" -"Optik was written by Greg Ward, with suggestions from the readers of the " -"Getopt SIG." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1805 -msgid "Pymalloc: A Specialized Object Allocator" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1807 -msgid "" -"Pymalloc, a specialized object allocator written by Vladimir Marangozov, was " -"a feature added to Python 2.1. Pymalloc is intended to be faster than the " -"system :c:func:`malloc` and to have less memory overhead for allocation " -"patterns typical of Python programs. The allocator uses C's :c:func:`malloc` " -"function to get large pools of memory and then fulfills smaller memory " -"requests from these pools." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1813 -msgid "" -"In 2.1 and 2.2, pymalloc was an experimental feature and wasn't enabled by " -"default; you had to explicitly enable it when compiling Python by providing " -"the :option:`!--with-pymalloc` option to the :program:`configure` script. " -"In 2.3, pymalloc has had further enhancements and is now enabled by default; " -"you'll have to supply :option:`!--without-pymalloc` to disable it." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1819 -msgid "" -"This change is transparent to code written in Python; however, pymalloc may " -"expose bugs in C extensions. Authors of C extension modules should test " -"their code with pymalloc enabled, because some incorrect code may cause core " -"dumps at runtime." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1824 -msgid "" -"There's one particularly common error that causes problems. There are a " -"number of memory allocation functions in Python's C API that have previously " -"just been aliases for the C library's :c:func:`malloc` and :c:func:`free`, " -"meaning that if you accidentally called mismatched functions the error " -"wouldn't be noticeable. When the object allocator is enabled, these " -"functions aren't aliases of :c:func:`malloc` and :c:func:`free` any more, " -"and calling the wrong function to free memory may get you a core dump. For " -"example, if memory was allocated using :c:func:`PyObject_Malloc`, it has to " -"be freed using :c:func:`PyObject_Free`, not :c:func:`free`. A few modules " -"included with Python fell afoul of this and had to be fixed; doubtless there " -"are more third-party modules that will have the same problem." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1836 -msgid "" -"As part of this change, the confusing multiple interfaces for allocating " -"memory have been consolidated down into two API families. Memory allocated " -"with one family must not be manipulated with functions from the other " -"family. There is one family for allocating chunks of memory and another " -"family of functions specifically for allocating Python objects." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1842 -msgid "" -"To allocate and free an undistinguished chunk of memory use the \"raw memory" -"\" family: :c:func:`PyMem_Malloc`, :c:func:`PyMem_Realloc`, and :c:func:" -"`PyMem_Free`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1845 -msgid "" -"The \"object memory\" family is the interface to the pymalloc facility " -"described above and is biased towards a large number of \"small\" " -"allocations: :c:func:`PyObject_Malloc`, :c:func:`PyObject_Realloc`, and :c:" -"func:`PyObject_Free`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1849 -msgid "" -"To allocate and free Python objects, use the \"object\" family :c:func:" -"`PyObject_New`, :c:func:`PyObject_NewVar`, and :c:func:`PyObject_Del`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1852 -msgid "" -"Thanks to lots of work by Tim Peters, pymalloc in 2.3 also provides " -"debugging features to catch memory overwrites and doubled frees in both " -"extension modules and in the interpreter itself. To enable this support, " -"compile a debugging version of the Python interpreter by running :program:" -"`configure` with :option:`!--with-pydebug`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1858 -msgid "" -"To aid extension writers, a header file :file:`Misc/pymemcompat.h` is " -"distributed with the source to Python 2.3 that allows Python extensions to " -"use the 2.3 interfaces to memory allocation while compiling against any " -"version of Python since 1.5.2. You would copy the file from Python's source " -"distribution and bundle it with the source of your extension." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1869 -msgid "https://hg.python.org/cpython/file/default/Objects/obmalloc.c" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1868 -msgid "" -"For the full details of the pymalloc implementation, see the comments at the " -"top of the file :file:`Objects/obmalloc.c` in the Python source code. The " -"above link points to the file within the python.org SVN browser." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1876 -msgid "Build and C API Changes" -msgstr "Cambios en la API de construcción y C" - -#: ../Doc/whatsnew/2.3.rst:1878 -msgid "Changes to Python's build process and to the C API include:" -msgstr "" -"Los cambios en el proceso de build de Python y en la API de C incluyen:" - -#: ../Doc/whatsnew/2.3.rst:1880 -msgid "" -"The cycle detection implementation used by the garbage collection has proven " -"to be stable, so it's now been made mandatory. You can no longer compile " -"Python without it, and the :option:`!--with-cycle-gc` switch to :program:" -"`configure` has been removed." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1885 -msgid "" -"Python can now optionally be built as a shared library (:file:`libpython2.3." -"so`) by supplying :option:`!--enable-shared` when running Python's :program:" -"`configure` script. (Contributed by Ondrej Palkovsky.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1889 -msgid "" -"The :c:macro:`DL_EXPORT` and :c:macro:`DL_IMPORT` macros are now deprecated. " -"Initialization functions for Python extension modules should now be declared " -"using the new macro :c:macro:`PyMODINIT_FUNC`, while the Python core will " -"generally use the :c:macro:`PyAPI_FUNC` and :c:macro:`PyAPI_DATA` macros." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1894 -msgid "" -"The interpreter can be compiled without any docstrings for the built-in " -"functions and modules by supplying :option:`!--without-doc-strings` to the :" -"program:`configure` script. This makes the Python executable about 10% " -"smaller, but will also mean that you can't get help for Python's built-ins. " -"(Contributed by Gustavo Niemeyer.)" -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1900 -msgid "" -"The :c:func:`PyArg_NoArgs` macro is now deprecated, and code that uses it " -"should be changed. For Python 2.2 and later, the method definition table " -"can specify the :const:`METH_NOARGS` flag, signalling that there are no " -"arguments, and the argument checking can then be removed. If compatibility " -"with pre-2.2 versions of Python is important, the code could use " -"``PyArg_ParseTuple(args, \"\")`` instead, but this will be slower than " -"using :const:`METH_NOARGS`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1907 -msgid "" -":c:func:`PyArg_ParseTuple` accepts new format characters for various sizes " -"of unsigned integers: ``B`` for :c:type:`unsigned char`, ``H`` for :c:type:" -"`unsigned short int`, ``I`` for :c:type:`unsigned int`, and ``K`` for :c:" -"type:`unsigned long long`." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1912 -msgid "" -"A new function, ``PyObject_DelItemString(mapping, char *key)`` was added as " -"shorthand for ``PyObject_DelItem(mapping, PyString_New(key))``." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1915 -msgid "" -"File objects now manage their internal string buffer differently, increasing " -"it exponentially when needed. This results in the benchmark tests in :file:" -"`Lib/test/test_bufio.py` speeding up considerably (from 57 seconds to 1.7 " -"seconds, according to one measurement)." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1920 -msgid "" -"It's now possible to define class and static methods for a C extension type " -"by setting either the :const:`METH_CLASS` or :const:`METH_STATIC` flags in a " -"method's :c:type:`PyMethodDef` structure." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1924 -msgid "" -"Python now includes a copy of the Expat XML parser's source code, removing " -"any dependence on a system version or local installation of Expat." -msgstr "" - -#: ../Doc/whatsnew/2.3.rst:1927 -msgid "" -"If you dynamically allocate type objects in your extension, you should be " -"aware of a change in the rules relating to the :attr:`__module__` and :attr:" -"`~definition.__name__` attributes. In summary, you will want to ensure the " -"type's dictionary contains a ``'__module__'`` key; making the module name " -"the part of the type name leading up to the final period will no longer have " -"the desired effect. For more detail, read the API reference documentation " -"or the source." -msgstr "" -"Si asigna dinámicamente objetos de tipo en su extensión, debe tener en " -"cuenta un cambio en las reglas relacionadas con los atributos :attr:" -"`__module__` y :attr:`~definition.__name__`. En resumen, querrá asegurarse " -"de que el diccionario del tipo contiene una clave ``'__module__``; hacer que " -"el nombre del módulo sea la parte del nombre del tipo que va hasta el punto " -"final ya no tendrá el efecto deseado. Para más detalles, lea la " -"documentación de referencia de la API o el código fuente." - -#: ../Doc/whatsnew/2.3.rst:1938 -msgid "Port-Specific Changes" -msgstr "Cambios específicos en los ports" - -#: ../Doc/whatsnew/2.3.rst:1940 -msgid "" -"Support for a port to IBM's OS/2 using the EMX runtime environment was " -"merged into the main Python source tree. EMX is a POSIX emulation layer " -"over the OS/2 system APIs. The Python port for EMX tries to support all the " -"POSIX-like capability exposed by the EMX runtime, and mostly succeeds; :func:" -"`fork` and :func:`fcntl` are restricted by the limitations of the underlying " -"emulation layer. The standard OS/2 port, which uses IBM's Visual Age " -"compiler, also gained support for case-sensitive import semantics as part of " -"the integration of the EMX port into CVS. (Contributed by Andrew MacIntyre.)" -msgstr "" -"El soporte para una adaptación al sistema OS/2 de IBM utilizando el entorno " -"de ejecución EMX se ha incorporado al árbol principal de fuentes de Python. " -"EMX es una capa de emulación POSIX sobre las APIs del sistema OS/2. El " -"puerto de Python para EMX intenta soportar todas las capacidades tipo POSIX " -"expuestas por el tiempo de ejecución de EMX, y en su mayoría lo consigue; :" -"func:`fork` y :func:`fcntl` están restringidas por las limitaciones de la " -"capa de emulación subyacente. El puerto estándar de OS/2, que utiliza el " -"compilador Visual Age de IBM, también obtuvo soporte para la semántica de " -"importación sensible a mayúsculas y minúsculas como parte de la integración " -"del puerto EMX en CVS. (Contribución de Andrew MacIntyre)." - -#: ../Doc/whatsnew/2.3.rst:1949 -msgid "" -"On MacOS, most toolbox modules have been weaklinked to improve backward " -"compatibility. This means that modules will no longer fail to load if a " -"single routine is missing on the current OS version. Instead calling the " -"missing routine will raise an exception. (Contributed by Jack Jansen.)" -msgstr "" -"En MacOS, la mayoría de los módulos de la caja de herramientas se han " -"debilitado para mejorar la compatibilidad con versiones anteriores. Esto " -"significa que los módulos ya no fallarán al cargarse si falta una rutina en " -"la versión actual del sistema operativo. En su lugar, llamar a la rutina que " -"falta lanzará una excepción. (Contribución de Jack Jansen.)" - -#: ../Doc/whatsnew/2.3.rst:1954 -msgid "" -"The RPM spec files, found in the :file:`Misc/RPM/` directory in the Python " -"source distribution, were updated for 2.3. (Contributed by Sean " -"Reifschneider.)" -msgstr "" -"Los archivos de especificaciones RPM, que se encuentran en el directorio :" -"file:`Misc/RPM/` en la distribución de fuentes de Python, fueron " -"actualizados para 2.3. (Contribución de Sean Reifschneider)." - -#: ../Doc/whatsnew/2.3.rst:1957 -msgid "" -"Other new platforms now supported by Python include AtheOS (http://www." -"atheos.cx/), GNU/Hurd, and OpenVMS." -msgstr "" -"Otras plataformas nuevas que ahora soporta Python son AtheOS (http://www." -"atheos.cx/), GNU/Hurd y OpenVMS." - -#: ../Doc/whatsnew/2.3.rst:1966 -msgid "Other Changes and Fixes" -msgstr "Otros cambios y correcciones" - -#: ../Doc/whatsnew/2.3.rst:1968 -msgid "" -"As usual, there were a bunch of other improvements and bugfixes scattered " -"throughout the source tree. A search through the CVS change logs finds " -"there were 523 patches applied and 514 bugs fixed between Python 2.2 and " -"2.3. Both figures are likely to be underestimates." -msgstr "" -"Como es habitual, hay un montón de otras mejoras y correcciones de errores " -"repartidas por el árbol de fuentes. Una búsqueda en los registros de " -"cambios de CVS revela que se aplicaron 523 parches y se corrigieron 514 " -"errores entre Python 2.2 y 2.3. Es probable que ambas cifras estén " -"subestimadas." - -#: ../Doc/whatsnew/2.3.rst:1973 -msgid "Some of the more notable changes are:" -msgstr "Algunos de los cambios más notables son:" - -#: ../Doc/whatsnew/2.3.rst:1975 -msgid "" -"If the :envvar:`PYTHONINSPECT` environment variable is set, the Python " -"interpreter will enter the interactive prompt after running a Python " -"program, as if Python had been invoked with the :option:`-i` option. The " -"environment variable can be set before running the Python interpreter, or it " -"can be set by the Python program as part of its execution." -msgstr "" -"Si se establece la variable de entorno :envvar:`PYTHONINSPECT`, el " -"intérprete de Python entrará en el prompt interactivo después de ejecutar un " -"programa Python, como si Python hubiera sido invocado con la opción :option:" -"`-i`. La variable de entorno se puede establecer antes de ejecutar el " -"intérprete de Python, o puede ser establecida por el programa de Python como " -"parte de su ejecución." - -#: ../Doc/whatsnew/2.3.rst:1981 -msgid "" -"The :file:`regrtest.py` script now provides a way to allow \"all resources " -"except *foo*.\" A resource name passed to the :option:`!-u` option can now " -"be prefixed with a hyphen (``'-'``) to mean \"remove this resource.\" For " -"example, the option '``-uall,-bsddb``' could be used to enable the use of " -"all resources except ``bsddb``." -msgstr "" -"El script :file:`regrtest.py` ahora proporciona una forma de permitir " -"\"todos los recursos excepto *foo*\". Un nombre de recurso pasado a la " -"opción :option:`!-u` puede ahora llevar un prefijo (``'-'``) para significar " -"\"eliminar este recurso\". Por ejemplo, la opción ``-uall,-bsddb`` podría " -"utilizarse para habilitar el uso de todos los recursos excepto ``bsddb``." - -#: ../Doc/whatsnew/2.3.rst:1987 -msgid "" -"The tools used to build the documentation now work under Cygwin as well as " -"Unix." -msgstr "" -"Las herramientas utilizadas para construir la documentación ahora funcionan " -"tanto en Cygwin como en Unix." - -#: ../Doc/whatsnew/2.3.rst:1990 -msgid "" -"The ``SET_LINENO`` opcode has been removed. Back in the mists of time, this " -"opcode was needed to produce line numbers in tracebacks and support trace " -"functions (for, e.g., :mod:`pdb`). Since Python 1.5, the line numbers in " -"tracebacks have been computed using a different mechanism that works with " -"\"python -O\". For Python 2.3 Michael Hudson implemented a similar scheme " -"to determine when to call the trace function, removing the need for " -"``SET_LINENO`` entirely." -msgstr "" -"The ``SET_LINENO`` opcode has been removed. Back in the mists of time, this " -"opcode was needed to produce line numbers in tracebacks and support trace " -"functions (for, e.g., :mod:`pdb`). Since Python 1.5, the line numbers in " -"tracebacks have been computed using a different mechanism that works with " -"\"python -O\". For Python 2.3 Michael Hudson implemented a similar scheme " -"to determine when to call the trace function, removing the need for " -"``SET_LINENO`` entirely." - -#: ../Doc/whatsnew/2.3.rst:1998 -msgid "" -"It would be difficult to detect any resulting difference from Python code, " -"apart from a slight speed up when Python is run without :option:`-O`." -msgstr "" -"Sería difícil detectar cualquier diferencia resultante del código Python, " -"aparte de un ligero aumento de velocidad cuando se ejecuta Python sin :" -"option:`-O`." - -#: ../Doc/whatsnew/2.3.rst:2001 -msgid "" -"C extensions that access the :attr:`f_lineno` field of frame objects should " -"instead call ``PyCode_Addr2Line(f->f_code, f->f_lasti)``. This will have the " -"added effect of making the code work as desired under \"python -O\" in " -"earlier versions of Python." -msgstr "" -"Las extensiones en C que acceden al campo :attr:`f_lineno` de los objetos " -"frame deben llamar en su lugar a ``PyCode_Addr2Line(f->f_code, f-" -">f_lasti)``. Esto tendrá el efecto añadido de hacer que el código funcione " -"como se desea bajo \"python -O\" en versiones anteriores de Python." - -#: ../Doc/whatsnew/2.3.rst:2006 -msgid "" -"A nifty new feature is that trace functions can now assign to the :attr:" -"`f_lineno` attribute of frame objects, changing the line that will be " -"executed next. A ``jump`` command has been added to the :mod:`pdb` debugger " -"taking advantage of this new feature. (Implemented by Richie Hindle.)" -msgstr "" -"Una nueva característica ingeniosa es que las funciones de rastreo pueden " -"ahora asignar al atributo :attr:`f_lineno` de los objetos marco, cambiando " -"la línea que se ejecutará a continuación. Se ha añadido un comando ``jump`` " -"al depurador :mod:`pdb` aprovechando esta nueva característica. " -"(Implementado por Richie Hindle)." - -#: ../Doc/whatsnew/2.3.rst:2015 -msgid "Porting to Python 2.3" -msgstr "Adaptación a Python 2.3" - -#: ../Doc/whatsnew/2.3.rst:2017 -msgid "" -"This section lists previously described changes that may require changes to " -"your code:" -msgstr "" -"Esta sección enumera los cambios descritos anteriormente que pueden requerir " -"cambios en su código:" - -#: ../Doc/whatsnew/2.3.rst:2020 -msgid "" -":keyword:`yield` is now always a keyword; if it's used as a variable name in " -"your code, a different name must be chosen." -msgstr "" -":keyword:`yield` es ahora siempre una palabra clave; si se utiliza como " -"nombre de variable en su código, debe elegirse un nombre diferente." - -#: ../Doc/whatsnew/2.3.rst:2023 -msgid "" -"For strings *X* and *Y*, ``X in Y`` now works if *X* is more than one " -"character long." -msgstr "" -"Para las cadenas *X* y *Y*, ``X en Y`` ahora funciona si *X* tiene más de un " -"carácter." - -#: ../Doc/whatsnew/2.3.rst:2026 -msgid "" -"The :func:`int` type constructor will now return a long integer instead of " -"raising an :exc:`OverflowError` when a string or floating-point number is " -"too large to fit into an integer." -msgstr "" -"El constructor de tipo :func:`int` ahora retornará un entero largo en lugar " -"de lanzar un :exc:`OverflowError` cuando una cadena o un número de punto " -"flotante es demasiado grande para caber en un entero." - -#: ../Doc/whatsnew/2.3.rst:2030 -msgid "" -"If you have Unicode strings that contain 8-bit characters, you must declare " -"the file's encoding (UTF-8, Latin-1, or whatever) by adding a comment to the " -"top of the file. See section :ref:`section-encodings` for more information." -msgstr "" -"Si tiene cadenas Unicode que contienen caracteres de 8 bits, debe declarar " -"la codificación del archivo (UTF-8, Latin-1, o la que sea) añadiendo un " -"comentario al principio del archivo. Consulte la sección :ref:`section-" -"encodings` para más información." - -#: ../Doc/whatsnew/2.3.rst:2034 -msgid "" -"Calling Tcl methods through :mod:`_tkinter` no longer returns only strings. " -"Instead, if Tcl returns other objects those objects are converted to their " -"Python equivalent, if one exists, or wrapped with a :class:`_tkinter." -"Tcl_Obj` object if no Python equivalent exists." -msgstr "" -"La llamada a métodos Tcl a través de :mod:`_tkinter` ya no retorna sólo " -"cadenas. En su lugar, si Tcl retorna otros objetos, éstos se convierten a su " -"equivalente en Python, si existe, o se envuelven con un objeto :class:" -"`_tkinter.Tcl_Obj` si no hay equivalente en Python." - -#: ../Doc/whatsnew/2.3.rst:2039 -msgid "" -"Large octal and hex literals such as ``0xffffffff`` now trigger a :exc:" -"`FutureWarning`. Currently they're stored as 32-bit numbers and result in a " -"negative value, but in Python 2.4 they'll become positive long integers." -msgstr "" -"Los octales largos y hexadecimales grandes como ``0xffffff`` ahora activan " -"un :exc:`FutureWarning`. Actualmente se almacenan como números de 32 bits y " -"resultan en un valor negativo, pero en Python 2.4 se convertirán en enteros " -"largos positivos." - -#: ../Doc/whatsnew/2.3.rst:2043 -msgid "" -"There are a few ways to fix this warning. If you really need a positive " -"number, just add an ``L`` to the end of the literal. If you're trying to " -"get a 32-bit integer with low bits set and have previously used an " -"expression such as ``~(1 << 31)``, it's probably clearest to start with all " -"bits set and clear the desired upper bits. For example, to clear just the " -"top bit (bit 31), you could write ``0xffffffffL &~(1L<<31)``." -msgstr "" -"Hay algunas formas de arreglar esta advertencia. Si realmente necesitas un " -"número positivo, simplemente añade una ``L`` al final del literal. Si está " -"tratando de obtener un entero de 32 bits con los bits inferiores " -"establecidos y ha utilizado previamente una expresión como ``~(1 << 31)``, " -"probablemente sea más claro comenzar con todos los bits establecidos y " -"borrar los bits superiores deseados. Por ejemplo, para borrar sólo el bit " -"superior (el 31), podrías escribir ``0xffffffL &~(1L<<31)``." - -#: ../Doc/whatsnew/2.3.rst:2050 -msgid "You can no longer disable assertions by assigning to ``__debug__``." -msgstr "" -"Ya no se pueden desactivar las aserciones asignándolas a ``__debug__``." - -#: ../Doc/whatsnew/2.3.rst:2052 -msgid "" -"The Distutils :func:`setup` function has gained various new keyword " -"arguments such as *depends*. Old versions of the Distutils will abort if " -"passed unknown keywords. A solution is to check for the presence of the " -"new :func:`get_distutil_options` function in your :file:`setup.py` and only " -"uses the new keywords with a version of the Distutils that supports them::" -msgstr "" -"La función Distutils :func:`setup` ha ganado varios argumentos de palabra " -"clave nuevos como *depends*. Las versiones antiguas de Distutils abortan si " -"se les pasan palabras clave desconocidas. Una solución es comprobar la " -"presencia de la nueva función :func:`get_distutil_options` en su :file:" -"`setup.py` y sólo utilizar las nuevas palabras clave con una versión de las " -"Distutils que las soporte::" - -#: ../Doc/whatsnew/2.3.rst:2065 -msgid "" -"Using ``None`` as a variable name will now result in a :exc:`SyntaxWarning` " -"warning." -msgstr "" -"El uso de ``None`` como nombre de variable ahora resultará en una " -"advertencia :exc:`SyntaxWarning`." - -#: ../Doc/whatsnew/2.3.rst:2068 -msgid "" -"Names of extension types defined by the modules included with Python now " -"contain the module and a ``'.'`` in front of the type name." -msgstr "" -"Los nombres de los tipos de extensión definidos por los módulos incluidos en " -"Python contienen ahora el módulo y un ``.'`` delante del nombre del tipo." - -#: ../Doc/whatsnew/2.3.rst:2077 -msgid "Acknowledgements" -msgstr "Agradecimientos" - -#: ../Doc/whatsnew/2.3.rst:2079 -msgid "" -"The author would like to thank the following people for offering " -"suggestions, corrections and assistance with various drafts of this article: " -"Jeff Bauer, Simon Brunning, Brett Cannon, Michael Chermside, Andrew Dalke, " -"Scott David Daniels, Fred L. Drake, Jr., David Fraser, Kelly Gerber, " -"Raymond Hettinger, Michael Hudson, Chris Lambert, Detlef Lannert, Martin von " -"Löwis, Andrew MacIntyre, Lalo Martins, Chad Netzer, Gustavo Niemeyer, Neal " -"Norwitz, Hans Nowak, Chris Reedy, Francesco Ricciardi, Vinay Sajip, Neil " -"Schemenauer, Roman Suzi, Jason Tishler, Just van Rossum." -msgstr "" -"El autor desea agradecer a las siguientes personas por ofrecer sugerencias, " -"correcciones y asistencia en los diversos borradores de este artículo:Jeff " -"Bauer, Simon Brunning, Brett Cannon, Michael Chermside, Andrew Dalke, Scott " -"David Daniels, Fred L. Drake, Jr, David Fraser, Kelly Gerber, Raymond " -"Hettinger, Michael Hudson, Chris Lambert, Detlef Lannert, Martin von Löwis, " -"Andrew MacIntyre, Lalo Martins, Chad Netzer, Gustavo Niemeyer, Neal Norwitz, " -"Hans Nowak, Chris Reedy, Francesco Ricciardi, Vinay Sajip, Neil Schemenauer, " -"Roman Suzi, Jason Tishler, Just van Rossum." +"El entorno de desarrollo integrado IDLE ha sido actualizado utilizando el " +"código del proyecto IDLEfork (http://idlefork.sourceforge.net). La " +"característica más notable es que el código que se está desarrollando se " +"ejecuta ahora en un subproceso, lo que significa que ya no es necesario " +"realizar operaciones manuales de ``reload()``. El código central de IDLE ha " +"sido incorporado a la biblioteca estándar como el paquete :mod:`idlelib`."
django-cms__django-filer-1383
Field verbose_name should use gettext_lazy Hi, model field verbose_names should use gettext_lazy, because it creates migrations based on user language settings. https://github.com/django-cms/django-filer/blob/master/filer/models/foldermodels.py#L9 This is migration generated after upgrade to django-filer 3.0 ![image](https://github.com/django-cms/django-filer/assets/10236315/f3cdd97e-dcfe-449b-a928-a574976417da) Thanks.
[]
[]
diff --git a/MANIFEST.in b/MANIFEST.in index 046cfc193..5748b57fe 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,5 @@ include LICENSE include README.rst -exclude mptt/__init__.py recursive-include filer/locale * recursive-include filer/static * recursive-include filer/templates * diff --git a/filer/static/filer/css/maps/admin_filer.cms.icons.css.map b/filer/static/filer/css/maps/admin_filer.cms.icons.css.map new file mode 100644 index 000000000..2f69314ec --- /dev/null +++ b/filer/static/filer/css/maps/admin_filer.cms.icons.css.map @@ -0,0 +1 @@ +{"version":3,"sources":["components/_iconography.scss"],"names":[],"mappings":"AAIA,WACI,mCAAA,CACA,qDAAA,CACA,0WAAA,CAKA,kBAAA,CACA,iBAAA,CAGJ,YACI,oBAAA,CACA,iCAAA,CACA,iBAAA,CACA,mBAAA,CACA,iCAAA,CAAA,yBAAA,CACA,kCAAA,CACA,iCAAA,CAqDA,8BACI,eAAA,CADJ,8BACI,eAAA,CADJ,iCACI,eAAA,CADJ,4BACI,eAAA,CADJ,0BACI,eAAA,CADJ,wBACI,eAAA,CADJ,kCACI,eAAA,CADJ,2BACI,eAAA,CADJ,oCACI,eAAA,CADJ,0BACI,eAAA,CADJ,4BACI,eAAA,CADJ,2BACI,eAAA,CADJ,0BACI,eAAA","file":"../admin_filer.cms.icons.css","sourcesContent":["//######################################################################################################################\n// #ICONOGRAPHY#\n\n// default font file generated by gulp\n@font-face {\n font-family: \"django-filer-iconfont\";\n src: url(\"../fonts/django-filer-iconfont.eot?v=3.2.0\");\n src: url(\"../fonts/django-filer-iconfont.eot?v=3.2.0#iefix\") format(\"eot\"),\n url(\"../fonts/django-filer-iconfont.woff2?v=3.2.0\") format(\"woff2\"),\n url(\"../fonts/django-filer-iconfont.woff?v=3.2.0\") format(\"woff\"),\n url(\"../fonts/django-filer-iconfont.ttf?v=3.2.0\") format(\"truetype\"),\n url(\"../fonts/django-filer-iconfont.svg?v=3.2.0#django-filer-iconfont\") format(\"svg\");\n font-weight: normal;\n font-style: normal;\n}\n\n%icon {\n display: inline-block;\n font-family: django-filer-iconfont;\n font-size: inherit;\n text-rendering: auto;\n transform: translate(0, 0);\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n@function icon-char($filename) {\n $char: \"\";\n\n @if $filename == arrow-down {\n $char: \"E001\";\n }\n @if $filename == caret-down {\n $char: \"E002\";\n }\n @if $filename == chevron-right {\n $char: \"E003\";\n }\n @if $filename == download {\n $char: \"E004\";\n }\n @if $filename == expand {\n $char: \"E005\";\n }\n @if $filename == link {\n $char: \"E006\";\n }\n @if $filename == move-to-folder {\n $char: \"E007\";\n }\n @if $filename == picture {\n $char: \"E008\";\n }\n @if $filename == remove-selection {\n $char: \"E009\";\n }\n @if $filename == select {\n $char: \"E00A\";\n }\n @if $filename == th-large {\n $char: \"E00B\";\n }\n @if $filename == th-list {\n $char: \"E00C\";\n }\n @if $filename == upload {\n $char: \"E00D\";\n }\n\n @return $char;\n}\n\n.filer-icon {\n @extend %icon;\n}\n@mixin icon($filename, $insert: before) {\n &:#{$insert} {\n content: #{\"\\\"\\\\\"}#{icon-char($filename) + \"\\\"\"};\n }\n}\n\n// #####################################################################################################################\n// #ICONS:start#\n// use unicode characters for accessibility reasons and use aria-hidden=\"true\" for decorative icons\n// DOCS: http://filamentgroup.com/lab/bulletproof_icon_fonts.html\n\n.filer-icon-arrow-down {\n @include icon(arrow-down);\n}\n\n.filer-icon-caret-down {\n @include icon(caret-down);\n}\n\n.filer-icon-chevron-right {\n @include icon(chevron-right);\n}\n\n.filer-icon-download {\n @include icon(download);\n}\n\n.filer-icon-expand {\n @include icon(expand);\n}\n\n.filer-icon-link {\n @include icon(link);\n}\n\n.filer-icon-move-to-folder {\n @include icon(move-to-folder);\n}\n\n.filer-icon-picture {\n @include icon(picture);\n}\n\n.filer-icon-remove-selection {\n @include icon(remove-selection);\n}\n\n.filer-icon-select {\n @include icon(select);\n}\n\n.filer-icon-th-large {\n @include icon(th-large);\n}\n\n.filer-icon-th-list {\n @include icon(th-list);\n}\n\n.filer-icon-upload {\n @include icon(upload);\n}\n"]} \ No newline at end of file diff --git a/filer/static/filer/css/maps/admin_filer.fa.icons.css.map b/filer/static/filer/css/maps/admin_filer.fa.icons.css.map new file mode 100644 index 000000000..9f7fc4584 --- /dev/null +++ b/filer/static/filer/css/maps/admin_filer.fa.icons.css.map @@ -0,0 +1 @@ +{"version":3,"sources":["admin_filer.fa.icons.css","libs/_font-awesome.min.scss"],"names":[],"mappings":"AAAA;;;ECAA,CAGG,WAAA,yBAAA,CAAA,mDAAA,CAAA,4WAAA,CAAA,kBAAA,CAAA,iBAAA,CAAA,IAAA,oBAAA,CAAA,4CAAA,CAAA,iBAAA,CAAA,mBAAA,CAAA,kCAAA,CAAA,iCAAA,CAAA,OAAA,sBAAA,CAAA,iBAAA,CAAA,mBAAA,CAAA,OAAA,aAAA,CAAA,OAAA,aAAA,CAAA,OAAA,aAAA,CAAA,OAAA,aAAA,CAAA,OAAA,kBAAA,CAAA,iBAAA,CAAA,OAAA,cAAA,CAAA,wBAAA,CAAA,oBAAA,CAAA,UAAA,iBAAA,CAAA,OAAA,iBAAA,CAAA,kBAAA,CAAA,kBAAA,CAAA,eAAA,CAAA,iBAAA,CAAA,aAAA,kBAAA,CAAA,WAAA,wBAAA,CAAA,uBAAA,CAAA,kBAAA,CAAA,cAAA,UAAA,CAAA,eAAA,WAAA,CAAA,iBAAA,iBAAA,CAAA,kBAAA,gBAAA,CAAA,YAAA,WAAA,CAAA,WAAA,UAAA,CAAA,cAAA,iBAAA,CAAA,eAAA,gBAAA,CAAA,SAAA,4CAAA,CAAA,oCAAA,CAAA,UAAA,8CAAA,CAAA,sCAAA,CAAA,2BAAA,GAAA,8BAAA,CAAA,sBAAA,CAAA,KAAA,gCAAA,CAAA,wBAAA,CAAA,CAAA,mBAAA,GAAA,8BAAA,CAAA,sBAAA,CAAA,KAAA,gCAAA,CAAA,wBAAA,CAAA,CAAA,cAAA,+DAAA,CAAA,+BAAA,CAAA,uBAAA,CAAA,eAAA,+DAAA,CAAA,gCAAA,CAAA,wBAAA,CAAA,eAAA,+DAAA,CAAA,gCAAA,CAAA,wBAAA,CAAA,oBAAA,yEAAA,CAAA,8BAAA,CAAA,sBAAA,CAAA,kBAAA,yEAAA,CAAA,8BAAA,CAAA,sBAAA,CAAA,gHAAA,mBAAA,CAAA,WAAA,CAAA,UAAA,iBAAA,CAAA,oBAAA,CAAA,SAAA,CAAA,UAAA,CAAA,eAAA,CAAA,qBAAA,CAAA,0BAAA,iBAAA,CAAA,MAAA,CAAA,UAAA,CAAA,iBAAA,CAAA,aAAA,mBAAA,CAAA,aAAA,aAAA,CAAA,YAAA,UAAA,CAAA,iBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,cAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oDAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,0CAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,qCAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uDAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,2CAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,eAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,yCAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,mDAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,4CAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,iCAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,0CAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kCAAA,WAAA,CAAA,iCAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,mCAAA,WAAA,CAAA,mCAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,oCAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sDAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,oCAAA,WAAA,CAAA,0CAAA,WAAA,CAAA,uCAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,uCAAA,WAAA,CAAA,kCAAA,WAAA,CAAA,2CAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,iCAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sCAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,0CAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,uCAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,+CAAA,WAAA,CAAA,4EAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,0CAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,sDAAA,WAAA,CAAA,kDAAA,WAAA,CAAA,wDAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,eAAA,WAAA,CAAA,iCAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,4DAAA,WAAA,CAAA,kDAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,kCAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,sCAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,cAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,sDAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uCAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,6DAAA,WAAA,CAAA,kDAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,8BAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,qCAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,0EAAA,WAAA,CAAA,gDAAA,WAAA,CAAA,gDAAA,WAAA,CAAA,gDAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,wGAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,+BAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,2EAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,cAAA,WAAA,CAAA,oCAAA,WAAA,CAAA,uCAAA,WAAA,CAAA,2CAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,6CAAA,WAAA,CAAA,eAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,cAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,cAAA,WAAA,CAAA,mDAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,gBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,2CAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,sCAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,6CAAA,WAAA,CAAA,uDAAA,WAAA,CAAA,6CAAA,WAAA,CAAA,gDAAA,WAAA,CAAA,8CAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,kDAAA,WAAA,CAAA,iDAAA,WAAA,CAAA,gDAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,8CAAA,WAAA,CAAA,+CAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,0BAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,cAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,yBAAA,WAAA,CAAA,gCAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,uBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,6BAAA,WAAA,CAAA,oCAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,kBAAA,WAAA,CAAA,2BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,4BAAA,WAAA,CAAA,oBAAA,WAAA,CAAA,mBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,eAAA,WAAA,CAAA,sBAAA,WAAA,CAAA,wBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,iBAAA,WAAA,CAAA,qBAAA,WAAA,CAAA,qBAAA,WAAA","file":"../admin_filer.fa.icons.css","sourcesContent":["/*!\n * Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome\n * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */@font-face{font-family:\"FontAwesome\";src:url(\"../fonts/fontawesome-webfont.eot?v=4.4.0\");src:url(\"../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0\") format(\"embedded-opentype\"),url(\"../fonts/fontawesome-webfont.woff2?v=4.4.0\") format(\"woff2\"),url(\"../fonts/fontawesome-webfont.woff?v=4.4.0\") format(\"woff\"),url(\"../fonts/fontawesome-webfont.ttf?v=4.4.0\") format(\"truetype\"),url(\"../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular\") format(\"svg\");font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:\"\"}.fa-music:before{content:\"\"}.fa-search:before{content:\"\"}.fa-envelope-o:before{content:\"\"}.fa-heart:before{content:\"\"}.fa-star:before{content:\"\"}.fa-star-o:before{content:\"\"}.fa-user:before{content:\"\"}.fa-film:before{content:\"\"}.fa-th-large:before{content:\"\"}.fa-th:before{content:\"\"}.fa-th-list:before{content:\"\"}.fa-check:before{content:\"\"}.fa-remove:before,.fa-close:before,.fa-times:before{content:\"\"}.fa-search-plus:before{content:\"\"}.fa-search-minus:before{content:\"\"}.fa-power-off:before{content:\"\"}.fa-signal:before{content:\"\"}.fa-gear:before,.fa-cog:before{content:\"\"}.fa-trash-o:before{content:\"\"}.fa-home:before{content:\"\"}.fa-file-o:before{content:\"\"}.fa-clock-o:before{content:\"\"}.fa-road:before{content:\"\"}.fa-download:before{content:\"\"}.fa-arrow-circle-o-down:before{content:\"\"}.fa-arrow-circle-o-up:before{content:\"\"}.fa-inbox:before{content:\"\"}.fa-play-circle-o:before{content:\"\"}.fa-rotate-right:before,.fa-repeat:before{content:\"\"}.fa-refresh:before{content:\"\"}.fa-list-alt:before{content:\"\"}.fa-lock:before{content:\"\"}.fa-flag:before{content:\"\"}.fa-headphones:before{content:\"\"}.fa-volume-off:before{content:\"\"}.fa-volume-down:before{content:\"\"}.fa-volume-up:before{content:\"\"}.fa-qrcode:before{content:\"\"}.fa-barcode:before{content:\"\"}.fa-tag:before{content:\"\"}.fa-tags:before{content:\"\"}.fa-book:before{content:\"\"}.fa-bookmark:before{content:\"\"}.fa-print:before{content:\"\"}.fa-camera:before{content:\"\"}.fa-font:before{content:\"\"}.fa-bold:before{content:\"\"}.fa-italic:before{content:\"\"}.fa-text-height:before{content:\"\"}.fa-text-width:before{content:\"\"}.fa-align-left:before{content:\"\"}.fa-align-center:before{content:\"\"}.fa-align-right:before{content:\"\"}.fa-align-justify:before{content:\"\"}.fa-list:before{content:\"\"}.fa-dedent:before,.fa-outdent:before{content:\"\"}.fa-indent:before{content:\"\"}.fa-video-camera:before{content:\"\"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:\"\"}.fa-pencil:before{content:\"\"}.fa-map-marker:before{content:\"\"}.fa-adjust:before{content:\"\"}.fa-tint:before{content:\"\"}.fa-edit:before,.fa-pencil-square-o:before{content:\"\"}.fa-share-square-o:before{content:\"\"}.fa-check-square-o:before{content:\"\"}.fa-arrows:before{content:\"\"}.fa-step-backward:before{content:\"\"}.fa-fast-backward:before{content:\"\"}.fa-backward:before{content:\"\"}.fa-play:before{content:\"\"}.fa-pause:before{content:\"\"}.fa-stop:before{content:\"\"}.fa-forward:before{content:\"\"}.fa-fast-forward:before{content:\"\"}.fa-step-forward:before{content:\"\"}.fa-eject:before{content:\"\"}.fa-chevron-left:before{content:\"\"}.fa-chevron-right:before{content:\"\"}.fa-plus-circle:before{content:\"\"}.fa-minus-circle:before{content:\"\"}.fa-times-circle:before{content:\"\"}.fa-check-circle:before{content:\"\"}.fa-question-circle:before{content:\"\"}.fa-info-circle:before{content:\"\"}.fa-crosshairs:before{content:\"\"}.fa-times-circle-o:before{content:\"\"}.fa-check-circle-o:before{content:\"\"}.fa-ban:before{content:\"\"}.fa-arrow-left:before{content:\"\"}.fa-arrow-right:before{content:\"\"}.fa-arrow-up:before{content:\"\"}.fa-arrow-down:before{content:\"\"}.fa-mail-forward:before,.fa-share:before{content:\"\"}.fa-expand:before{content:\"\"}.fa-compress:before{content:\"\"}.fa-plus:before{content:\"\"}.fa-minus:before{content:\"\"}.fa-asterisk:before{content:\"\"}.fa-exclamation-circle:before{content:\"\"}.fa-gift:before{content:\"\"}.fa-leaf:before{content:\"\"}.fa-fire:before{content:\"\"}.fa-eye:before{content:\"\"}.fa-eye-slash:before{content:\"\"}.fa-warning:before,.fa-exclamation-triangle:before{content:\"\"}.fa-plane:before{content:\"\"}.fa-calendar:before{content:\"\"}.fa-random:before{content:\"\"}.fa-comment:before{content:\"\"}.fa-magnet:before{content:\"\"}.fa-chevron-up:before{content:\"\"}.fa-chevron-down:before{content:\"\"}.fa-retweet:before{content:\"\"}.fa-shopping-cart:before{content:\"\"}.fa-folder:before{content:\"\"}.fa-folder-open:before{content:\"\"}.fa-arrows-v:before{content:\"\"}.fa-arrows-h:before{content:\"\"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:\"\"}.fa-twitter-square:before{content:\"\"}.fa-facebook-square:before{content:\"\"}.fa-camera-retro:before{content:\"\"}.fa-key:before{content:\"\"}.fa-gears:before,.fa-cogs:before{content:\"\"}.fa-comments:before{content:\"\"}.fa-thumbs-o-up:before{content:\"\"}.fa-thumbs-o-down:before{content:\"\"}.fa-star-half:before{content:\"\"}.fa-heart-o:before{content:\"\"}.fa-sign-out:before{content:\"\"}.fa-linkedin-square:before{content:\"\"}.fa-thumb-tack:before{content:\"\"}.fa-external-link:before{content:\"\"}.fa-sign-in:before{content:\"\"}.fa-trophy:before{content:\"\"}.fa-github-square:before{content:\"\"}.fa-upload:before{content:\"\"}.fa-lemon-o:before{content:\"\"}.fa-phone:before{content:\"\"}.fa-square-o:before{content:\"\"}.fa-bookmark-o:before{content:\"\"}.fa-phone-square:before{content:\"\"}.fa-twitter:before{content:\"\"}.fa-facebook-f:before,.fa-facebook:before{content:\"\"}.fa-github:before{content:\"\"}.fa-unlock:before{content:\"\"}.fa-credit-card:before{content:\"\"}.fa-feed:before,.fa-rss:before{content:\"\"}.fa-hdd-o:before{content:\"\"}.fa-bullhorn:before{content:\"\"}.fa-bell:before{content:\"\"}.fa-certificate:before{content:\"\"}.fa-hand-o-right:before{content:\"\"}.fa-hand-o-left:before{content:\"\"}.fa-hand-o-up:before{content:\"\"}.fa-hand-o-down:before{content:\"\"}.fa-arrow-circle-left:before{content:\"\"}.fa-arrow-circle-right:before{content:\"\"}.fa-arrow-circle-up:before{content:\"\"}.fa-arrow-circle-down:before{content:\"\"}.fa-globe:before{content:\"\"}.fa-wrench:before{content:\"\"}.fa-tasks:before{content:\"\"}.fa-filter:before{content:\"\"}.fa-briefcase:before{content:\"\"}.fa-arrows-alt:before{content:\"\"}.fa-group:before,.fa-users:before{content:\"\"}.fa-chain:before,.fa-link:before{content:\"\"}.fa-cloud:before{content:\"\"}.fa-flask:before{content:\"\"}.fa-cut:before,.fa-scissors:before{content:\"\"}.fa-copy:before,.fa-files-o:before{content:\"\"}.fa-paperclip:before{content:\"\"}.fa-save:before,.fa-floppy-o:before{content:\"\"}.fa-square:before{content:\"\"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:\"\"}.fa-list-ul:before{content:\"\"}.fa-list-ol:before{content:\"\"}.fa-strikethrough:before{content:\"\"}.fa-underline:before{content:\"\"}.fa-table:before{content:\"\"}.fa-magic:before{content:\"\"}.fa-truck:before{content:\"\"}.fa-pinterest:before{content:\"\"}.fa-pinterest-square:before{content:\"\"}.fa-google-plus-square:before{content:\"\"}.fa-google-plus:before{content:\"\"}.fa-money:before{content:\"\"}.fa-caret-down:before{content:\"\"}.fa-caret-up:before{content:\"\"}.fa-caret-left:before{content:\"\"}.fa-caret-right:before{content:\"\"}.fa-columns:before{content:\"\"}.fa-unsorted:before,.fa-sort:before{content:\"\"}.fa-sort-down:before,.fa-sort-desc:before{content:\"\"}.fa-sort-up:before,.fa-sort-asc:before{content:\"\"}.fa-envelope:before{content:\"\"}.fa-linkedin:before{content:\"\"}.fa-rotate-left:before,.fa-undo:before{content:\"\"}.fa-legal:before,.fa-gavel:before{content:\"\"}.fa-dashboard:before,.fa-tachometer:before{content:\"\"}.fa-comment-o:before{content:\"\"}.fa-comments-o:before{content:\"\"}.fa-flash:before,.fa-bolt:before{content:\"\"}.fa-sitemap:before{content:\"\"}.fa-umbrella:before{content:\"\"}.fa-paste:before,.fa-clipboard:before{content:\"\"}.fa-lightbulb-o:before{content:\"\"}.fa-exchange:before{content:\"\"}.fa-cloud-download:before{content:\"\"}.fa-cloud-upload:before{content:\"\"}.fa-user-md:before{content:\"\"}.fa-stethoscope:before{content:\"\"}.fa-suitcase:before{content:\"\"}.fa-bell-o:before{content:\"\"}.fa-coffee:before{content:\"\"}.fa-cutlery:before{content:\"\"}.fa-file-text-o:before{content:\"\"}.fa-building-o:before{content:\"\"}.fa-hospital-o:before{content:\"\"}.fa-ambulance:before{content:\"\"}.fa-medkit:before{content:\"\"}.fa-fighter-jet:before{content:\"\"}.fa-beer:before{content:\"\"}.fa-h-square:before{content:\"\"}.fa-plus-square:before{content:\"\"}.fa-angle-double-left:before{content:\"\"}.fa-angle-double-right:before{content:\"\"}.fa-angle-double-up:before{content:\"\"}.fa-angle-double-down:before{content:\"\"}.fa-angle-left:before{content:\"\"}.fa-angle-right:before{content:\"\"}.fa-angle-up:before{content:\"\"}.fa-angle-down:before{content:\"\"}.fa-desktop:before{content:\"\"}.fa-laptop:before{content:\"\"}.fa-tablet:before{content:\"\"}.fa-mobile-phone:before,.fa-mobile:before{content:\"\"}.fa-circle-o:before{content:\"\"}.fa-quote-left:before{content:\"\"}.fa-quote-right:before{content:\"\"}.fa-spinner:before{content:\"\"}.fa-circle:before{content:\"\"}.fa-mail-reply:before,.fa-reply:before{content:\"\"}.fa-github-alt:before{content:\"\"}.fa-folder-o:before{content:\"\"}.fa-folder-open-o:before{content:\"\"}.fa-smile-o:before{content:\"\"}.fa-frown-o:before{content:\"\"}.fa-meh-o:before{content:\"\"}.fa-gamepad:before{content:\"\"}.fa-keyboard-o:before{content:\"\"}.fa-flag-o:before{content:\"\"}.fa-flag-checkered:before{content:\"\"}.fa-terminal:before{content:\"\"}.fa-code:before{content:\"\"}.fa-mail-reply-all:before,.fa-reply-all:before{content:\"\"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:\"\"}.fa-location-arrow:before{content:\"\"}.fa-crop:before{content:\"\"}.fa-code-fork:before{content:\"\"}.fa-unlink:before,.fa-chain-broken:before{content:\"\"}.fa-question:before{content:\"\"}.fa-info:before{content:\"\"}.fa-exclamation:before{content:\"\"}.fa-superscript:before{content:\"\"}.fa-subscript:before{content:\"\"}.fa-eraser:before{content:\"\"}.fa-puzzle-piece:before{content:\"\"}.fa-microphone:before{content:\"\"}.fa-microphone-slash:before{content:\"\"}.fa-shield:before{content:\"\"}.fa-calendar-o:before{content:\"\"}.fa-fire-extinguisher:before{content:\"\"}.fa-rocket:before{content:\"\"}.fa-maxcdn:before{content:\"\"}.fa-chevron-circle-left:before{content:\"\"}.fa-chevron-circle-right:before{content:\"\"}.fa-chevron-circle-up:before{content:\"\"}.fa-chevron-circle-down:before{content:\"\"}.fa-html5:before{content:\"\"}.fa-css3:before{content:\"\"}.fa-anchor:before{content:\"\"}.fa-unlock-alt:before{content:\"\"}.fa-bullseye:before{content:\"\"}.fa-ellipsis-h:before{content:\"\"}.fa-ellipsis-v:before{content:\"\"}.fa-rss-square:before{content:\"\"}.fa-play-circle:before{content:\"\"}.fa-ticket:before{content:\"\"}.fa-minus-square:before{content:\"\"}.fa-minus-square-o:before{content:\"\"}.fa-level-up:before{content:\"\"}.fa-level-down:before{content:\"\"}.fa-check-square:before{content:\"\"}.fa-pencil-square:before{content:\"\"}.fa-external-link-square:before{content:\"\"}.fa-share-square:before{content:\"\"}.fa-compass:before{content:\"\"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:\"\"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:\"\"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:\"\"}.fa-euro:before,.fa-eur:before{content:\"\"}.fa-gbp:before{content:\"\"}.fa-dollar:before,.fa-usd:before{content:\"\"}.fa-rupee:before,.fa-inr:before{content:\"\"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:\"\"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:\"\"}.fa-won:before,.fa-krw:before{content:\"\"}.fa-bitcoin:before,.fa-btc:before{content:\"\"}.fa-file:before{content:\"\"}.fa-file-text:before{content:\"\"}.fa-sort-alpha-asc:before{content:\"\"}.fa-sort-alpha-desc:before{content:\"\"}.fa-sort-amount-asc:before{content:\"\"}.fa-sort-amount-desc:before{content:\"\"}.fa-sort-numeric-asc:before{content:\"\"}.fa-sort-numeric-desc:before{content:\"\"}.fa-thumbs-up:before{content:\"\"}.fa-thumbs-down:before{content:\"\"}.fa-youtube-square:before{content:\"\"}.fa-youtube:before{content:\"\"}.fa-xing:before{content:\"\"}.fa-xing-square:before{content:\"\"}.fa-youtube-play:before{content:\"\"}.fa-dropbox:before{content:\"\"}.fa-stack-overflow:before{content:\"\"}.fa-instagram:before{content:\"\"}.fa-flickr:before{content:\"\"}.fa-adn:before{content:\"\"}.fa-bitbucket:before{content:\"\"}.fa-bitbucket-square:before{content:\"\"}.fa-tumblr:before{content:\"\"}.fa-tumblr-square:before{content:\"\"}.fa-long-arrow-down:before{content:\"\"}.fa-long-arrow-up:before{content:\"\"}.fa-long-arrow-left:before{content:\"\"}.fa-long-arrow-right:before{content:\"\"}.fa-apple:before{content:\"\"}.fa-windows:before{content:\"\"}.fa-android:before{content:\"\"}.fa-linux:before{content:\"\"}.fa-dribbble:before{content:\"\"}.fa-skype:before{content:\"\"}.fa-foursquare:before{content:\"\"}.fa-trello:before{content:\"\"}.fa-female:before{content:\"\"}.fa-male:before{content:\"\"}.fa-gittip:before,.fa-gratipay:before{content:\"\"}.fa-sun-o:before{content:\"\"}.fa-moon-o:before{content:\"\"}.fa-archive:before{content:\"\"}.fa-bug:before{content:\"\"}.fa-vk:before{content:\"\"}.fa-weibo:before{content:\"\"}.fa-renren:before{content:\"\"}.fa-pagelines:before{content:\"\"}.fa-stack-exchange:before{content:\"\"}.fa-arrow-circle-o-right:before{content:\"\"}.fa-arrow-circle-o-left:before{content:\"\"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:\"\"}.fa-dot-circle-o:before{content:\"\"}.fa-wheelchair:before{content:\"\"}.fa-vimeo-square:before{content:\"\"}.fa-turkish-lira:before,.fa-try:before{content:\"\"}.fa-plus-square-o:before{content:\"\"}.fa-space-shuttle:before{content:\"\"}.fa-slack:before{content:\"\"}.fa-envelope-square:before{content:\"\"}.fa-wordpress:before{content:\"\"}.fa-openid:before{content:\"\"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:\"\"}.fa-mortar-board:before,.fa-graduation-cap:before{content:\"\"}.fa-yahoo:before{content:\"\"}.fa-google:before{content:\"\"}.fa-reddit:before{content:\"\"}.fa-reddit-square:before{content:\"\"}.fa-stumbleupon-circle:before{content:\"\"}.fa-stumbleupon:before{content:\"\"}.fa-delicious:before{content:\"\"}.fa-digg:before{content:\"\"}.fa-pied-piper:before{content:\"\"}.fa-pied-piper-alt:before{content:\"\"}.fa-drupal:before{content:\"\"}.fa-joomla:before{content:\"\"}.fa-language:before{content:\"\"}.fa-fax:before{content:\"\"}.fa-building:before{content:\"\"}.fa-child:before{content:\"\"}.fa-paw:before{content:\"\"}.fa-spoon:before{content:\"\"}.fa-cube:before{content:\"\"}.fa-cubes:before{content:\"\"}.fa-behance:before{content:\"\"}.fa-behance-square:before{content:\"\"}.fa-steam:before{content:\"\"}.fa-steam-square:before{content:\"\"}.fa-recycle:before{content:\"\"}.fa-automobile:before,.fa-car:before{content:\"\"}.fa-cab:before,.fa-taxi:before{content:\"\"}.fa-tree:before{content:\"\"}.fa-spotify:before{content:\"\"}.fa-deviantart:before{content:\"\"}.fa-soundcloud:before{content:\"\"}.fa-database:before{content:\"\"}.fa-file-pdf-o:before{content:\"\"}.fa-file-word-o:before{content:\"\"}.fa-file-excel-o:before{content:\"\"}.fa-file-powerpoint-o:before{content:\"\"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:\"\"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:\"\"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:\"\"}.fa-file-movie-o:before,.fa-file-video-o:before{content:\"\"}.fa-file-code-o:before{content:\"\"}.fa-vine:before{content:\"\"}.fa-codepen:before{content:\"\"}.fa-jsfiddle:before{content:\"\"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:\"\"}.fa-circle-o-notch:before{content:\"\"}.fa-ra:before,.fa-rebel:before{content:\"\"}.fa-ge:before,.fa-empire:before{content:\"\"}.fa-git-square:before{content:\"\"}.fa-git:before{content:\"\"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:\"\"}.fa-tencent-weibo:before{content:\"\"}.fa-qq:before{content:\"\"}.fa-wechat:before,.fa-weixin:before{content:\"\"}.fa-send:before,.fa-paper-plane:before{content:\"\"}.fa-send-o:before,.fa-paper-plane-o:before{content:\"\"}.fa-history:before{content:\"\"}.fa-circle-thin:before{content:\"\"}.fa-header:before{content:\"\"}.fa-paragraph:before{content:\"\"}.fa-sliders:before{content:\"\"}.fa-share-alt:before{content:\"\"}.fa-share-alt-square:before{content:\"\"}.fa-bomb:before{content:\"\"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:\"\"}.fa-tty:before{content:\"\"}.fa-binoculars:before{content:\"\"}.fa-plug:before{content:\"\"}.fa-slideshare:before{content:\"\"}.fa-twitch:before{content:\"\"}.fa-yelp:before{content:\"\"}.fa-newspaper-o:before{content:\"\"}.fa-wifi:before{content:\"\"}.fa-calculator:before{content:\"\"}.fa-paypal:before{content:\"\"}.fa-google-wallet:before{content:\"\"}.fa-cc-visa:before{content:\"\"}.fa-cc-mastercard:before{content:\"\"}.fa-cc-discover:before{content:\"\"}.fa-cc-amex:before{content:\"\"}.fa-cc-paypal:before{content:\"\"}.fa-cc-stripe:before{content:\"\"}.fa-bell-slash:before{content:\"\"}.fa-bell-slash-o:before{content:\"\"}.fa-trash:before{content:\"\"}.fa-copyright:before{content:\"\"}.fa-at:before{content:\"\"}.fa-eyedropper:before{content:\"\"}.fa-paint-brush:before{content:\"\"}.fa-birthday-cake:before{content:\"\"}.fa-area-chart:before{content:\"\"}.fa-pie-chart:before{content:\"\"}.fa-line-chart:before{content:\"\"}.fa-lastfm:before{content:\"\"}.fa-lastfm-square:before{content:\"\"}.fa-toggle-off:before{content:\"\"}.fa-toggle-on:before{content:\"\"}.fa-bicycle:before{content:\"\"}.fa-bus:before{content:\"\"}.fa-ioxhost:before{content:\"\"}.fa-angellist:before{content:\"\"}.fa-cc:before{content:\"\"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:\"\"}.fa-meanpath:before{content:\"\"}.fa-buysellads:before{content:\"\"}.fa-connectdevelop:before{content:\"\"}.fa-dashcube:before{content:\"\"}.fa-forumbee:before{content:\"\"}.fa-leanpub:before{content:\"\"}.fa-sellsy:before{content:\"\"}.fa-shirtsinbulk:before{content:\"\"}.fa-simplybuilt:before{content:\"\"}.fa-skyatlas:before{content:\"\"}.fa-cart-plus:before{content:\"\"}.fa-cart-arrow-down:before{content:\"\"}.fa-diamond:before{content:\"\"}.fa-ship:before{content:\"\"}.fa-user-secret:before{content:\"\"}.fa-motorcycle:before{content:\"\"}.fa-street-view:before{content:\"\"}.fa-heartbeat:before{content:\"\"}.fa-venus:before{content:\"\"}.fa-mars:before{content:\"\"}.fa-mercury:before{content:\"\"}.fa-intersex:before,.fa-transgender:before{content:\"\"}.fa-transgender-alt:before{content:\"\"}.fa-venus-double:before{content:\"\"}.fa-mars-double:before{content:\"\"}.fa-venus-mars:before{content:\"\"}.fa-mars-stroke:before{content:\"\"}.fa-mars-stroke-v:before{content:\"\"}.fa-mars-stroke-h:before{content:\"\"}.fa-neuter:before{content:\"\"}.fa-genderless:before{content:\"\"}.fa-facebook-official:before{content:\"\"}.fa-pinterest-p:before{content:\"\"}.fa-whatsapp:before{content:\"\"}.fa-server:before{content:\"\"}.fa-user-plus:before{content:\"\"}.fa-user-times:before{content:\"\"}.fa-hotel:before,.fa-bed:before{content:\"\"}.fa-viacoin:before{content:\"\"}.fa-train:before{content:\"\"}.fa-subway:before{content:\"\"}.fa-medium:before{content:\"\"}.fa-yc:before,.fa-y-combinator:before{content:\"\"}.fa-optin-monster:before{content:\"\"}.fa-opencart:before{content:\"\"}.fa-expeditedssl:before{content:\"\"}.fa-battery-4:before,.fa-battery-full:before{content:\"\"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:\"\"}.fa-battery-2:before,.fa-battery-half:before{content:\"\"}.fa-battery-1:before,.fa-battery-quarter:before{content:\"\"}.fa-battery-0:before,.fa-battery-empty:before{content:\"\"}.fa-mouse-pointer:before{content:\"\"}.fa-i-cursor:before{content:\"\"}.fa-object-group:before{content:\"\"}.fa-object-ungroup:before{content:\"\"}.fa-sticky-note:before{content:\"\"}.fa-sticky-note-o:before{content:\"\"}.fa-cc-jcb:before{content:\"\"}.fa-cc-diners-club:before{content:\"\"}.fa-clone:before{content:\"\"}.fa-balance-scale:before{content:\"\"}.fa-hourglass-o:before{content:\"\"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:\"\"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:\"\"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:\"\"}.fa-hourglass:before{content:\"\"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:\"\"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:\"\"}.fa-hand-scissors-o:before{content:\"\"}.fa-hand-lizard-o:before{content:\"\"}.fa-hand-spock-o:before{content:\"\"}.fa-hand-pointer-o:before{content:\"\"}.fa-hand-peace-o:before{content:\"\"}.fa-trademark:before{content:\"\"}.fa-registered:before{content:\"\"}.fa-creative-commons:before{content:\"\"}.fa-gg:before{content:\"\"}.fa-gg-circle:before{content:\"\"}.fa-tripadvisor:before{content:\"\"}.fa-odnoklassniki:before{content:\"\"}.fa-odnoklassniki-square:before{content:\"\"}.fa-get-pocket:before{content:\"\"}.fa-wikipedia-w:before{content:\"\"}.fa-safari:before{content:\"\"}.fa-chrome:before{content:\"\"}.fa-firefox:before{content:\"\"}.fa-opera:before{content:\"\"}.fa-internet-explorer:before{content:\"\"}.fa-tv:before,.fa-television:before{content:\"\"}.fa-contao:before{content:\"\"}.fa-500px:before{content:\"\"}.fa-amazon:before{content:\"\"}.fa-calendar-plus-o:before{content:\"\"}.fa-calendar-minus-o:before{content:\"\"}.fa-calendar-times-o:before{content:\"\"}.fa-calendar-check-o:before{content:\"\"}.fa-industry:before{content:\"\"}.fa-map-pin:before{content:\"\"}.fa-map-signs:before{content:\"\"}.fa-map-o:before{content:\"\"}.fa-map:before{content:\"\"}.fa-commenting:before{content:\"\"}.fa-commenting-o:before{content:\"\"}.fa-houzz:before{content:\"\"}.fa-vimeo:before{content:\"\"}.fa-black-tie:before{content:\"\"}.fa-fonticons:before{content:\"\"}","/*!\n * Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome\n * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.4.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:\"\\f000\"}.fa-music:before{content:\"\\f001\"}.fa-search:before{content:\"\\f002\"}.fa-envelope-o:before{content:\"\\f003\"}.fa-heart:before{content:\"\\f004\"}.fa-star:before{content:\"\\f005\"}.fa-star-o:before{content:\"\\f006\"}.fa-user:before{content:\"\\f007\"}.fa-film:before{content:\"\\f008\"}.fa-th-large:before{content:\"\\f009\"}.fa-th:before{content:\"\\f00a\"}.fa-th-list:before{content:\"\\f00b\"}.fa-check:before{content:\"\\f00c\"}.fa-remove:before,.fa-close:before,.fa-times:before{content:\"\\f00d\"}.fa-search-plus:before{content:\"\\f00e\"}.fa-search-minus:before{content:\"\\f010\"}.fa-power-off:before{content:\"\\f011\"}.fa-signal:before{content:\"\\f012\"}.fa-gear:before,.fa-cog:before{content:\"\\f013\"}.fa-trash-o:before{content:\"\\f014\"}.fa-home:before{content:\"\\f015\"}.fa-file-o:before{content:\"\\f016\"}.fa-clock-o:before{content:\"\\f017\"}.fa-road:before{content:\"\\f018\"}.fa-download:before{content:\"\\f019\"}.fa-arrow-circle-o-down:before{content:\"\\f01a\"}.fa-arrow-circle-o-up:before{content:\"\\f01b\"}.fa-inbox:before{content:\"\\f01c\"}.fa-play-circle-o:before{content:\"\\f01d\"}.fa-rotate-right:before,.fa-repeat:before{content:\"\\f01e\"}.fa-refresh:before{content:\"\\f021\"}.fa-list-alt:before{content:\"\\f022\"}.fa-lock:before{content:\"\\f023\"}.fa-flag:before{content:\"\\f024\"}.fa-headphones:before{content:\"\\f025\"}.fa-volume-off:before{content:\"\\f026\"}.fa-volume-down:before{content:\"\\f027\"}.fa-volume-up:before{content:\"\\f028\"}.fa-qrcode:before{content:\"\\f029\"}.fa-barcode:before{content:\"\\f02a\"}.fa-tag:before{content:\"\\f02b\"}.fa-tags:before{content:\"\\f02c\"}.fa-book:before{content:\"\\f02d\"}.fa-bookmark:before{content:\"\\f02e\"}.fa-print:before{content:\"\\f02f\"}.fa-camera:before{content:\"\\f030\"}.fa-font:before{content:\"\\f031\"}.fa-bold:before{content:\"\\f032\"}.fa-italic:before{content:\"\\f033\"}.fa-text-height:before{content:\"\\f034\"}.fa-text-width:before{content:\"\\f035\"}.fa-align-left:before{content:\"\\f036\"}.fa-align-center:before{content:\"\\f037\"}.fa-align-right:before{content:\"\\f038\"}.fa-align-justify:before{content:\"\\f039\"}.fa-list:before{content:\"\\f03a\"}.fa-dedent:before,.fa-outdent:before{content:\"\\f03b\"}.fa-indent:before{content:\"\\f03c\"}.fa-video-camera:before{content:\"\\f03d\"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:\"\\f03e\"}.fa-pencil:before{content:\"\\f040\"}.fa-map-marker:before{content:\"\\f041\"}.fa-adjust:before{content:\"\\f042\"}.fa-tint:before{content:\"\\f043\"}.fa-edit:before,.fa-pencil-square-o:before{content:\"\\f044\"}.fa-share-square-o:before{content:\"\\f045\"}.fa-check-square-o:before{content:\"\\f046\"}.fa-arrows:before{content:\"\\f047\"}.fa-step-backward:before{content:\"\\f048\"}.fa-fast-backward:before{content:\"\\f049\"}.fa-backward:before{content:\"\\f04a\"}.fa-play:before{content:\"\\f04b\"}.fa-pause:before{content:\"\\f04c\"}.fa-stop:before{content:\"\\f04d\"}.fa-forward:before{content:\"\\f04e\"}.fa-fast-forward:before{content:\"\\f050\"}.fa-step-forward:before{content:\"\\f051\"}.fa-eject:before{content:\"\\f052\"}.fa-chevron-left:before{content:\"\\f053\"}.fa-chevron-right:before{content:\"\\f054\"}.fa-plus-circle:before{content:\"\\f055\"}.fa-minus-circle:before{content:\"\\f056\"}.fa-times-circle:before{content:\"\\f057\"}.fa-check-circle:before{content:\"\\f058\"}.fa-question-circle:before{content:\"\\f059\"}.fa-info-circle:before{content:\"\\f05a\"}.fa-crosshairs:before{content:\"\\f05b\"}.fa-times-circle-o:before{content:\"\\f05c\"}.fa-check-circle-o:before{content:\"\\f05d\"}.fa-ban:before{content:\"\\f05e\"}.fa-arrow-left:before{content:\"\\f060\"}.fa-arrow-right:before{content:\"\\f061\"}.fa-arrow-up:before{content:\"\\f062\"}.fa-arrow-down:before{content:\"\\f063\"}.fa-mail-forward:before,.fa-share:before{content:\"\\f064\"}.fa-expand:before{content:\"\\f065\"}.fa-compress:before{content:\"\\f066\"}.fa-plus:before{content:\"\\f067\"}.fa-minus:before{content:\"\\f068\"}.fa-asterisk:before{content:\"\\f069\"}.fa-exclamation-circle:before{content:\"\\f06a\"}.fa-gift:before{content:\"\\f06b\"}.fa-leaf:before{content:\"\\f06c\"}.fa-fire:before{content:\"\\f06d\"}.fa-eye:before{content:\"\\f06e\"}.fa-eye-slash:before{content:\"\\f070\"}.fa-warning:before,.fa-exclamation-triangle:before{content:\"\\f071\"}.fa-plane:before{content:\"\\f072\"}.fa-calendar:before{content:\"\\f073\"}.fa-random:before{content:\"\\f074\"}.fa-comment:before{content:\"\\f075\"}.fa-magnet:before{content:\"\\f076\"}.fa-chevron-up:before{content:\"\\f077\"}.fa-chevron-down:before{content:\"\\f078\"}.fa-retweet:before{content:\"\\f079\"}.fa-shopping-cart:before{content:\"\\f07a\"}.fa-folder:before{content:\"\\f07b\"}.fa-folder-open:before{content:\"\\f07c\"}.fa-arrows-v:before{content:\"\\f07d\"}.fa-arrows-h:before{content:\"\\f07e\"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:\"\\f080\"}.fa-twitter-square:before{content:\"\\f081\"}.fa-facebook-square:before{content:\"\\f082\"}.fa-camera-retro:before{content:\"\\f083\"}.fa-key:before{content:\"\\f084\"}.fa-gears:before,.fa-cogs:before{content:\"\\f085\"}.fa-comments:before{content:\"\\f086\"}.fa-thumbs-o-up:before{content:\"\\f087\"}.fa-thumbs-o-down:before{content:\"\\f088\"}.fa-star-half:before{content:\"\\f089\"}.fa-heart-o:before{content:\"\\f08a\"}.fa-sign-out:before{content:\"\\f08b\"}.fa-linkedin-square:before{content:\"\\f08c\"}.fa-thumb-tack:before{content:\"\\f08d\"}.fa-external-link:before{content:\"\\f08e\"}.fa-sign-in:before{content:\"\\f090\"}.fa-trophy:before{content:\"\\f091\"}.fa-github-square:before{content:\"\\f092\"}.fa-upload:before{content:\"\\f093\"}.fa-lemon-o:before{content:\"\\f094\"}.fa-phone:before{content:\"\\f095\"}.fa-square-o:before{content:\"\\f096\"}.fa-bookmark-o:before{content:\"\\f097\"}.fa-phone-square:before{content:\"\\f098\"}.fa-twitter:before{content:\"\\f099\"}.fa-facebook-f:before,.fa-facebook:before{content:\"\\f09a\"}.fa-github:before{content:\"\\f09b\"}.fa-unlock:before{content:\"\\f09c\"}.fa-credit-card:before{content:\"\\f09d\"}.fa-feed:before,.fa-rss:before{content:\"\\f09e\"}.fa-hdd-o:before{content:\"\\f0a0\"}.fa-bullhorn:before{content:\"\\f0a1\"}.fa-bell:before{content:\"\\f0f3\"}.fa-certificate:before{content:\"\\f0a3\"}.fa-hand-o-right:before{content:\"\\f0a4\"}.fa-hand-o-left:before{content:\"\\f0a5\"}.fa-hand-o-up:before{content:\"\\f0a6\"}.fa-hand-o-down:before{content:\"\\f0a7\"}.fa-arrow-circle-left:before{content:\"\\f0a8\"}.fa-arrow-circle-right:before{content:\"\\f0a9\"}.fa-arrow-circle-up:before{content:\"\\f0aa\"}.fa-arrow-circle-down:before{content:\"\\f0ab\"}.fa-globe:before{content:\"\\f0ac\"}.fa-wrench:before{content:\"\\f0ad\"}.fa-tasks:before{content:\"\\f0ae\"}.fa-filter:before{content:\"\\f0b0\"}.fa-briefcase:before{content:\"\\f0b1\"}.fa-arrows-alt:before{content:\"\\f0b2\"}.fa-group:before,.fa-users:before{content:\"\\f0c0\"}.fa-chain:before,.fa-link:before{content:\"\\f0c1\"}.fa-cloud:before{content:\"\\f0c2\"}.fa-flask:before{content:\"\\f0c3\"}.fa-cut:before,.fa-scissors:before{content:\"\\f0c4\"}.fa-copy:before,.fa-files-o:before{content:\"\\f0c5\"}.fa-paperclip:before{content:\"\\f0c6\"}.fa-save:before,.fa-floppy-o:before{content:\"\\f0c7\"}.fa-square:before{content:\"\\f0c8\"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:\"\\f0c9\"}.fa-list-ul:before{content:\"\\f0ca\"}.fa-list-ol:before{content:\"\\f0cb\"}.fa-strikethrough:before{content:\"\\f0cc\"}.fa-underline:before{content:\"\\f0cd\"}.fa-table:before{content:\"\\f0ce\"}.fa-magic:before{content:\"\\f0d0\"}.fa-truck:before{content:\"\\f0d1\"}.fa-pinterest:before{content:\"\\f0d2\"}.fa-pinterest-square:before{content:\"\\f0d3\"}.fa-google-plus-square:before{content:\"\\f0d4\"}.fa-google-plus:before{content:\"\\f0d5\"}.fa-money:before{content:\"\\f0d6\"}.fa-caret-down:before{content:\"\\f0d7\"}.fa-caret-up:before{content:\"\\f0d8\"}.fa-caret-left:before{content:\"\\f0d9\"}.fa-caret-right:before{content:\"\\f0da\"}.fa-columns:before{content:\"\\f0db\"}.fa-unsorted:before,.fa-sort:before{content:\"\\f0dc\"}.fa-sort-down:before,.fa-sort-desc:before{content:\"\\f0dd\"}.fa-sort-up:before,.fa-sort-asc:before{content:\"\\f0de\"}.fa-envelope:before{content:\"\\f0e0\"}.fa-linkedin:before{content:\"\\f0e1\"}.fa-rotate-left:before,.fa-undo:before{content:\"\\f0e2\"}.fa-legal:before,.fa-gavel:before{content:\"\\f0e3\"}.fa-dashboard:before,.fa-tachometer:before{content:\"\\f0e4\"}.fa-comment-o:before{content:\"\\f0e5\"}.fa-comments-o:before{content:\"\\f0e6\"}.fa-flash:before,.fa-bolt:before{content:\"\\f0e7\"}.fa-sitemap:before{content:\"\\f0e8\"}.fa-umbrella:before{content:\"\\f0e9\"}.fa-paste:before,.fa-clipboard:before{content:\"\\f0ea\"}.fa-lightbulb-o:before{content:\"\\f0eb\"}.fa-exchange:before{content:\"\\f0ec\"}.fa-cloud-download:before{content:\"\\f0ed\"}.fa-cloud-upload:before{content:\"\\f0ee\"}.fa-user-md:before{content:\"\\f0f0\"}.fa-stethoscope:before{content:\"\\f0f1\"}.fa-suitcase:before{content:\"\\f0f2\"}.fa-bell-o:before{content:\"\\f0a2\"}.fa-coffee:before{content:\"\\f0f4\"}.fa-cutlery:before{content:\"\\f0f5\"}.fa-file-text-o:before{content:\"\\f0f6\"}.fa-building-o:before{content:\"\\f0f7\"}.fa-hospital-o:before{content:\"\\f0f8\"}.fa-ambulance:before{content:\"\\f0f9\"}.fa-medkit:before{content:\"\\f0fa\"}.fa-fighter-jet:before{content:\"\\f0fb\"}.fa-beer:before{content:\"\\f0fc\"}.fa-h-square:before{content:\"\\f0fd\"}.fa-plus-square:before{content:\"\\f0fe\"}.fa-angle-double-left:before{content:\"\\f100\"}.fa-angle-double-right:before{content:\"\\f101\"}.fa-angle-double-up:before{content:\"\\f102\"}.fa-angle-double-down:before{content:\"\\f103\"}.fa-angle-left:before{content:\"\\f104\"}.fa-angle-right:before{content:\"\\f105\"}.fa-angle-up:before{content:\"\\f106\"}.fa-angle-down:before{content:\"\\f107\"}.fa-desktop:before{content:\"\\f108\"}.fa-laptop:before{content:\"\\f109\"}.fa-tablet:before{content:\"\\f10a\"}.fa-mobile-phone:before,.fa-mobile:before{content:\"\\f10b\"}.fa-circle-o:before{content:\"\\f10c\"}.fa-quote-left:before{content:\"\\f10d\"}.fa-quote-right:before{content:\"\\f10e\"}.fa-spinner:before{content:\"\\f110\"}.fa-circle:before{content:\"\\f111\"}.fa-mail-reply:before,.fa-reply:before{content:\"\\f112\"}.fa-github-alt:before{content:\"\\f113\"}.fa-folder-o:before{content:\"\\f114\"}.fa-folder-open-o:before{content:\"\\f115\"}.fa-smile-o:before{content:\"\\f118\"}.fa-frown-o:before{content:\"\\f119\"}.fa-meh-o:before{content:\"\\f11a\"}.fa-gamepad:before{content:\"\\f11b\"}.fa-keyboard-o:before{content:\"\\f11c\"}.fa-flag-o:before{content:\"\\f11d\"}.fa-flag-checkered:before{content:\"\\f11e\"}.fa-terminal:before{content:\"\\f120\"}.fa-code:before{content:\"\\f121\"}.fa-mail-reply-all:before,.fa-reply-all:before{content:\"\\f122\"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:\"\\f123\"}.fa-location-arrow:before{content:\"\\f124\"}.fa-crop:before{content:\"\\f125\"}.fa-code-fork:before{content:\"\\f126\"}.fa-unlink:before,.fa-chain-broken:before{content:\"\\f127\"}.fa-question:before{content:\"\\f128\"}.fa-info:before{content:\"\\f129\"}.fa-exclamation:before{content:\"\\f12a\"}.fa-superscript:before{content:\"\\f12b\"}.fa-subscript:before{content:\"\\f12c\"}.fa-eraser:before{content:\"\\f12d\"}.fa-puzzle-piece:before{content:\"\\f12e\"}.fa-microphone:before{content:\"\\f130\"}.fa-microphone-slash:before{content:\"\\f131\"}.fa-shield:before{content:\"\\f132\"}.fa-calendar-o:before{content:\"\\f133\"}.fa-fire-extinguisher:before{content:\"\\f134\"}.fa-rocket:before{content:\"\\f135\"}.fa-maxcdn:before{content:\"\\f136\"}.fa-chevron-circle-left:before{content:\"\\f137\"}.fa-chevron-circle-right:before{content:\"\\f138\"}.fa-chevron-circle-up:before{content:\"\\f139\"}.fa-chevron-circle-down:before{content:\"\\f13a\"}.fa-html5:before{content:\"\\f13b\"}.fa-css3:before{content:\"\\f13c\"}.fa-anchor:before{content:\"\\f13d\"}.fa-unlock-alt:before{content:\"\\f13e\"}.fa-bullseye:before{content:\"\\f140\"}.fa-ellipsis-h:before{content:\"\\f141\"}.fa-ellipsis-v:before{content:\"\\f142\"}.fa-rss-square:before{content:\"\\f143\"}.fa-play-circle:before{content:\"\\f144\"}.fa-ticket:before{content:\"\\f145\"}.fa-minus-square:before{content:\"\\f146\"}.fa-minus-square-o:before{content:\"\\f147\"}.fa-level-up:before{content:\"\\f148\"}.fa-level-down:before{content:\"\\f149\"}.fa-check-square:before{content:\"\\f14a\"}.fa-pencil-square:before{content:\"\\f14b\"}.fa-external-link-square:before{content:\"\\f14c\"}.fa-share-square:before{content:\"\\f14d\"}.fa-compass:before{content:\"\\f14e\"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:\"\\f150\"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:\"\\f151\"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:\"\\f152\"}.fa-euro:before,.fa-eur:before{content:\"\\f153\"}.fa-gbp:before{content:\"\\f154\"}.fa-dollar:before,.fa-usd:before{content:\"\\f155\"}.fa-rupee:before,.fa-inr:before{content:\"\\f156\"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:\"\\f157\"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:\"\\f158\"}.fa-won:before,.fa-krw:before{content:\"\\f159\"}.fa-bitcoin:before,.fa-btc:before{content:\"\\f15a\"}.fa-file:before{content:\"\\f15b\"}.fa-file-text:before{content:\"\\f15c\"}.fa-sort-alpha-asc:before{content:\"\\f15d\"}.fa-sort-alpha-desc:before{content:\"\\f15e\"}.fa-sort-amount-asc:before{content:\"\\f160\"}.fa-sort-amount-desc:before{content:\"\\f161\"}.fa-sort-numeric-asc:before{content:\"\\f162\"}.fa-sort-numeric-desc:before{content:\"\\f163\"}.fa-thumbs-up:before{content:\"\\f164\"}.fa-thumbs-down:before{content:\"\\f165\"}.fa-youtube-square:before{content:\"\\f166\"}.fa-youtube:before{content:\"\\f167\"}.fa-xing:before{content:\"\\f168\"}.fa-xing-square:before{content:\"\\f169\"}.fa-youtube-play:before{content:\"\\f16a\"}.fa-dropbox:before{content:\"\\f16b\"}.fa-stack-overflow:before{content:\"\\f16c\"}.fa-instagram:before{content:\"\\f16d\"}.fa-flickr:before{content:\"\\f16e\"}.fa-adn:before{content:\"\\f170\"}.fa-bitbucket:before{content:\"\\f171\"}.fa-bitbucket-square:before{content:\"\\f172\"}.fa-tumblr:before{content:\"\\f173\"}.fa-tumblr-square:before{content:\"\\f174\"}.fa-long-arrow-down:before{content:\"\\f175\"}.fa-long-arrow-up:before{content:\"\\f176\"}.fa-long-arrow-left:before{content:\"\\f177\"}.fa-long-arrow-right:before{content:\"\\f178\"}.fa-apple:before{content:\"\\f179\"}.fa-windows:before{content:\"\\f17a\"}.fa-android:before{content:\"\\f17b\"}.fa-linux:before{content:\"\\f17c\"}.fa-dribbble:before{content:\"\\f17d\"}.fa-skype:before{content:\"\\f17e\"}.fa-foursquare:before{content:\"\\f180\"}.fa-trello:before{content:\"\\f181\"}.fa-female:before{content:\"\\f182\"}.fa-male:before{content:\"\\f183\"}.fa-gittip:before,.fa-gratipay:before{content:\"\\f184\"}.fa-sun-o:before{content:\"\\f185\"}.fa-moon-o:before{content:\"\\f186\"}.fa-archive:before{content:\"\\f187\"}.fa-bug:before{content:\"\\f188\"}.fa-vk:before{content:\"\\f189\"}.fa-weibo:before{content:\"\\f18a\"}.fa-renren:before{content:\"\\f18b\"}.fa-pagelines:before{content:\"\\f18c\"}.fa-stack-exchange:before{content:\"\\f18d\"}.fa-arrow-circle-o-right:before{content:\"\\f18e\"}.fa-arrow-circle-o-left:before{content:\"\\f190\"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:\"\\f191\"}.fa-dot-circle-o:before{content:\"\\f192\"}.fa-wheelchair:before{content:\"\\f193\"}.fa-vimeo-square:before{content:\"\\f194\"}.fa-turkish-lira:before,.fa-try:before{content:\"\\f195\"}.fa-plus-square-o:before{content:\"\\f196\"}.fa-space-shuttle:before{content:\"\\f197\"}.fa-slack:before{content:\"\\f198\"}.fa-envelope-square:before{content:\"\\f199\"}.fa-wordpress:before{content:\"\\f19a\"}.fa-openid:before{content:\"\\f19b\"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:\"\\f19c\"}.fa-mortar-board:before,.fa-graduation-cap:before{content:\"\\f19d\"}.fa-yahoo:before{content:\"\\f19e\"}.fa-google:before{content:\"\\f1a0\"}.fa-reddit:before{content:\"\\f1a1\"}.fa-reddit-square:before{content:\"\\f1a2\"}.fa-stumbleupon-circle:before{content:\"\\f1a3\"}.fa-stumbleupon:before{content:\"\\f1a4\"}.fa-delicious:before{content:\"\\f1a5\"}.fa-digg:before{content:\"\\f1a6\"}.fa-pied-piper:before{content:\"\\f1a7\"}.fa-pied-piper-alt:before{content:\"\\f1a8\"}.fa-drupal:before{content:\"\\f1a9\"}.fa-joomla:before{content:\"\\f1aa\"}.fa-language:before{content:\"\\f1ab\"}.fa-fax:before{content:\"\\f1ac\"}.fa-building:before{content:\"\\f1ad\"}.fa-child:before{content:\"\\f1ae\"}.fa-paw:before{content:\"\\f1b0\"}.fa-spoon:before{content:\"\\f1b1\"}.fa-cube:before{content:\"\\f1b2\"}.fa-cubes:before{content:\"\\f1b3\"}.fa-behance:before{content:\"\\f1b4\"}.fa-behance-square:before{content:\"\\f1b5\"}.fa-steam:before{content:\"\\f1b6\"}.fa-steam-square:before{content:\"\\f1b7\"}.fa-recycle:before{content:\"\\f1b8\"}.fa-automobile:before,.fa-car:before{content:\"\\f1b9\"}.fa-cab:before,.fa-taxi:before{content:\"\\f1ba\"}.fa-tree:before{content:\"\\f1bb\"}.fa-spotify:before{content:\"\\f1bc\"}.fa-deviantart:before{content:\"\\f1bd\"}.fa-soundcloud:before{content:\"\\f1be\"}.fa-database:before{content:\"\\f1c0\"}.fa-file-pdf-o:before{content:\"\\f1c1\"}.fa-file-word-o:before{content:\"\\f1c2\"}.fa-file-excel-o:before{content:\"\\f1c3\"}.fa-file-powerpoint-o:before{content:\"\\f1c4\"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:\"\\f1c5\"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:\"\\f1c6\"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:\"\\f1c7\"}.fa-file-movie-o:before,.fa-file-video-o:before{content:\"\\f1c8\"}.fa-file-code-o:before{content:\"\\f1c9\"}.fa-vine:before{content:\"\\f1ca\"}.fa-codepen:before{content:\"\\f1cb\"}.fa-jsfiddle:before{content:\"\\f1cc\"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:\"\\f1cd\"}.fa-circle-o-notch:before{content:\"\\f1ce\"}.fa-ra:before,.fa-rebel:before{content:\"\\f1d0\"}.fa-ge:before,.fa-empire:before{content:\"\\f1d1\"}.fa-git-square:before{content:\"\\f1d2\"}.fa-git:before{content:\"\\f1d3\"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:\"\\f1d4\"}.fa-tencent-weibo:before{content:\"\\f1d5\"}.fa-qq:before{content:\"\\f1d6\"}.fa-wechat:before,.fa-weixin:before{content:\"\\f1d7\"}.fa-send:before,.fa-paper-plane:before{content:\"\\f1d8\"}.fa-send-o:before,.fa-paper-plane-o:before{content:\"\\f1d9\"}.fa-history:before{content:\"\\f1da\"}.fa-circle-thin:before{content:\"\\f1db\"}.fa-header:before{content:\"\\f1dc\"}.fa-paragraph:before{content:\"\\f1dd\"}.fa-sliders:before{content:\"\\f1de\"}.fa-share-alt:before{content:\"\\f1e0\"}.fa-share-alt-square:before{content:\"\\f1e1\"}.fa-bomb:before{content:\"\\f1e2\"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:\"\\f1e3\"}.fa-tty:before{content:\"\\f1e4\"}.fa-binoculars:before{content:\"\\f1e5\"}.fa-plug:before{content:\"\\f1e6\"}.fa-slideshare:before{content:\"\\f1e7\"}.fa-twitch:before{content:\"\\f1e8\"}.fa-yelp:before{content:\"\\f1e9\"}.fa-newspaper-o:before{content:\"\\f1ea\"}.fa-wifi:before{content:\"\\f1eb\"}.fa-calculator:before{content:\"\\f1ec\"}.fa-paypal:before{content:\"\\f1ed\"}.fa-google-wallet:before{content:\"\\f1ee\"}.fa-cc-visa:before{content:\"\\f1f0\"}.fa-cc-mastercard:before{content:\"\\f1f1\"}.fa-cc-discover:before{content:\"\\f1f2\"}.fa-cc-amex:before{content:\"\\f1f3\"}.fa-cc-paypal:before{content:\"\\f1f4\"}.fa-cc-stripe:before{content:\"\\f1f5\"}.fa-bell-slash:before{content:\"\\f1f6\"}.fa-bell-slash-o:before{content:\"\\f1f7\"}.fa-trash:before{content:\"\\f1f8\"}.fa-copyright:before{content:\"\\f1f9\"}.fa-at:before{content:\"\\f1fa\"}.fa-eyedropper:before{content:\"\\f1fb\"}.fa-paint-brush:before{content:\"\\f1fc\"}.fa-birthday-cake:before{content:\"\\f1fd\"}.fa-area-chart:before{content:\"\\f1fe\"}.fa-pie-chart:before{content:\"\\f200\"}.fa-line-chart:before{content:\"\\f201\"}.fa-lastfm:before{content:\"\\f202\"}.fa-lastfm-square:before{content:\"\\f203\"}.fa-toggle-off:before{content:\"\\f204\"}.fa-toggle-on:before{content:\"\\f205\"}.fa-bicycle:before{content:\"\\f206\"}.fa-bus:before{content:\"\\f207\"}.fa-ioxhost:before{content:\"\\f208\"}.fa-angellist:before{content:\"\\f209\"}.fa-cc:before{content:\"\\f20a\"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:\"\\f20b\"}.fa-meanpath:before{content:\"\\f20c\"}.fa-buysellads:before{content:\"\\f20d\"}.fa-connectdevelop:before{content:\"\\f20e\"}.fa-dashcube:before{content:\"\\f210\"}.fa-forumbee:before{content:\"\\f211\"}.fa-leanpub:before{content:\"\\f212\"}.fa-sellsy:before{content:\"\\f213\"}.fa-shirtsinbulk:before{content:\"\\f214\"}.fa-simplybuilt:before{content:\"\\f215\"}.fa-skyatlas:before{content:\"\\f216\"}.fa-cart-plus:before{content:\"\\f217\"}.fa-cart-arrow-down:before{content:\"\\f218\"}.fa-diamond:before{content:\"\\f219\"}.fa-ship:before{content:\"\\f21a\"}.fa-user-secret:before{content:\"\\f21b\"}.fa-motorcycle:before{content:\"\\f21c\"}.fa-street-view:before{content:\"\\f21d\"}.fa-heartbeat:before{content:\"\\f21e\"}.fa-venus:before{content:\"\\f221\"}.fa-mars:before{content:\"\\f222\"}.fa-mercury:before{content:\"\\f223\"}.fa-intersex:before,.fa-transgender:before{content:\"\\f224\"}.fa-transgender-alt:before{content:\"\\f225\"}.fa-venus-double:before{content:\"\\f226\"}.fa-mars-double:before{content:\"\\f227\"}.fa-venus-mars:before{content:\"\\f228\"}.fa-mars-stroke:before{content:\"\\f229\"}.fa-mars-stroke-v:before{content:\"\\f22a\"}.fa-mars-stroke-h:before{content:\"\\f22b\"}.fa-neuter:before{content:\"\\f22c\"}.fa-genderless:before{content:\"\\f22d\"}.fa-facebook-official:before{content:\"\\f230\"}.fa-pinterest-p:before{content:\"\\f231\"}.fa-whatsapp:before{content:\"\\f232\"}.fa-server:before{content:\"\\f233\"}.fa-user-plus:before{content:\"\\f234\"}.fa-user-times:before{content:\"\\f235\"}.fa-hotel:before,.fa-bed:before{content:\"\\f236\"}.fa-viacoin:before{content:\"\\f237\"}.fa-train:before{content:\"\\f238\"}.fa-subway:before{content:\"\\f239\"}.fa-medium:before{content:\"\\f23a\"}.fa-yc:before,.fa-y-combinator:before{content:\"\\f23b\"}.fa-optin-monster:before{content:\"\\f23c\"}.fa-opencart:before{content:\"\\f23d\"}.fa-expeditedssl:before{content:\"\\f23e\"}.fa-battery-4:before,.fa-battery-full:before{content:\"\\f240\"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:\"\\f241\"}.fa-battery-2:before,.fa-battery-half:before{content:\"\\f242\"}.fa-battery-1:before,.fa-battery-quarter:before{content:\"\\f243\"}.fa-battery-0:before,.fa-battery-empty:before{content:\"\\f244\"}.fa-mouse-pointer:before{content:\"\\f245\"}.fa-i-cursor:before{content:\"\\f246\"}.fa-object-group:before{content:\"\\f247\"}.fa-object-ungroup:before{content:\"\\f248\"}.fa-sticky-note:before{content:\"\\f249\"}.fa-sticky-note-o:before{content:\"\\f24a\"}.fa-cc-jcb:before{content:\"\\f24b\"}.fa-cc-diners-club:before{content:\"\\f24c\"}.fa-clone:before{content:\"\\f24d\"}.fa-balance-scale:before{content:\"\\f24e\"}.fa-hourglass-o:before{content:\"\\f250\"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:\"\\f251\"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:\"\\f252\"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:\"\\f253\"}.fa-hourglass:before{content:\"\\f254\"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:\"\\f255\"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:\"\\f256\"}.fa-hand-scissors-o:before{content:\"\\f257\"}.fa-hand-lizard-o:before{content:\"\\f258\"}.fa-hand-spock-o:before{content:\"\\f259\"}.fa-hand-pointer-o:before{content:\"\\f25a\"}.fa-hand-peace-o:before{content:\"\\f25b\"}.fa-trademark:before{content:\"\\f25c\"}.fa-registered:before{content:\"\\f25d\"}.fa-creative-commons:before{content:\"\\f25e\"}.fa-gg:before{content:\"\\f260\"}.fa-gg-circle:before{content:\"\\f261\"}.fa-tripadvisor:before{content:\"\\f262\"}.fa-odnoklassniki:before{content:\"\\f263\"}.fa-odnoklassniki-square:before{content:\"\\f264\"}.fa-get-pocket:before{content:\"\\f265\"}.fa-wikipedia-w:before{content:\"\\f266\"}.fa-safari:before{content:\"\\f267\"}.fa-chrome:before{content:\"\\f268\"}.fa-firefox:before{content:\"\\f269\"}.fa-opera:before{content:\"\\f26a\"}.fa-internet-explorer:before{content:\"\\f26b\"}.fa-tv:before,.fa-television:before{content:\"\\f26c\"}.fa-contao:before{content:\"\\f26d\"}.fa-500px:before{content:\"\\f26e\"}.fa-amazon:before{content:\"\\f270\"}.fa-calendar-plus-o:before{content:\"\\f271\"}.fa-calendar-minus-o:before{content:\"\\f272\"}.fa-calendar-times-o:before{content:\"\\f273\"}.fa-calendar-check-o:before{content:\"\\f274\"}.fa-industry:before{content:\"\\f275\"}.fa-map-pin:before{content:\"\\f276\"}.fa-map-signs:before{content:\"\\f277\"}.fa-map-o:before{content:\"\\f278\"}.fa-map:before{content:\"\\f279\"}.fa-commenting:before{content:\"\\f27a\"}.fa-commenting-o:before{content:\"\\f27b\"}.fa-houzz:before{content:\"\\f27c\"}.fa-vimeo:before{content:\"\\f27d\"}.fa-black-tie:before{content:\"\\f27e\"}.fa-fonticons:before{content:\"\\f280\"}\n"]} \ No newline at end of file diff --git a/filer/static/filer/css/maps/admin_filer.icons.css.map b/filer/static/filer/css/maps/admin_filer.icons.css.map new file mode 100644 index 000000000..c8ca15725 --- /dev/null +++ b/filer/static/filer/css/maps/admin_filer.icons.css.map @@ -0,0 +1 @@ +{"version":3,"sources":["components/_iconography.scss"],"names":[],"mappings":"AAIA,WACI,mCAAA,CACA,qDAAA,CACA,0WAAA,CAKA,kBAAA,CACA,iBAAA,CAGJ,UACI,oBAAA,CACA,iCAAA,CACA,iBAAA,CACA,mBAAA,CACA,iCAAA,CAAA,yBAAA,CACA,kCAAA,CACA,iCAAA,CAkDA,4BACI,eAAA,CADJ,4BACI,eAAA,CADJ,+BACI,eAAA,CADJ,0BACI,eAAA,CADJ,sBACI,eAAA,CADJ,gCACI,eAAA,CADJ,yBACI,eAAA,CADJ,wBACI,eAAA,CADJ,0BACI,eAAA,CADJ,0BACI,eAAA,CADJ,yBACI,eAAA,CADJ,wBACI,eAAA","file":"../admin_filer.icons.css","sourcesContent":["//######################################################################################################################\n// #ICONOGRAPHY#\n\n// default font file generated by gulp\n@font-face {\n font-family: \"django-filer-iconfont\";\n src: url(\"../fonts/django-filer-iconfont.eot?v=3.2.0\");\n src: url(\"../fonts/django-filer-iconfont.eot?v=3.2.0#iefix\") format(\"eot\"),\n url(\"../fonts/django-filer-iconfont.woff2?v=3.2.0\") format(\"woff2\"),\n url(\"../fonts/django-filer-iconfont.woff?v=3.2.0\") format(\"woff\"),\n url(\"../fonts/django-filer-iconfont.ttf?v=3.2.0\") format(\"truetype\"),\n url(\"../fonts/django-filer-iconfont.svg?v=3.2.0#django-filer-iconfont\") format(\"svg\");\n font-weight: normal;\n font-style: normal;\n}\n\n%icon {\n display: inline-block;\n font-family: django-filer-iconfont;\n font-size: inherit;\n text-rendering: auto;\n transform: translate(0, 0);\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n@function icon-char($filename) {\n $char: \"\";\n\n @if $filename == arrow-down {\n $char: \"E001\";\n }\n @if $filename == caret-down {\n $char: \"E002\";\n }\n @if $filename == chevron-right {\n $char: \"E003\";\n }\n @if $filename == download {\n $char: \"E004\";\n }\n @if $filename == link {\n $char: \"E005\";\n }\n @if $filename == move-to-folder {\n $char: \"E006\";\n }\n @if $filename == picture {\n $char: \"E007\";\n }\n @if $filename == select {\n $char: \"E008\";\n }\n @if $filename == settings {\n $char: \"E009\";\n }\n @if $filename == th-large {\n $char: \"E00A\";\n }\n @if $filename == th-list {\n $char: \"E00B\";\n }\n @if $filename == upload {\n $char: \"E00C\";\n }\n\n @return $char;\n}\n\n.cms-icon {\n @extend %icon;\n}\n@mixin icon($filename, $insert: before) {\n &:#{$insert} {\n content: #{\"\\\"\\\\\"}#{icon-char($filename) + \"\\\"\"};\n }\n}\n\n// #####################################################################################################################\n// #ICONS:start#\n// use unicode characters for accessibility reasons and use aria-hidden=\"true\" for decorative icons\n// DOCS: http://filamentgroup.com/lab/bulletproof_icon_fonts.html\n\n.cms-icon-arrow-down {\n @include icon(arrow-down);\n}\n\n.cms-icon-caret-down {\n @include icon(caret-down);\n}\n\n.cms-icon-chevron-right {\n @include icon(chevron-right);\n}\n\n.cms-icon-download {\n @include icon(download);\n}\n\n.cms-icon-link {\n @include icon(link);\n}\n\n.cms-icon-move-to-folder {\n @include icon(move-to-folder);\n}\n\n.cms-icon-picture {\n @include icon(picture);\n}\n\n.cms-icon-select {\n @include icon(select);\n}\n\n.cms-icon-settings {\n @include icon(settings);\n}\n\n.cms-icon-th-large {\n @include icon(th-large);\n}\n\n.cms-icon-th-list {\n @include icon(th-list);\n}\n\n.cms-icon-upload {\n @include icon(upload);\n}\n"]} \ No newline at end of file diff --git a/mptt/__init__.py b/mptt/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/requirements/base.txt b/tests/requirements/base.txt index 3bbe8ffa8..0bbf637e4 100644 --- a/tests/requirements/base.txt +++ b/tests/requirements/base.txt @@ -1,5 +1,6 @@ # requirements from setup.py Pillow +django-app-helper>=3.3.1 # other requirements coverage diff --git a/tests/requirements/django-2.2.txt b/tests/requirements/django-2.2.txt index b11817dee..0d58becf5 100644 --- a/tests/requirements/django-2.2.txt +++ b/tests/requirements/django-2.2.txt @@ -2,4 +2,3 @@ django>=2.2,<3.0 django_polymorphic>=2.0,<2.1 -django-app-helper diff --git a/tests/requirements/django-3.0.txt b/tests/requirements/django-3.0.txt index 7acd7b8cd..1923d5a5f 100644 --- a/tests/requirements/django-3.0.txt +++ b/tests/requirements/django-3.0.txt @@ -2,4 +2,3 @@ django>=3.0,<3.1 django_polymorphic>=2.1,<2.2 -django-app-helper diff --git a/tests/requirements/django-3.1.txt b/tests/requirements/django-3.1.txt index a02465ea6..bff7a2517 100644 --- a/tests/requirements/django-3.1.txt +++ b/tests/requirements/django-3.1.txt @@ -2,4 +2,3 @@ django>=3.1,<3.2 django_polymorphic>=2,<3.1 -django-app-helper diff --git a/tests/requirements/django-3.2.txt b/tests/requirements/django-3.2.txt index 52ee4482d..ffb90488f 100644 --- a/tests/requirements/django-3.2.txt +++ b/tests/requirements/django-3.2.txt @@ -2,4 +2,3 @@ django>=3.2,<4 django_polymorphic>=2,<3.1 -django-app-helper diff --git a/tests/requirements/django-4.0.txt b/tests/requirements/django-4.0.txt index 1a782168a..851285aa6 100644 --- a/tests/requirements/django-4.0.txt +++ b/tests/requirements/django-4.0.txt @@ -2,4 +2,3 @@ django>=4.0,<4.1 django_polymorphic>=3.1 -https://github.com/jrief/django-app-helper/archive/refs/heads/develop.zip diff --git a/tests/requirements/django-4.1.txt b/tests/requirements/django-4.1.txt index e47bf45d9..228ee5a6e 100644 --- a/tests/requirements/django-4.1.txt +++ b/tests/requirements/django-4.1.txt @@ -2,4 +2,3 @@ django>=4.1,<4.2 django_polymorphic>=3.1 -https://github.com/jrief/django-app-helper/archive/refs/heads/develop.zip diff --git a/tests/requirements/django-4.2.txt b/tests/requirements/django-4.2.txt index 7e2cc1421..1f158dcf5 100644 --- a/tests/requirements/django-4.2.txt +++ b/tests/requirements/django-4.2.txt @@ -2,4 +2,3 @@ django>=4.2,<5 django_polymorphic>=3.1 -https://github.com/jrief/django-app-helper/archive/refs/heads/develop.zip
acl-org__acl-anthology-1701
Correction to Anthology ID 2021.naacl-main.477 ## Revision or erratum - [X] I have attached the revised PDF or erratum to this issue - [X] I have written a brief description of the changes below **Description of changes** From Bushra Sabir: I want to bring to your notice that my paper ID: 1271, "ReinforceBug: A Framework to Generate Adversarial Textual Examples" got accepted and was orally presented in NAACL-2021. Initially, the paper was missing from sessions as well and at my request, it was added. This piece of work is an important work in my PHD, however, due to its absence in proceedings, it's hard for me to claim it. I request you to please add in the proceedings so that it is easy for me to link it to my ORCID profile. From email discussion amongst the NAACL 2021 committees: This paper was accidentally not included in the NAACL 2021 proceedings because it was accidentally not included in the schedule from which ACLPUB generates the proceedings. We decided that it would be too risky at this point to regenerate the entire proceedings, so instead, we would like to just add it to the ACL Anthology directly. I have guessed a new Anthology ID for it (I believe this is one beyond the largest number assigned to a NAACL main paper) but please feel free to adjust as necessary. The official author list from START is: * Bushra Sabir * Muhammad Ali Babar * Raj Gaire Here are the paper, data, and code from the START final submission: * [1271_Paper.pdf](https://github.com/acl-org/acl-anthology/files/7781672/1271_Paper.pdf) * [1271_OptionalSupplementaryData.xlsx](https://github.com/acl-org/acl-anthology/files/7781674/1271_OptionalSupplementaryData.xlsx) * [1271_OptionalSupplementaryCode.zip](https://github.com/acl-org/acl-anthology/files/7781676/1271_OptionalSupplementaryCode.zip)
[]
[]
diff --git a/bin/add_attachment.py b/bin/add_attachment.py old mode 100644 new mode 100755 diff --git a/data/xml/2020.inlg.xml b/data/xml/2020.inlg.xml index f75776537e..fbf0663fb7 100644 --- a/data/xml/2020.inlg.xml +++ b/data/xml/2020.inlg.xml @@ -483,6 +483,7 @@ <abstract>Generating multi-sentence image descriptions is a challenging task, which requires a good model to produce coherent and accurate paragraphs, describing salient objects in the image. We argue that multiple sources of information are beneficial when describing visual scenes with long sequences. These include (i) perceptual information and (ii) semantic (language) information about how to describe what is in the image. We also compare the effects of using two different pooling mechanisms on either a single modality or their combination. We demonstrate that the model which utilises both visual and language inputs can be used to generate accurate and diverse paragraphs when combined with a particular pooling mechanism. The results of our automatic and human evaluation show that learning to embed semantic information along with visual stimuli into the paragraph generation model is not trivial, raising a variety of proposals for future experiments.</abstract> <url hash="4bd2596f">2020.inlg-1.40</url> <bibkey>ilinykh-dobnik-2020-image</bibkey> + <pwcdataset url="https://paperswithcode.com/dataset/image-paragraph-captioning">Image Paragraph Captioning</pwcdataset> </paper> <paper id="41"> <title>Transformer based Natural Language Generation for Question-Answering</title> diff --git a/data/xml/2021.acl.xml b/data/xml/2021.acl.xml index 6fda59e439..d4b2e9b0ab 100644 --- a/data/xml/2021.acl.xml +++ b/data/xml/2021.acl.xml @@ -5659,9 +5659,11 @@ <author><first>Weihua</first><last>Peng</last></author> <pages>4862–4872</pages> <abstract>Identifying causal relations of events is an important task in natural language processing area. However, the task is very challenging, because event causality is usually expressed in diverse forms that often lack explicit causal clues. Existing methods cannot handle well the problem, especially in the condition of lacking training data. Nonetheless, humans can make a correct judgement based on their background knowledge, including descriptive knowledge and relational knowledge. Inspired by it, we propose a novel Latent Structure Induction Network (LSIN) to incorporate the external structural knowledge into this task. Specifically, to make use of the descriptive knowledge, we devise a Descriptive Graph Induction module to obtain and encode the graph-structured descriptive knowledge. To leverage the relational knowledge, we propose a Relational Graph Induction module which is able to automatically learn a reasoning structure for event causality reasoning. Experimental results on two widely used datasets indicate that our approach significantly outperforms previous state-of-the-art methods.</abstract> - <url hash="2e648e90">2021.acl-long.376</url> + <url hash="d4e7b0af">2021.acl-long.376</url> <doi>10.18653/v1/2021.acl-long.376</doi> <bibkey>cao-etal-2021-knowledge</bibkey> + <revision id="1" href="2021.acl-long.376v1" hash="2e648e90"/> + <revision id="2" href="2021.acl-long.376v2" hash="d4e7b0af" date="2022-01-02">Updated results in Table 2 and Figure 3.</revision> </paper> <paper id="377"> <title>Turn the Combination Lock: Learnable Textual Backdoor Attacks via Word Substitution</title> @@ -6831,13 +6833,15 @@ <author><first>Wenming</first><last>Xiao</last></author> <pages>5847–5858</pages> <abstract>Lexicon information and pre-trained models, such as BERT, have been combined to explore Chinese sequence labeling tasks due to their respective strengths. However, existing methods solely fuse lexicon features via a shallow and random initialized sequence layer and do not integrate them into the bottom layers of BERT. In this paper, we propose Lexicon Enhanced BERT (LEBERT) for Chinese sequence labeling, which integrates external lexicon knowledge into BERT layers directly by a Lexicon Adapter layer. Compared with existing methods, our model facilitates deep lexicon knowledge fusion at the lower layers of BERT. Experiments on ten Chinese datasets of three tasks including Named Entity Recognition, Word Segmentation, and Part-of-Speech Tagging, show that LEBERT achieves state-of-the-art results.</abstract> - <url hash="88836111">2021.acl-long.454</url> + <url hash="21e090ed">2021.acl-long.454</url> <doi>10.18653/v1/2021.acl-long.454</doi> <bibkey>liu-etal-2021-lexicon</bibkey> <pwccode url="https://github.com/liuwei1206/LEBERT" additional="false">liuwei1206/LEBERT</pwccode> <pwcdataset url="https://paperswithcode.com/dataset/resume-ner">Resume NER</pwcdataset> <pwcdataset url="https://paperswithcode.com/dataset/universal-dependencies">Universal Dependencies</pwcdataset> <pwcdataset url="https://paperswithcode.com/dataset/weibo-ner">Weibo NER</pwcdataset> + <revision id="1" href="2021.acl-long.454v1" hash="88836111"/> + <revision id="2" href="2021.acl-long.454v2" hash="21e090ed" date="2021-12-26">Modified Figures 2 and 4 for clarity</revision> </paper> <paper id="455"> <title>Math Word Problem Solving with Explicit Numerical Values</title> diff --git a/data/xml/2021.argmining.xml b/data/xml/2021.argmining.xml index 7d7a1a30e6..d49b8b1939 100644 --- a/data/xml/2021.argmining.xml +++ b/data/xml/2021.argmining.xml @@ -281,6 +281,7 @@ <attachment type="Software" hash="e2932bd7">2021.argmining-1.21.Software.zip</attachment> <bibkey>kapadnis-etal-2021-team</bibkey> <doi>10.18653/v1/2021.argmining-1.21</doi> + <pwccode url="https://github.com/manavkapadnis/enigma_argmining" additional="false">manavkapadnis/enigma_argmining</pwccode> </paper> </volume> </collection> diff --git a/data/xml/2021.eacl.xml b/data/xml/2021.eacl.xml index e89e58c510..c0d854abe7 100644 --- a/data/xml/2021.eacl.xml +++ b/data/xml/2021.eacl.xml @@ -4256,11 +4256,13 @@ <author><first>Adrian</first><last>Ulges</last></author> <pages>3650–3660</pages> <abstract>We present a joint model for entity-level relation extraction from documents. In contrast to other approaches - which focus on local intra-sentence mention pairs and thus require annotations on mention level - our model operates on entity level. To do so, a multi-task approach is followed that builds upon coreference resolution and gathers relevant signals via multi-instance learning with multi-level representations combining global entity and local mention information. We achieve state-of-the-art relation extraction results on the DocRED dataset and report the first entity-level end-to-end relation extraction results for future reference. Finally, our experimental results suggest that a joint approach is on par with task-specific learning, though more efficient due to shared parameters and training steps.</abstract> - <url hash="f42064e7">2021.eacl-main.319</url> + <url hash="8db3ce98">2021.eacl-main.319</url> <bibkey>eberts-ulges-2021-end</bibkey> <doi>10.18653/v1/2021.eacl-main.319</doi> <pwccode url="https://github.com/lavis-nlp/jerex" additional="false">lavis-nlp/jerex</pwccode> <pwcdataset url="https://paperswithcode.com/dataset/docred">DocRED</pwcdataset> + <revision id="1" href="2021.eacl-main.319v1" hash="f42064e7"/> + <revision id="2" href="2021.eacl-main.319v2" hash="8db3ce98" date="2021-12-03">Added an evaluation related remark and the publisher to reference, corrected a typo.</revision> </paper> <paper id="320"> <title><fixed-case>WER</fixed-case>-<fixed-case>BERT</fixed-case>: Automatic <fixed-case>WER</fixed-case> Estimation with <fixed-case>BERT</fixed-case> in a Balanced Ordinal Classification Paradigm</title> diff --git a/data/xml/2021.emnlp.xml b/data/xml/2021.emnlp.xml index 09b70bba9b..465214625c 100644 --- a/data/xml/2021.emnlp.xml +++ b/data/xml/2021.emnlp.xml @@ -2847,6 +2847,7 @@ <url hash="fc13cfe4">2021.emnlp-main.197</url> <bibkey>tan-etal-2021-coupling</bibkey> <doi>10.18653/v1/2021.emnlp-main.197</doi> + <pwccode url="https://github.com/txannie/zp-dnlg" additional="false">txannie/zp-dnlg</pwccode> <pwcdataset url="https://paperswithcode.com/dataset/matinf">MATINF</pwcdataset> </paper> <paper id="198"> @@ -3094,10 +3095,12 @@ <author><first>Jinan</first><last>Xu</last></author> <pages>2716–2725</pages> <abstract>Implicit event argument extraction (EAE) is a crucial document-level information extraction task that aims to identify event arguments beyond the sentence level. Despite many efforts for this task, the lack of enough training data has long impeded the study. In this paper, we take a new perspective to address the data sparsity issue faced by implicit EAE, by bridging the task with machine reading comprehension (MRC). Particularly, we devise two data augmentation regimes via MRC, including: 1) implicit knowledge transfer, which enables knowledge transfer from other tasks, by building a unified training framework in the MRC formulation, and 2) explicit data augmentation, which can explicitly generate new training examples, by treating MRC models as an annotator. The extensive experiments have justified the effectiveness of our approach — it not only obtains state-of-the-art performance on two benchmarks, but also demonstrates superior results in a data-low scenario.</abstract> - <url hash="981943cb">2021.emnlp-main.214</url> + <url hash="7a22ed6d">2021.emnlp-main.214</url> <bibkey>liu-etal-2021-machine</bibkey> <doi>10.18653/v1/2021.emnlp-main.214</doi> <pwcdataset url="https://paperswithcode.com/dataset/wikievents">WikiEvents</pwcdataset> + <revision id="1" href="2021.emnlp-main.214v1" hash="981943cb"/> + <revision id="2" href="2021.emnlp-main.214v2" hash="7a22ed6d" date="2022-01-02">Corrected sponsor number in the Acknowledgments section.</revision> </paper> <paper id="215"> <title><fixed-case>I</fixed-case>mportance <fixed-case>E</fixed-case>stimation from <fixed-case>M</fixed-case>ultiple <fixed-case>P</fixed-case>erspectives for <fixed-case>K</fixed-case>eyphrase <fixed-case>E</fixed-case>xtraction</title> @@ -3592,6 +3595,7 @@ <url hash="9ac55a8c">2021.emnlp-main.247</url> <bibkey>wang-etal-2021-hierarchical</bibkey> <doi>10.18653/v1/2021.emnlp-main.247</doi> + <pwccode url="https://github.com/tata1661/shine-emnlp21" additional="false">tata1661/shine-emnlp21</pwccode> </paper> <paper id="248"> <title><tex-math>k</tex-math><fixed-case>F</fixed-case>olden: <tex-math>k</tex-math>-Fold Ensemble for Out-Of-Distribution Detection</title> @@ -8541,7 +8545,7 @@ <pwcdataset url="https://paperswithcode.com/dataset/natural-questions">Natural Questions</pwcdataset> </paper> <paper id="587"> - <title><fixed-case>C</fixed-case>onv<fixed-case>A</fixed-case>buse: Data, Analysis, and Benchmarks for Nuanced Detection in Conversational <fixed-case>AI</fixed-case></title> + <title><fixed-case>C</fixed-case>onv<fixed-case>A</fixed-case>buse: Data, Analysis, and Benchmarks for Nuanced Abuse Detection in Conversational <fixed-case>AI</fixed-case></title> <author><first>Amanda</first><last>Cercas Curry</last></author> <author><first>Gavin</first><last>Abercrombie</last></author> <author><first>Verena</first><last>Rieser</last></author> diff --git a/data/xml/2021.findings.xml b/data/xml/2021.findings.xml index ab2649034d..24b65fd4db 100644 --- a/data/xml/2021.findings.xml +++ b/data/xml/2021.findings.xml @@ -445,7 +445,7 @@ <attachment type="OptionalSupplementaryMaterial" hash="03bd4c14">2021.findings-acl.32.OptionalSupplementaryMaterial.zip</attachment> <doi>10.18653/v1/2021.findings-acl.32</doi> <bibkey>gritta-iacobacci-2021-xeroalign</bibkey> - <pwccode url="https://github.com/huawei-noah/noah-research/tree/master/xero_align" additional="true">huawei-noah/noah-research</pwccode> + <pwccode url="https://github.com/huawei-noah/noah-research" additional="true">huawei-noah/noah-research</pwccode> <pwcdataset url="https://paperswithcode.com/dataset/paws">PAWS</pwcdataset> <pwcdataset url="https://paperswithcode.com/dataset/paws-x">PAWS-X</pwcdataset> </paper> @@ -11755,6 +11755,7 @@ <url hash="abe06cdf">2021.findings-emnlp.407</url> <bibkey>sato-etal-2021-speculative-sampling</bibkey> <doi>10.18653/v1/2021.findings-emnlp.407</doi> + <pwccode url="https://github.com/jack-and-rozz/speculative_sampling" additional="false">jack-and-rozz/speculative_sampling</pwccode> </paper> <paper id="408"> <title>Perceived and Intended Sarcasm Detection with Graph Attention Networks</title> diff --git a/data/xml/2021.law.xml b/data/xml/2021.law.xml index 9fd6976716..bdfc90fe8c 100644 --- a/data/xml/2021.law.xml +++ b/data/xml/2021.law.xml @@ -145,8 +145,10 @@ <author><first>Steven</first><last>Bethard</last></author> <pages>106–111</pages> <abstract>While annotating normalized times in food security documents, we found that the semantically compositional annotation for time normalization (SCATE) scheme required several near-duplicate annotations to get the correct semantics for expressions like Nov. 7th to 11th 2021. To reduce this problem, we explored replacing SCATE’s Sub-Interval property with a Super-Interval property, that is, making the smallest units (e.g., 7th and 11th) rather than the largest units (e.g., 2021) the heads of the intersection chains. To ensure that the semantics of annotated time intervals remained unaltered despite our changes to the syntax of the annotation scheme, we applied several different techniques to validate our changes. These validation techniques detected and allowed us to resolve several important bugs in our automated translation from Sub-Interval to Super-Interval syntax.</abstract> - <url hash="c5470273">2021.law-1.11</url> + <url hash="1acae20b">2021.law-1.11</url> <bibkey>su-bethard-2021-simplifying</bibkey> + <revision id="1" href="2021.law-1.11v1" hash="c5470273"/> + <revision id="2" href="2021.law-1.11v2" hash="1acae20b" date="2021-12-02">Corrected paper to show author names instead of anonymous.</revision> <doi>10.18653/v1/2021.law-1.11</doi> </paper> <paper id="12"> diff --git a/data/xml/2021.mmtlrl.xml b/data/xml/2021.mmtlrl.xml index 209ad880e4..e8f9cd631e 100644 --- a/data/xml/2021.mmtlrl.xml +++ b/data/xml/2021.mmtlrl.xml @@ -3,12 +3,10 @@ <volume id="1" ingest-date="2021-11-09"> <meta> <booktitle>Proceedings of the First Workshop on Multimodal Machine Translation for Low Resource Languages (MMTLRL 2021)</booktitle> - <editor><first>India</first><last>Thoudam Doren Singh, National Institute of Technology Silchar</last></editor> - <editor><first>Dfki</first><last>Cristina España i Bonet</last></editor> - <editor><first>Germany</first><last>Universität des Saarlandes</last></editor> - <editor><first>India</first><last>Sivaji Bandyopadhyay, National Institute of Technology Silchar</last></editor> - <editor><first>Dfki</first><last>Josef Van Genabith</last></editor> - <editor><first>Germany</first><last>Universität des Saarlandes</last></editor> + <editor><first>Thoudam</first><last>Doren Singh</last></editor> + <editor><first>Cristina</first><last>España i Bonet</last></editor> + <editor><first>Sivaji</first><last>Bandyopadhyay</last></editor> + <editor><first>Josef</first><last>van Genabith</last></editor> <publisher>INCOMA Ltd.</publisher> <address>Online (Virtual Mode)</address> <month>September</month> diff --git a/data/xml/2021.mrl.xml b/data/xml/2021.mrl.xml index 3ffe2e7728..8415a706be 100644 --- a/data/xml/2021.mrl.xml +++ b/data/xml/2021.mrl.xml @@ -274,6 +274,7 @@ <url hash="1073cb2a">2021.mrl-1.19</url> <bibkey>kim-etal-2021-analysis</bibkey> <doi>10.18653/v1/2021.mrl-1.19</doi> + <pwccode url="https://github.com/emorynlp/mrl-2021" additional="false">emorynlp/mrl-2021</pwccode> </paper> <paper id="20"> <title>Regularising Fisher Information Improves Cross-lingual Generalisation</title> diff --git a/data/xml/2021.naacl.xml b/data/xml/2021.naacl.xml index f6fab1cd71..b7f74e8670 100644 --- a/data/xml/2021.naacl.xml +++ b/data/xml/2021.naacl.xml @@ -6805,6 +6805,19 @@ <doi>10.18653/v1/2021.naacl-main.476</doi> <bibkey>cao-wang-2021-inference</bibkey> </paper> + <paper id="477"> + <title>ReinforceBug: A Framework to Generate Adversarial Textual Examples</title> + <author><first>Bushra</first><last>Sabir</last></author> + <author><first>Muhammad Ali</first><last>Babar</last></author> + <author><first>Raj</first><last>Gaire</last></author> + <pages>5954–5964</pages> + <abstract>Adversarial Examples (AEs) generated by perturbingining examples are useful in improving the robustness of Deep Learning (DL) based models. Most prior works generate AEs that are either unconscionable due to lexical errors or semantically and functionally deviant from original examples. In this paper, we present ReinforceBug, a reinforcement learning framework, that learns a policy that is transferable on unseen datasets and generates utility-preserving and transferable (on other models) AEs. Our experiments show that ReinforceBug is on average 10% more successful as compared to the state-of the-art attack TextFooler. Moreover, the target models have on average 73.64% confidence in wrong prediction, the generated AEs preserve the functional equivalence and semantic similarity (83.38%) to their original counterparts, and are transferable on other models with an average success rate of 46%</abstract> + <url hash="9ec3df5d">2021.naacl-main.477</url> + <doi>10.18653/v1/2021.naacl-main.477</doi> + <bibkey>sabir-etal-2021-reinforcebug</bibkey> + <attachment type="OptionalSupplementaryCode" hash="d3a806e3">2021.naacl-main.477.OptionalSupplementaryCode.zip</attachment> + <attachment type="OptionalSupplementaryData" hash="061d3b2a">2021.naacl-main.477.OptionalSupplementaryData.zip</attachment> + </paper> </volume> <volume id="demos" ingest-date="2021-05-24"> <meta> diff --git a/data/xml/2021.nsurl.xml b/data/xml/2021.nsurl.xml index 8d6a0e0ed9..0c824a6ae2 100644 --- a/data/xml/2021.nsurl.xml +++ b/data/xml/2021.nsurl.xml @@ -16,46 +16,60 @@ <bibkey>nsurl-2021-international-nlp</bibkey> </frontmatter> <paper id="1"> - <title>Nasrin Taghizadeh, <fixed-case>A</fixed-case>li Ebrahimi and Heshaam Faili</title> - <author><first>NSURL-2021 Shared Task 1: Semantic Relation Extraction</first><last>in Persian</last></author> + <title>NSURL-2021 Shared Task 1: Semantic Relation Extraction in Persian</title> + <author><first>Nasrin</first><last>Taghizadeh</last></author> + <author><first>Ali</first><last>Ebrahimi</last></author> + <author><first>Heshaam</first><last>Faili</last></author> <pages>1–7</pages> <url hash="9bde3cd4">2021.nsurl-1.1</url> <bibkey>in-persian-2021-nasrin-taghizadeh</bibkey> <pwcdataset url="https://paperswithcode.com/dataset/perlex">Perlex</pwcdataset> </paper> <paper id="2"> - <title>Romina Oji, Nasrin Taghizadeh and Heshaam Faili</title> - <author><first>PerSpellData: An Exhaustive Parallel Spell Dataset For</first><last>Persian</last></author> + <title>PerSpellData: An Exhaustive Parallel Spell Dataset For Persian</title> + <author><first>Romina</first><last>Oji</last></author> + <author><first>Nasrin</first><last>Taghizadeh</last></author> + <author><first>Heshaam</first><last>Faili</last></author> <pages>8–14</pages> <url hash="8fe4700e">2021.nsurl-1.2</url> <bibkey>persian-2021-romina-oji</bibkey> <pwccode url="https://github.com/rominaoji/perspelldata" additional="false">rominaoji/perspelldata</pwccode> </paper> <paper id="3"> - <title>Hadi Khalilia, Abed Alhakim Freihat and Fausto Giunchiglia</title> - <author><first>The Dimensions</first><last>of Lexical Semantic Resource Quality</last></author> + <title>The Dimensions of Lexical Semantic Resource Quality</title> + <author><first>Hadi</first><last>Khalilia</last></author> + <author><first>Abed Alhakim</first><last>Freihat</last></author> + <author><first>Fausto</first><last>Giunchiglia</last></author> <pages>15–21</pages> <url hash="0fc8b17b">2021.nsurl-1.3</url> <bibkey>of-lexical-semantic-resource-quality-2021-hadi-khalilia</bibkey> </paper> <paper id="4"> - <title>Fatemeh sadat Hosseini, Shima Kashef, Elham Shabaninia, Hossein Nezamabadi-pour</title> - <author><first>IDPL-PFOD: An Image Dataset</first><last>of Printed Farsi Text for OCR Research</last></author> + <title>IDPL-PFOD: An Image Dataset of Printed Farsi Text for OCR Research</title> + <author><first>Fatemeh sadat</first><last>Hosseini</last></author> + <author><first>Shima</first><last>Kashef</last></author> + <author><first>Elham</first><last>Shabaninia</last></author> + <author><first>Hossein</first><last>Nezamabadi-pour</last></author> <pages>22–31</pages> <url hash="80bb2938">2021.nsurl-1.4</url> <bibkey>of-printed-farsi-text-for-ocr-research-2021-fatemeh-sadat</bibkey> </paper> <paper id="5"> - <title>Moein Salimi Sartakhti, Romina Etezadi and Mehrnoush Shamsfard</title> - <author><first>Improving Persian</first><last>Relation Extraction Models By Data Augmentation</last></author> + <title>Improving Persian Relation Extraction Models By Data Augmentation</title> + <author><first>Moein Salimi</first><last>Sartakhti</last></author> + <author><first>Romina</first><last>Etezadi</last></author> + <author><first>Mehrnoush</first><last>Shamsfard</last></author> <pages>32–37</pages> <url hash="cb33fa2c">2021.nsurl-1.5</url> <bibkey>relation-extraction-models-by-data-augmentation-2021-moein-salimi</bibkey> <pwcdataset url="https://paperswithcode.com/dataset/perlex">Perlex</pwcdataset> </paper> <paper id="6"> - <title>Mohammad Mahdi Jafari, Somayyeh Behmanesh, Alireza Talebpour and <fixed-case>A</fixed-case>li Nadian Ghomsheh</title> - <author><first>Improving</first><last>pre-trained Language Model for Relation Extraction Using Syntactic Information in Persian</last></author> + <title>Improving pre-trained Language Model for Relation Extraction Using Syntactic Information in Persian</title> + <author><first>Mohammad Mahdi</first><last>Jafari</last></author> + <author><first>Somayyeh</first><last>Behmanesh</last></author> + <author><first>Alireza</first><last>Talebpour</last></author> + <author><first>Ali Nadian</first><last>Ghomsheh</last></author> <pages>38–44</pages> <url hash="2c9622fd">2021.nsurl-1.6</url> <bibkey>pre-trained-language-model-for-relation-extraction-using-syntactic-information-in-persian-2021-mohammad-mahdi</bibkey> diff --git a/data/xml/2021.ranlp.xml b/data/xml/2021.ranlp.xml index e20bf15e44..8cb8316e5c 100644 --- a/data/xml/2021.ranlp.xml +++ b/data/xml/2021.ranlp.xml @@ -1075,8 +1075,10 @@ <author><first>Stefan</first><last>Ultes</last></author> <pages>839–845</pages> <abstract>This paper presents an automatic method to evaluate the naturalness of natural language generation in dialogue systems. While this task was previously rendered through expensive and time-consuming human labor, we present this novel task of automatic naturalness evaluation of generated language. By fine-tuning the BERT model, our proposed naturalness evaluation method shows robust results and outperforms the baselines: support vector machines, bi-directional LSTMs, and BLEURT. In addition, the training speed and evaluation performance of naturalness model are improved by transfer learning from quality and informativeness linguistic knowledge.</abstract> - <url hash="e10078ae">2021.ranlp-main.96</url> + <url hash="e059c241">2021.ranlp-main.96</url> <bibkey>liu-etal-2021-naturalness</bibkey> + <revision id="1" href="2021.ranlp-1.96v1" hash="e10078ae"/> + <revision id="2" href="2021.ranlp-1.96v2" hash="e059c241" date="2021-12-02">Revised author emails</revision> </paper> <paper id="97"> <title>Towards the Application of Calibrated Transformers to the Unsupervised Estimation of Question Difficulty from Text</title> diff --git a/data/xml/W19.xml b/data/xml/W19.xml index ac08cc12b5..f815d46efb 100644 --- a/data/xml/W19.xml +++ b/data/xml/W19.xml @@ -11820,7 +11820,7 @@ One of the references was wrong therefore it is corrected to cite the appropriat <abstract>This paper describes the Microsoft Translator submissions to the WMT19 news translation shared task for English-German. Our main focus is document-level neural machine translation with deep transformer models. We start with strong sentence-level baselines, trained on large-scale data created via data-filtering and noisy back-translation and find that back-translation seems to mainly help with translationese input. We explore fine-tuning techniques, deeper models and different ensembling strategies to counter these effects. Using document boundaries present in the authentic and synthetic parallel data, we create sequences of up to 1000 subword segments and train transformer translation models. We experiment with data augmentation techniques for the smaller authentic data with document-boundaries and for larger authentic data without boundaries. We further explore multi-task training for the incorporation of document-level source language monolingual data via the BERT-objective on the encoder and two-pass decoding for combinations of sentence-level and document-level systems. Based on preliminary human evaluation results, evaluators strongly prefer the document-level systems over our comparable sentence-level system. The document-level systems also seem to score higher than the human references in source-based direct assessment.</abstract> <url hash="b1427483">W19-5321</url> <doi>10.18653/v1/W19-5321</doi> - <attachment type="poster" hash="cccb1eb1">W19-5321.Poster.pdf</attachment> + <attachment type="poster" hash="cacbd7c2">W19-5321.Poster.pdf</attachment> <bibkey>junczys-dowmunt-2019-microsoft</bibkey> </paper> <paper id="22">
flask-admin__flask-admin-1735
"examples" package erroneously installed Because there's an `examples/__init__.py` file in the repo, [this line of setup.py](https://github.com/flask-admin/flask-admin/blob/08bdcdea27fed6dcef7627bf7d2ac0625c4c2a9c/setup.py#L54) is erroneously deciding to install two packages, `flask-admin` and `examples`. I suspect other packages make this same mistake, which can lead to some file conflicts during installation. It would be best to either change your `find_packages` invocation to blacklist `exclude`: ```python find_packages(exclude=['examples', 'examples.*']) ``` Or (and this is how I would do it to avoid this problem in the future) or to whitelist `flask_admin`: ```python find_packages(include=['flask_admin']) ``` I would also go further and also blacklist `flask_admin.tests`, since I think tests should not be part of the installation: ```python find_packages(include=['flask_admin'], exclude=['flask_admin.tests.*']) ``` But I think just not shipping the `examples` package is a good start.
[]
[]
diff --git a/examples/__init__.py b/examples/__init__.py deleted file mode 100644 index e69de29bb..000000000
akvo__akvo-rsr-4670
Split web service for local environment Currently, the `web` docker-compose service uses [`start-django.sh`](https://github.com/akvo/akvo-rsr/blob/master/scripts/docker/dev/start-django.sh) which starts 3 different processes in the background. This introduces these problems: - restarting other processes when only one is needed - throwing away logs (the `web` service only outputs stuff before the background processes are started) - inability to debug the processes (can't attach remote debugger nor read logs)
[]
[]
diff --git a/.dockerignore b/.dockerignore index 7d57f4dfaa..9c9e49f953 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,6 +3,8 @@ vagrant data scripts/data/*.gz scripts/data/*.lzo +scripts/data/config +scripts/data/dumps akvo/rsr/front-end/node_modules/ akvo/rsr/front-end/static/webpack-stats.json akvo/rsr/front-end/static/rsr/dist/ diff --git a/docker-compose-docker-sync.yaml b/docker-compose-docker-sync.yaml index 2ca4b49700..f85339ec80 100644 --- a/docker-compose-docker-sync.yaml +++ b/docker-compose-docker-sync.yaml @@ -1,4 +1,4 @@ -version: '3' +version: '3.7' services: web: diff --git a/docker-compose.ci.prod.images.yaml b/docker-compose.ci.prod.images.yaml index bf3a67006d..dfeb38f2ac 100644 --- a/docker-compose.ci.prod.images.yaml +++ b/docker-compose.ci.prod.images.yaml @@ -1,4 +1,4 @@ -version: '3' +version: '3.7' services: mainnetwork: diff --git a/docker-compose.ci.yaml b/docker-compose.ci.yaml index 83514505c0..c655e3a3ff 100644 --- a/docker-compose.ci.yaml +++ b/docker-compose.ci.yaml @@ -1,4 +1,4 @@ -version: '3' +version: '3.7' services: mainnetwork: diff --git a/docker-compose.override.yaml b/docker-compose.override.yaml index 53995c2b21..2a81da273c 100644 --- a/docker-compose.override.yaml +++ b/docker-compose.override.yaml @@ -1,4 +1,11 @@ -version: '3' +version: '3.7' + +x-web-common: &web-common + build: + context: . + dockerfile: Dockerfile-dev + entrypoint: scripts/docker/dev/run-as-user.sh + network_mode: service:mainnetwork services: mainnetwork: @@ -7,12 +14,31 @@ services: - "8080:8080" # for webpack HMR - "8081:8081" # for webpack HMR - "8082:8082" # snakeviz + web: - build: - context: . - dockerfile: Dockerfile-dev - entrypoint: scripts/docker/dev/run-as-user.sh + <<: *web-common command: scripts/docker/dev/start-django.sh + depends_on: + - rsrdbhost + + web-dir: + <<: *web-common + command: scripts/docker/dev/start-node-app.sh akvo/rsr/dir + depends_on: + - web + volumes: + - .:/var/akvo/rsr/code:delegated + - /var/akvo/rsr/code/src/ + + web-spa: + <<: *web-common + command: scripts/docker/dev/start-node-app.sh akvo/rsr/spa + depends_on: + - web + volumes: + - .:/var/akvo/rsr/code:delegated + - /var/akvo/rsr/code/src/ + reports: build: context: . @@ -29,6 +55,10 @@ services: nginx: image: nginx:1.17.9-alpine network_mode: service:mainnetwork + depends_on: + - web-spa + - web-dir + - rsr-memcached volumes: - ./scripts/docker/dev/nginx:/etc/nginx/conf.d rsrdbhost: diff --git a/docker-compose.yaml b/docker-compose.yaml index 6fac3ab245..0cbd7e8241 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '3' +version: '3.7' services: mainnetwork: diff --git a/manage.py b/manage.py old mode 100644 new mode 100755 diff --git a/scripts/docker/dev/start-django.sh b/scripts/docker/dev/start-django.sh index 9a661341b6..e516613e54 100755 --- a/scripts/docker/dev/start-django.sh +++ b/scripts/docker/dev/start-django.sh @@ -1,57 +1,21 @@ #!/usr/bin/env bash - +# Prepare and start the django server set -eu -_term() { - echo "Caught SIGTERM signal!" - kill -TERM "$child" 2>/dev/null -} - -trap _term SIGTERM - ./scripts/docker/dev/wait-for-dependencies.sh -if [ -z "${IS_REPORTS_CONTAINER:-}" ]; then - pushd akvo/rsr/front-end +pushd akvo/rsr/front-end if [[ ! -d "node_modules" ]]; then npm install fi if [[ ! -f "static/rsr/dist/vendors.js" ]]; then npm run dev fi - popd - - pushd akvo/rsr/spa - ( - npm install - npm start - ) & - popd - - pushd akvo/rsr/dir - ( - npm install - npm start - ) & - popd -fi - -if [ -z "${IS_REPORTS_CONTAINER:-}" ]; then - SKIP_REQUIRED_AUTH_GROUPS=true python manage.py migrate --noinput - SKIP_REQUIRED_AUTH_GROUPS=true python manage.py createcachetable || true -fi -#python manage.py collectstatic - -## Not running cron jobs in dev -#python manage.py crontab add -#env >> /etc/environment -#/usr/sbin/cron +popd -if [ -z "${IS_REPORTS_CONTAINER:-}" ]; then - python manage.py populate_local_db -fi +SKIP_REQUIRED_AUTH_GROUPS=true python manage.py migrate --noinput +SKIP_REQUIRED_AUTH_GROUPS=true python manage.py createcachetable || true -python manage.py runserver 0.0.0.0:${DJANGO_PORT:-8000} & +python manage.py populate_local_db -child=$! -wait "$child" +python manage.py runserver 0.0.0.0:${DJANGO_PORT:-8000} \ No newline at end of file diff --git a/scripts/docker/dev/start-node-app.sh b/scripts/docker/dev/start-node-app.sh new file mode 100755 index 0000000000..06b1b4f006 --- /dev/null +++ b/scripts/docker/dev/start-node-app.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Script to start a node application +set -eu + +# The path to the app's directory containing node_modules +app_dir=$1 + +cd "$app_dir" +npm install +npm start
OpenEnergyPlatform__oeplatform-1353
Version in CITATION.cff is out of date ## Description of the issue We have introduced the citation.cff file, which also contains a version. This should be updated every time a release is made. It would be great if the version could be imported automatically from the VERSION file so we don't have to maintain multiple version identifiers. ## Ideas of solution - [x] add note to RELEASE_PROCEDURE.md (see #1228) - [x] auto import a version update from the VERSION file ## Context and Environment * Version used: * Operating system: * Environment setup and (python) version: ## Workflow checklist - [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)
[ { "content": "", "path": "oeplatform/__init__.py" } ]
[ { "content": "__version__ = \"0.14.1\"\n", "path": "oeplatform/__init__.py" } ]
diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 000000000..6e9ce9e69 --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,8 @@ +[bumpversion] +current_version = 0.14.1 + +[bumpversion:file:VERSION] + +[bumpversion:file:CITATION.cff] + +[bumpversion:file:oeplatform/__init__.py] diff --git a/CITATION.cff b/CITATION.cff index efb76d24f..396a810ab 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -28,7 +28,7 @@ authors: title: "Open Energy Family - Open Energy Platform (OEP)" type: software license: AGPL-3.0-or-later -version: 0.11.1 -doi: +version: 0.14.1 +doi: date-released: 2022-12-12 -url: "https://github.com/OpenEnergyPlatform/oeplatform/" \ No newline at end of file +url: "https://github.com/OpenEnergyPlatform/oeplatform/" diff --git a/RELEASE_PROCEDURE.md b/RELEASE_PROCEDURE.md index 98d1c117f..965493a20 100644 --- a/RELEASE_PROCEDURE.md +++ b/RELEASE_PROCEDURE.md @@ -39,7 +39,7 @@ Before see How to [Contribute](https://github.com/OpenEnergyPlatform/oeplatform/ 1. Update the oeplatform/versions/changelogs/ [`current.md`](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/versions/changelogs/current.md) (see the examples of previous releases) - Change filename to release version (x_x_x.md) - Copy template to `current.md` - - Update `VERSION` with lastest version number + - Update version with `bumpversion --allow-dirty [minor|patch]` 1. Deploy release branch on TOEP. - Test the changes - Create a hotfix and merge changes into the release branch diff --git a/docs_requirements.txt b/docs_requirements.txt index 88bee4ce2..21a229edf 100644 --- a/docs_requirements.txt +++ b/docs_requirements.txt @@ -1,4 +1,6 @@ -mkdocs-material # Raises error in py3.6?? ~=9.1 -mkdocstrings # Raises error in py3.6?? ~=0.22 -mkdocstrings-python -mike \ No newline at end of file +# does not run in (productive py3.6) but that's ok +# because doc build should only runs on newer python on github +mkdocs-material~=9.1 +mkdocstrings~=0.22 +mkdocstrings-python +mike diff --git a/oeplatform/__init__.py b/oeplatform/__init__.py index e69de29bb..f075dd36a 100644 --- a/oeplatform/__init__.py +++ b/oeplatform/__init__.py @@ -0,0 +1 @@ +__version__ = "0.14.1" diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 000000000..819ceb244 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,6 @@ +-r requirements.txt +flake8 +black +isort +pre-commit +bumpversion
kartoza__prj.app-866
Only one organisation can be created per account It seems that only one organisation can be created from a login account. The folks at Camptocamp have two separate organisations (companies) and are unable to create the second organisation from their login.
[ { "content": "", "path": "django_project/core/settings/__init__.py" } ]
[ { "content": "# coding=utf-8\n", "path": "django_project/core/settings/__init__.py" } ]
diff --git a/django_project/base/tests/__init__.py b/django_project/base/tests/__init__.py index b979ed160..22d4b6542 100644 --- a/django_project/base/tests/__init__.py +++ b/django_project/base/tests/__init__.py @@ -1 +1,3 @@ +# coding=utf-8 +"""Test for base App.""" __author__ = 'timlinux' diff --git a/django_project/base/tests/test_views.py b/django_project/base/tests/test_views.py index 0f8fd0e6d..b19b2b1b6 100644 --- a/django_project/base/tests/test_views.py +++ b/django_project/base/tests/test_views.py @@ -218,3 +218,203 @@ def test_GithubRepoView_with_login(self): 'github/populate-github.html' ] self.assertEqual(response.template_name, expected_templates) + + +class TestOrganisationCreate(TestCase): + """Test organisation creation.""" + @override_settings(VALID_DOMAIN=['testserver', ]) + def setUp(self): + """Setting up before each test.""" + self.client = Client() + self.client.post( + '/set_language/', data = {'language': 'en'}) + logging.disable(logging.CRITICAL) + self.user = UserF.create(**{ + 'username': 'sonlinux', + 'is_staff': True, + }) + + self.user.set_password('password') + self.user.save() + + # lets set up a testing project to create organisations from. + self.test_project = ProjectF.create() + self.unapproved_project = ProjectF.create(approved=False) + self.test_organisation = OrganisationF.create() + + @override_settings(VALID_DOMAIN=['testserver', ]) + def test_oroganisation_create_with_login(self): + """ + Test that a single logged in user can create multiple organisations. + """ + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test client log in. + self.assertTrue(loged_in) + + expected_templates = [ + 'organisation/create.html' + ] + response = client.post(reverse('create-organisation')) + self.assertEqual(response.status_code, 200) + + # Test if get the correct template view after creation. + self.assertEqual(response.template_name, expected_templates) + + @override_settings(VALID_DOMAIN = ['testserver', ]) + def test_multiple_organisation_create_with_single_login(self): + """ + Test that a single logged in user can create multiple + organisations. + """ + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test that user is actually loged in. + self.assertTrue(loged_in) + post_data_list = [ + 'Test organisation creation', + 'Test organisation creation two', + 'Test organisation creation three'] + + for post_data in post_data_list: + response = client.post(reverse('create-organisation'), + {'name': post_data}) + self.assertEqual(response.status_code, 302) + + @override_settings(VALID_DOMAIN = ['testserver', ]) + def test_organisation_create_with_no_login(self): + """Test that no non-authenticated user can create an organisation.""" + client = Client() + post_data = { + 'name': u'A new test organisation', + } + # response = client.post( reverse( 'account_login') , post_data ) + response = client.post(reverse('create-organisation'), post_data) + self.assertEqual(response.status_code, 302) + + +class TestOrganisationCreateWithSuperuserPermissions(TestCase): + """Test organisation creation with a superuser login.""" + @override_settings(VALID_DOMAIN=['testserver', ]) + def setUp(self): + """Setting up before each test.""" + self.client = Client() + self.client.post( + '/set_language/', data = {'language': 'en'}) + logging.disable(logging.CRITICAL) + self.user = UserF.create(**{ + 'username': 'sonlinux', + 'is_superuser': True, + }) + + self.user.set_password('password') + self.user.save() + + # lets set up a testing project to create organisations from. + self.test_project = ProjectF.create() + self.unapproved_project = ProjectF.create(approved=False) + self.test_organisation = OrganisationF.create() + + @override_settings(VALID_DOMAIN=['testserver', ]) + def test_oroganisation_create_with_superuser(self): + """ + Test that a superuser login can create multiple organisations. + """ + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test client log in. + self.assertTrue(loged_in) + + expected_templates = [ + 'organisation/create.html' + ] + response = client.post(reverse('create-organisation')) + self.assertEqual(response.status_code, 200) + + # Test if get the correct template view after creation. + self.assertEqual(response.template_name, expected_templates) + + @override_settings(VALID_DOMAIN = ['testserver', ]) + def test_multiple_organisation_create_with_superuser(self): + """ + Test that a superuser login can create multiple organisations. + """ + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test that user is actually loged in. + self.assertTrue(loged_in) + post_data_list = [ + 'Test organisation creation', + 'Test organisation creation two', + 'Test organisation creation three'] + + for post_data in post_data_list: + response = client.post(reverse('create-organisation'), + {'name': post_data}) + self.assertEqual(response.status_code, 302) + + +class TestOrganisationCreateWithNoneStaffPermissions(TestCase): + """Test organisation creation with a none staff user.""" + @override_settings(VALID_DOMAIN=['testserver', ]) + def setUp(self): + """Setting up before each test.""" + self.client = Client() + self.client.post( + '/set_language/', data = {'language': 'en'}) + logging.disable(logging.CRITICAL) + self.user = UserF.create(**{ + 'username': 'sonlinux', + 'is_staff': False, + }) + + self.user.set_password('password') + self.user.save() + + # lets set up a testing project to create organisations from. + self.test_project = ProjectF.create() + self.unapproved_project = ProjectF.create(approved=False) + self.test_organisation = OrganisationF.create() + + @override_settings(VALID_DOMAIN=['testserver', ]) + def test_oroganisation_create_with_none_staff_login(self): + """Test that a none staff user can create an organisations.""" + + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test client log in. + self.assertTrue(loged_in) + + expected_templates = [ + 'organisation/create.html' + ] + response = client.post(reverse('create-organisation')) + self.assertEqual(response.status_code, 200) + + # Test if get the correct template view after creation. + self.assertEqual(response.template_name, expected_templates) + + @override_settings(VALID_DOMAIN = ['testserver', ]) + def test_multiple_organisation_create_with_none_staff_user(self): + """ + Test that a none staff user can create multiple organisations. + """ + client = Client() + loged_in = client.login(username='sonlinux', password='password') + + # Test that user is actually loged in. + self.assertTrue(loged_in) + post_data_list = [ + 'Test organisation creation', + 'Test organisation creation two', + 'Test organisation creation three'] + + for post_data in post_data_list: + response = client.post(reverse('create-organisation'), + {'name': post_data}) + self.assertEqual(response.status_code, 302) diff --git a/django_project/core/settings/__init__.py b/django_project/core/settings/__init__.py index e69de29bb..9bad5790a 100644 --- a/django_project/core/settings/__init__.py +++ b/django_project/core/settings/__init__.py @@ -0,0 +1 @@ +# coding=utf-8
CTFd__CTFd-598
Docker startup getting stuck on mysqladmin ping **Environment**: - CTFd Version/Commit: ctfd/ctfd:latest from Docker hub (17 days old) - Operating System: Amazon Linux AMI 2017.09.j x86_64 ECS HVM GP2 - Web Browser and Version: N/A **What happened?** Trying to setup CTFd with AWS ECS and RDS Aurora. If I don't set the DATABASE_URL env variable, it works fine and starts. If I do set the DATABASE_URL to mysql+pymysql://ctfd:<MYPASSWORD>@ctfd.<resource-id>i.eu-west-1.rds.amazonaws.com/ctfd I get stuck on docker-entrypoint.sh:7 `while ! mysqladmin ping -h db --silent; do` **What did you expect to happen?** That the ping should succeed and startup continue **How to reproduce your issue** Create an ECS task with ctfd/ctfd as image source, set env variable SECRET_KEY and DATABASE_URL. Start container. I have made sure the container can access the database by running `docker exec container-id mysql -h ctfd.<resource-id>.eu-west-1.rds.amazonaws.com -p<SECRET PASSWORD>` which works. **Any associated stack traces or error logs** Just stuck on "Waiting on MySQL" My question is basically: am I doing something wrong and should somehow make that "db" resolve to the database or is the script incorrect and should take the value of DATABASE_URL into account?
[ { "content": "from CTFd import create_app\n\napp = create_app()\n", "path": "wsgi.py" } ]
[ { "content": null, "path": "wsgi.py" } ]
diff --git a/ctfd.ini b/ctfd.ini deleted file mode 100644 index 951cae856..000000000 --- a/ctfd.ini +++ /dev/null @@ -1,46 +0,0 @@ -# UWSGI Configuration File -# Install uwsgi (sudo apt-get install uwsgi), copy this file to -# /etc/uwsgi/apps-available and then link it in /etc/uwsgi/apps-enabled -# Only two lines below (commented) need to be changed for your config. -# Then, you can use something like the following in your nginx config: -# -# # SERVER_ROOT is not / (e.g. /ctf) -# location = /ctf { rewrite ^ /ctf/; } -# location /ctf { -# include uwsgi_params; -# uwsgi_pass unix:/run/uwsgi/app/ctfd/socket; -# } -# -# # SERVER_ROOT is / -# location / { -# include uwsgi_params; -# wsgi_pass unix:/run/uwsgi/app/ctfd/socket; -# } -[uwsgi] -# Where you've put CTFD -chdir = /var/www/ctfd/ -# If SCRIPT_ROOT is not / -#mount = /ctf=wsgi.py -# SCRIPT_ROOT is / -mount = /=wsgi.py - -# You shouldn't need to change anything past here -plugin = python -module = wsgi - -master = true -processes = 1 -threads = 1 - -vacuum = true - -manage-script-name = true -wsgi-file = wsgi.py -callable = app - -die-on-term = true - -# If you're not on debian/ubuntu, replace with uid/gid of web user -uid = www-data -gid = www-data - diff --git a/docker-compose.yml b/docker-compose.yml index 4fffb767d..9f36b6de8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,6 +10,8 @@ services: - UPLOAD_FOLDER=/var/uploads - LOG_FOLDER=/var/log/CTFd - DATABASE_URL=mysql+pymysql://root:ctfd@db/ctfd + - REDIS_URL=redis://cache:6379 + - WORKERS=4 volumes: - .data/CTFd/logs:/var/log/CTFd - .data/CTFd/uploads:/var/uploads @@ -32,7 +34,15 @@ services: networks: internal: # This command is required to set important mariadb defaults - command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --wait_timeout=28800] + command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --wait_timeout=28800, --log-warnings=0] + + cache: + image: redis:4 + restart: always + volumes: + - .data/redis:/data + networks: + internal: networks: default: diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 4fb8bd919..8cc99f88e 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -1,23 +1,35 @@ #!/bin/sh +# Check that a .ctfd_secret_key file or SECRET_KEY envvar is set +if [ ! -f .ctfd_secret_key ] && [ -z "$SECRET_KEY" ]; then + if [ $WORKERS -gt 1 ]; then + echo "[ ERROR ] You are configured to use more than 1 worker." + echo "[ ERROR ] To do this, you must define the SECRET_KEY environment variable or create a .ctfd_secret_key file." + echo "[ ERROR ] Exiting..." + exit 1 + fi +fi + +# Check that the database is available if [ -n "$DATABASE_URL" ] then - # https://stackoverflow.com/a/29793382 - echo "Waiting on MySQL" - while ! mysqladmin ping -h db --silent; do + database=`echo $DATABASE_URL | awk -F[@//] '{print $4}'` + echo "Waiting for $database to be ready" + while ! mysqladmin ping -h $database --silent; do # Show some progress echo -n '.'; sleep 1; done - echo "Ready" + echo "$database is ready" # Give it another second. sleep 1; fi +# Start CTFd echo "Starting CTFd" gunicorn 'CTFd:create_app()' \ --bind '0.0.0.0:8000' \ - --workers 1 \ + --workers $WORKERS \ --worker-class 'gevent' \ --access-logfile "${LOG_FOLDER:-/opt/CTFd/CTFd/logs}/access.log" \ --error-logfile "${LOG_FOLDER:-/opt/CTFd/CTFd/logs}/error.log" diff --git a/wsgi.py b/wsgi.py deleted file mode 100644 index 67b0172f5..000000000 --- a/wsgi.py +++ /dev/null @@ -1,3 +0,0 @@ -from CTFd import create_app - -app = create_app()
plotly__dash-2553
[BUG] Flask 2.2.3 dependency has HIGH security vulnerability (fixed in 2.2.5) Issue #2538 pinned the upper bound of the Flask dependency to 2.2.3. However Flask 2.2.3 is affected by a HIGH security vulnerability that is fixed in Flask 2.2.5. See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-30861 Debian 11, Python 3.11 (from Python official 3.11 Docker image) ``` # pip install dash Collecting dash Downloading dash-2.10.1-py3-none-any.whl (10.3 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.3/10.3 MB 14.1 MB/s eta 0:00:00 Collecting Flask<=2.2.3,>=1.0.4 (from dash) Downloading Flask-2.2.3-py3-none-any.whl (101 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 101.8/101.8 kB 17.0 MB/s eta 0:00:00 ``` ``` dash 2.10.1 dash-core-components 2.0.0 dash-html-components 2.0.0 dash-table 5.0.0 ``` **Describe the bug** Dash installs a vulnerable version of Flask and dependency scans flag the vulnerability. **Expected behavior** No known and fixed security vulnerabilities added. Perhaps Pin to 2.2.* instead of specific 2.2.3 version where future pins will find new security issues.
[ { "content": "__version__ = \"2.10.1\"\n", "path": "dash/version.py" } ]
[ { "content": "__version__ = \"2.10.2\"\n", "path": "dash/version.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 90844ed6ca..54c5b0714e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,12 @@ All notable changes to `dash` will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/). +## [2.10.2] - 2023-05-31 + +## Changed + +- Set Flask and Werkzeug version upper bound to `<2.3`. + ## [2.10.1] - 2023-05-30 ## Fixed diff --git a/dash/version.py b/dash/version.py index 565443f86f..6c96c9755a 100644 --- a/dash/version.py +++ b/dash/version.py @@ -1 +1 @@ -__version__ = "2.10.1" +__version__ = "2.10.2" diff --git a/requires-install.txt b/requires-install.txt index d7049d8735..8730b6d6ea 100644 --- a/requires-install.txt +++ b/requires-install.txt @@ -1,5 +1,5 @@ -Flask>=1.0.4,<=2.2.3 -Werkzeug<=2.2.3 +Flask>=1.0.4,<2.3.0 +Werkzeug<2.3.0 plotly>=5.0.0 dash_html_components==2.0.0 dash_core_components==2.0.0
horovod__horovod-1139
Replace .step(synchronize=False) with optimizer.skip_synchronize() NVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`. This PR switches API to use context manager: ```python optimizer.synchronize() with optimizer.skip_synchronize(): optimizer.step() ```
[ { "content": "__version__ = '0.16.3'\n", "path": "horovod/__init__.py" } ]
[ { "content": "__version__ = '0.16.4'\n", "path": "horovod/__init__.py" } ]
diff --git a/horovod/__init__.py b/horovod/__init__.py index 66e314a006..538eb5d865 100644 --- a/horovod/__init__.py +++ b/horovod/__init__.py @@ -1 +1 @@ -__version__ = '0.16.3' +__version__ = '0.16.4'
freedomofpress__securedrop-1117
Update kernel module blacklist During an installation last week, we encountered an issue with the kernel module blacklist. The install was using the new generation of Intel NUCs ([NUC5i5RYK](http://www.amazon.com/dp/B00SD9ISIQ) and [NUC5i5RYH](http://www.amazon.com/dp/B00SD9IS1S/)). Unlike the previous generation of NUCs, which did not include wireless networking hardware by default, the new generation includes wireless networking hardware for Wifi and Bluetooth on the motherboard. This means that Ubuntu running on the servers not only loaded the high-level kernel modules for wifi and bluetooth support (`iwlwifi` and `bluetooth`), it also loaded modules necessary for support on the specific (included) hardware: `iwlmvm` and `btusb`. When the `remove kernel modules` Ansible role ran, it failed with an error because it could not remove the top-level modules without removing their dependencies first. A quickfix to get this working on the new hardware was to change `disabled_kernel_modules` in `group_vars/securedrop.yml` from: ``` yml disabled_kernel_modules: - bluetooth - iwlwifi ``` to: ``` yml disabled_kernel_modules: - btusb - bluetooth - iwlmvm - iwlwifi ``` The order of the modules is important! We need to make sure the the dependencies are removed prior to the target modules that depend on them. This list is also likely specific to the new generation of Intel NUCs. If we want to support a wider variety of hardware, we may want to try being smart about removing kernel modules and their dependencies, e.g. something akin to this technique from [Stack Exchange](https://askubuntu.com/questions/317230/how-can-i-temporarily-disable-a-kernel-module). Finally, we need to make sure this updated module blacklist still works on the old hardware as well.
[ { "content": "__version__ = '0.3.4'\n", "path": "securedrop/version.py" } ]
[ { "content": "__version__ = '0.3.5'\n", "path": "securedrop/version.py" } ]
diff --git a/changelog.md b/changelog.md index 067f5d8e02..cbfd4c1003 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,16 @@ # Changelog +## 0.3.5 + +The issues for this release were tracked with the 0.3.5 milestone on Github: https://github.com/freedomofpress/securedrop/milestones/0.3.5 + +* Use certificate verification instead of fingerprint verification by default for the OSSEC Postfix configuration (#1076) +* Fix apache2 service failing to start on Digital Ocean (#1078) +* Allow Apache to rotate its logs (#1074) +* Prevent reboots during cron-apt upgrade (#1071) +* Update documentation (#1107, #1112, #1113) +* Blacklist additional kernel modules used for wireless networking (#1116) + ## 0.3.4 The issues for this release were tracked with the 0.3.4 milestone on Github: https://github.com/freedomofpress/securedrop/milestones/0.3.4 diff --git a/docs/hardware.md b/docs/hardware.md index 54b205225b..d024d25d4e 100644 --- a/docs/hardware.md +++ b/docs/hardware.md @@ -1,104 +1,177 @@ # Hardware for SecureDrop -This document outlines requirements and recommended hardware for use with SecureDrop. If you have any questions, please contact [email protected]. +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* -## SecureDrop Infrastructure +- [Overview](#overview) + - [Required Hardware](#required-hardware) + - [Servers](#servers) + - [A note about virtualization](#a-note-about-virtualization) + - [Workstations](#workstations) + - [A note about recycled hardware](#a-note-about-recycled-hardware) + - [Optional Hardware](#optional-hardware) +- [Specific Hardware Recommendations](#specific-hardware-recommendations) + - [Application/Monitor Servers](#applicationmonitor-servers) + - [Potential BIOS issue](#potential-bios-issue) + - [Secure Viewing Station (SVS)](#secure-viewing-station-svs) + - [A note about Hi-DPI displays](#a-note-about-hi-dpi-displays) + - [Tails USBs](#tails-usbs) + - [Transfer Device](#transfer-device) + - [Network Firewall](#network-firewall) + - [Network Switch](#network-switch) + - [Printers](#printers) + - [Monitor, Keyboard, Mouse](#monitor-keyboard-mouse) -The SecureDrop infrastructure consists of different components, all of which are listed below. We recommend you read through the whole document and figure out what makes the most sense for your organization. +<!-- END doctoc generated TOC please keep comment here to allow auto update --> -### App Server and Monitor Server +This document outlines the required hardware components necessary to successfully install and operate a SecureDrop instance, and recommends some specific components that we have found to work well. If you have any questions, please email [email protected]. -The *Application Server* (or *App Server* for short) runs the SecureDrop application. This server hosts both the website that sources access (*Source Interface*) and the website that journalists access (*Document Interface*). The *Monitor Server* keeps track of the *App Server* and sends out email alerts if something seems wrong. SecureDrop requires that you have: +## Overview - * 1 x physical server for the *Application Server*, which will run the SecureDrop application. - * 1 x physical server for the *Monitor Server*, which sends emails about issues with the *App Server*. +### Required Hardware -The SecureDrop application requires a 64-bit operating system. You can repurpose old hardware if it is capable of running 64-bit Ubuntu. Otherwise, we recommend you get two [Intel NUCs](http://www.amazon.com/dp/B00F3F38O2/) with power cords. Make sure you also get the cables required to connect the NUCs to a monitor. Additionally, you will need to get the following [two 240 GB hard drives](http://www.amazon.com/dp/B00BQ8RKT4/) and a [4 GB (2GBx2) memory kit](http://www.amazon.com/Crucial-PC3-12800-204-Pin-Notebook-CT2CP25664BF160B/dp/B005MWQ6WC/). +#### Servers -#### Potential BIOS issue +These are the core components of a SecureDrop instance. -The previous release of SecureDrop (0.2.1) was based on Ubuntu 12.04.1 (precise). We encountered issues installing this version of SecureDrop on some types of Intel NUCs. The problem manifested after installing Ubuntu on the NUC. The installation would complete, but rebooting after installation would not succeed. +- **Application Server**: 1 physical server to run the SecureDrop web services. +- **Monitor Server**: 1 physical server which monitors activity on the *Application Server* and sends email notifications to an administrator. +- **Network Firewall**: 1 physical computer that is used as a dedicated firewall for the SecureDrop servers. -We have not encountered this or any similar problems in testing the current release (0.3) with the Intel NUCs. Since 0.3 is based on Ubuntu 14.04.1 (trusty), we believe the issue has been resolved by Ubuntu. +##### A note about virtualization -If you do encounter issues booting Ubuntu on the NUCs, try [updating the BIOS according to these instructions](http://arstechnica.com/gadgets/2014/02/new-intel-nuc-bios-update-fixes-steamos-other-linux-booting-problems/). +We are often asked if it is acceptable to run SecureDrop on cloud servers (e.g. Amazon EC2, DigitalOcean, etc.) instead of on dedicated hardware. This request is generally motivated by a desire for cost savings and convenience; however, cloud servers are trivially accessible and manipulable by the provider that operates them. In the context of SecureDrop, this means that the provider could access extremely sensitive information, such as the plaintext of submissions or the encryption keys used to identify and access the Tor Hidden Services. -### Secure Viewing Station (SVS) +One of the core goals of SecureDrop is to avoid the potential compromise of sources through the compromise of third party communications providers. Therefore, we consider the use of virtualization for production instances of SecureDrop to be an unacceptable compromise and do not support it. While it is technically possible to modify SecureDrop's automated installation process to work on virtualized servers (for example, we do so to support our CI pipeline), you do so at your own risk and without our support or consent. -The *Secure Viewing Station* is a machine that is kept offline and only ever used together with the Tails operating system. This machine will be used to generate GPG keys for all journalists with access to SecureDrop, as well as decrypt and view submitted documents. Since this machine will never touch the Internet or run an operating system other than Tails, it does not need a hard drive or network device. We recommend the following: +#### Workstations - * 1 x laptop without a hard drive, network interface card or wireless units. - * 1 x encrypted, external hard drive to store documents on while working on a story. - * 1 x offline printer. +These components are necessary to do the initial installation of SecureDrop and to process submissions using the airgapped workflow. -We recommend that you either buy or repurpose an old laptop. Another option is to buy an [Intel NUC](http://www.amazon.com/dp/B00F3F38O2/) with a power cord and [4 GB of memory](http://www.amazon.com/Crucial-PC3-12800-204-Pin-Notebook-CT2CP25664BF160B/dp/B005MWQ6WC/), but note that you will also need to get a monitor and a wired keyboard and mouse. +- **Secure Viewing Station (SVS)**: 1 physical computer used as an airgap to decrypt and view submissions retrieved from the **Application Server**. + - The chosen hardware should be solely used for this purpose and should have any wireless networking hardware removed before use. +- **Admin/Journalist Workstation(s)**: *At least 1* physical computer that is used as a workstation for SecureDrop admins and/or journalists. + - Each Admin and Journalist will have their own bootable Tails USB with an encrypted persistent partition that they will use to access SecureDrop. You will need at least one *workstation* to boot the Tails USBs, and may need more depending on: the number of admins/journalists you wish to grant access to SecureDrop, whether they can share the same workstation due to availability requirements, geographic distribution, etc. +- **USB drive(s)**: *At least 2* USB drives to use as a bootable Tails USB for the **SVS** and the **Admin Tails**/**Journalist Tails**. + - If only one person is maintaining the system, you may use the same Tails instance as both the Admin Tails and the Journalist Tails; otherwise, we recommend buying 1 drive for each admin and each journalist. + - We also recommend buying two additional USBs to use as bootable backups of the **SVS** and **Admin Tails**. +- **Two-factor authenticator**: Two-factor authentication is used when connecting to different parts of the SecureDrop system. Each admin and each journalist needs a two-factor authenticator. We currently support two options for two-factor authentication: + - Your existing smartphone with an app that computes TOTP codes (e.g. [Google Authenticator][]) + - A dedicated hardware dongle that computes HOTP codes (e.g. a [YubiKey][]). +- **Transfer Device(s)**: You need a mechanism to transfer encrypted submissions from the **Journalist Workstation** to the **SVS** to decrypt and view them. The most common transfer devices are DVD/CD-R discs and USB drives. + - From a security perspective, it is preferable to use write-once media such as DVD/CD-R discs because it eliminates the risk of exfiltration by malware that persists on the Transfer Device (e.g. [BadUSB][]). + - On the other hand, using write-once media to transfer data is typically inconvenient and time-consuming. You should consider your threat model and choose your transfer device accordingly. +- **Monitor, Keyboard, Mouse**: You will need these to do the initial installation of Ubuntu on the Application and Monitor servers. + - Depending on your setup, you may also need these to work on the **SVS**. +>>>>>>> release/0.3.5 -#### Printers +[BadUSB]: https://srlabs.de/badusb/ +[Google Authenticator]: https://support.google.com/accounts/answer/1066447?hl=en +[YubiKey]: https://www.yubico.com/products/yubikey-hardware/yubikey/ -Careful consideration should be given to the printer used with the SVS. Most printers today have wireless functionality (WiFi or Bluetooth connectivity) which should be **avoided** because it could be used to compromise the airgap. +#### A note about recycled hardware -Unfortunately, it is difficult to find printers that work with Tails, and it is increasingly difficult to find non-wireless printers at all. To assist you, we have compiled the following partial list of airgap-safe printers that have been tested and are known to work with Tails: +If you cannot afford to purchase new hardware for your SecureDrop instance, we encourage you to consider re-purposing existing hardware to use with SecureDrop. If you are comfortable working with hardware, this is a great way to set up a SecureDrop instance for cheap. -| Model | Testing Date | Tails Versions | Price (new) | Price (used) | Notes | -|---------------------------|--------------|----------------|------------------|------------------|------------| -| HP LaserJet 400 M401n | 06/2015 | 1.4 | $178.60 (Amazon) | $115.00 (Amazon) | Monochrome laser printer. Heavy (10 lbs.) When adding the printer in Tails, you need to set "Make and model" to "HP LaserJet 400 CUPS+Gutenprint v5.2.9". | -| HP Deskjet 6940 | 04/2015 | 1.3.2 | $639.99 (Amazon) | $196.99 (Amazon) | Monochrome Inkjet printer | +Since SecureDrop's throughput is significantly limited by the use of Tor for all connections, there is no need to use top of the line hardware for any of the servers or the firewall. In our experience, relatively recent recycled Dell desktops or servers are adequate for the SecureDrop servers, and recycled Thinkpad laptops work well for the Admin/Journalist workstations. -If you know of another model of printer that fits our requirements and works with Tails, please submit a pull request to add it to this list. +If you choose to use recycled hardware, you should of course consider whether or not it is trustworthy; making that determination is outside the scope of this document. -### Tails and Transfer Devices +### Optional Hardware -The *Transfer Device* is the physical media used to transfer encrypted documents from the *Journalist Workstation* to the *Secure Viewing Station*. Additional devices are needed to run Tails. We recommend the following: +This hardware is not *required* to run a SecureDrop instance, but most of it is still recommended. - * 1 x physical media for the system administrator to run Tails on. - * 1 x physical media for the system administrator to transfer files with. - * 1 x physical media for the journalist to run Tails on. - * 1 x physical media for the journalist to transfer files with. - * 1 x physical media with Tails for the *Secure Viewing Station*. - * 1 x physical media with Tails for the *Secure Viewing Station* (backup). +- **Offline Printer**: It is often useful to print submissions from the **SVS** for review and annotation. + - To maintain the integrity of the airgap, this printer should be dedicated to use with the SVS, connected via a wired connection, and should not have any wireless communication capabilities. +- **Offline Storage**: The **SVS** is booted from a Tails USB drive, which has an encrypted persistent volume but typically has a fairly limited storage capacity since it's just a USB drive. For installations that expect to receive a large volume of submissions, we recommend buying an external hard drive that can be encrypted and used to store submissions that have been been transferred from the **Application Server** to the **SVS**. +- **Backup storage**: It's useful to run periodic backups of the servers in case of failure. We recommend buying an external hard drive that can be encrypted and used to store server backups. + - Since this drive will be connected to the **Admin Workstation** to perform backups, it should *not* be the same drive used for **Offline Storage**. +- **Network Switch**: If your firewall has fewer than **four** NIC's, you will need an additional Ethernet switch to perform installation and maintenance tasks with the Admin Workstation. This switch is generally useful because it allows you to connect the **Admin Workstation** to your firewall's LAN port without taking down either of the SecureDrop servers. -We recommend getting [16 GB Leef Supra](http://www.amazon.com/dp/B00FWQTBZ2/) USB sticks to run Tails on, and [4 GB Patriot](http://www.amazon.com/Swivel-Flash-Drive-Memory-Stick/dp/B00M1GYD90/) USB sticks to use when transferring files. Each journalist should have two USB sticks. For the Secure Viewing Station and backup, we recommend getting [Corsair 64 GB](http://www.amazon.com/dp/B00EM71W1S/) USB sticks. +## Specific Hardware Recommendations -Another alternative setup exists in which journalists do not transfer files on a USB stick, but instead use a CD-R or DVD-R. The encrypted documents are copied to the CD-R or DVD-R, then decrypted and read on the Secure Viewing Station. The disks are destroyed after first use. We recommend getting a [Samsung model burner](http://www.newegg.com/External-CD-DVD-Blu-Ray-Drives/SubCategory/ID-420) for this purpose. +### Application/Monitor Servers -### Two-factor authentication device +The Intel NUC (Next Unit of Computing) is a capable, cheap, quiet, and low-powered device that can be used for the SecureDrop servers. There are a [variety of models][Intel NUCs] to choose from. We recommend the [D54250WYK][] because it has a mid-range CPU (Intel i5), the common Mini DisplayPort connector for the monitor, and USB 3.0 ports for faster OS installation and data transfer. Conveniently (for the paranoid), it supports wireless networking (Wifi and Bluetooth) through *optional* expansion cards not included by default - which means you don't have to spend time ripping out the wireless hardware before beginning the installation. -Two-factor authentication is used when connecting to different parts of the SecureDrop system, including the *Document Interface*. We recommend the following for each administrator or journalist with access to the system: +[Intel NUCs]: https://www-ssl.intel.com/content/www/us/en/nuc/products-overview.html +[D54250WYK]: https://www-ssl.intel.com/content/www/us/en/nuc/nuc-kit-d54250wyk.html - * 1 x two-factor authentication device +If you purchase the NUC from [Amazon](http://www.amazon.com/Intel-D54250WYK-DisplayPort-Graphics-i5-4250U/dp/B00F3F38O2/), make sure you click "With Powercord" to have one included in the package. -We recommend using either a smartphone capable of running [Google Authenticator](https://support.google.com/accounts/answer/1066447?hl=en) or a [YubiKey](https://www.yubico.com/products/yubikey-hardware/yubikey/). +Note that the NUCs come as kits and some assembly is required. You will need to purchase the RAM and hard drive separately for each NUC and insert the cards into the NUC before it can be used. We recommend: -### Network firewall +- 2 [240 GB SSDs](http://www.amazon.com/dp/B00BQ8RKT4/) +- A [4 GB (4GBx2) memory kit](http://www.amazon.com/Crucial-PC3-12800-204-Pin-Notebook-CT2CP25664BF160B/dp/B005MWQ6WC/) + - You can put one 4GB memory stick in each of the servers. -An important part of SecureDrop's security model involves segmenting the infrastructure from the Internet and/or the corporate environment. For this reason, we recommend that you get: +*Warning:* The D54250WYK has recently been [EOL'ed by Intel](http://ark.intel.com/products/series/70407/Intel-NUC-Boards-and-Kits). Availability and prices may be subject to change. We are working on analyzing alternative recommendations, but there are no immediately obvious alternatives that share the benefits of the D54250WYK (primarily, the lack of integrated wireless networking hardware). - * 1 x firewall with pfSense and minimum three NICs. +#### Potential BIOS issue -We recommend getting a Netgate firewall with pfSense pre-installed, and you can choose from a firewall with [2 GB of system memory](http://store.netgate.com/NetgateAPU2.aspx) or one with [4 GB of system memory](http://store.netgate.com/APU4.aspx). +An earlier release of SecureDrop (0.2.1) was based on Ubuntu 12.04.1 (precise). We encountered issues installing this version of Ubuntu on some types of Intel NUCs. The problem manifested after installing Ubuntu on the NUC. The installation would complete, but rebooting after installation would not succeed. -### Network Switch +We have not encountered this or any similar problems in testing the current release series (0.3.x) with the Intel NUCs. Since 0.3 is based on Ubuntu 14.04.1 (trusty), we believe the issue has been resolved in the newer release of Ubuntu. + +If you do encounter issues booting Ubuntu on the NUCs, try updating the BIOS according to [these instructions](http://arstechnica.com/gadgets/2014/02/new-intel-nuc-bios-update-fixes-steamos-other-linux-booting-problems/). + +### Secure Viewing Station (SVS) + +The *Secure Viewing Station* is a machine that is kept offline and only ever used together with the Tails operating system. This machine will be used to generate the GPG keys used by SecureDrop to encrypt submissions, as well as decrypt and view submissions. Since this machine will never touch the Internet or run an operating system other than Tails, it does not need a hard drive or network device; in fact, we recommend removing these components if they are already present. + +One option is to buy a Linux-compatible laptop such as a [Lenovo Thinkpad](http://shop.lenovo.com/us/en/laptops/thinkpad/t-series/t540p/). You can also repurpose an old laptop if you have one available. + +Another option is to buy an [Intel NUC D54250WYK](http://www.amazon.com/Intel-D54250WYK-DisplayPort-Graphics-i5-4250U/dp/B00F3F38O2/) (same model as the servers) with a power cord and [4 GB of memory](http://www.amazon.com/Crucial-PC3-12800-204-Pin-Notebook-CT2CP25664BF160B/dp/B005MWQ6WC/), but note that you will also need to get a monitor and a wired keyboard and mouse. It does not come with a hard drive or wireless networking hardware by default, so you will not need to remove these components before using it. However, we do recommend taping over the IR receiver with some opaque masking tape. + +Note that if you do want to use a NUC for the SVS, you *should not* use any of the new generation of NUCs, which have names starting with "NUC5" (e.g. [NUC5i5RYK](https://www-ssl.intel.com/content/www/us/en/nuc/nuc-kit-nuc5i5ryk.html).. These NUCs have wireless networking built into the motherboard, and it is impossible to physically remove. + +#### A note about Hi-DPI displays -If you firewall has fewer than **four** NICs, you will need an additional Ethernet switch to perform installation and maintenance tasks with the Admin Workstation. This switch is generally useful because it allows you to connect to your firewall's LAN port without taking down either of the SecureDrop servers, which is useful if you want to perform maintenance tasks from the Admin Workstation on the SecureDrop installation or the firewall configuration. This is possible without the switch if your firewall has enough ports, but you will need to perform some additional initial firewall setup to get this to work. +The current version of Tails (1.5.1) is based on Debian 7 ("Wheezy"), which does not have good support for Hi-DPI displays. Examples of laptops that use this type of display are MacBook/MacBook Pros with the Retina display, or the Dell Precision M3800. We *do not recommend* using such laptops with any of the components that run Tails (the SVS, Admin Workstation, and Journalist Workstation). While it is possible to use them, the screen resolution will not be scaled correctly. Typically, this means everything will be really tiny, bordering on unreadable. -We recommend getting a [5-port Netgear ProSafe Ethernet Switch](http://www.amazon.com/NETGEAR-ProSafe-Gigabit-Ethernet-Desktop/dp/B0000BVYT3/) or similar. +Until the upcoming version of Tails (2.x, based on Debian 8) comes out, use standard resolution displays with Tails. -## Appendix +### Tails USBs -### Notes on the NUCs +We *strongly recommend* getting USB 3.0-compatible drives to run Tails from. The transfer speeds are significantly faster than USB 2.0, which means a live operating system booting from one will be much faster and more responsive. -There are a variety of available NUCs, and each different model supports different hardware specs and peripheral connectors. For hardware testing, we have been using: +You will need *at least* an 8GB drive to run Tails with an encrypted persistent partition. We recommend getting something in the 16-64GB range so you can handle large amounts of submissions without hassle. Anything more than that is probably overkill. -#### D34010WYK +Other than that, the choice of USB drive depends on capacity, form factor, cost, and a host of other factors. One option that we like is the [Leef Supra](http://www.amazon.com/Leef-Supra-PrimeGrade-Memory-Silver/dp/B00FWQMKA0). -[Amazon link w/ picture](http://www.amazon.com/Intel-Computing-BOXD34010WYK1-Black-White/dp/B00H3YT886/) +### Transfer Device -We have been using one for the Secure Viewing Station (SVS), which is air-gapped and never connected to the Internet, and one for the Admin Workstation, which is Internet-connected and is used to run the Ansible playbooks. You could also use an existing workstation, or a recycled machine, for this purpose, assuming you feel confident that this machine has not been physically compromised in any way. +If you are using USBs for the transfer device, the same general recommendations for the Tails USBs also apply. One thing to consider is that you are going to have *a lot* of USB drives to keep track of, so you should consider how you will label or identify them and buy drives accordingly. Drives that are physically larger are often easier to label (e.g. with tape or a label from a labelmaker). + +If you are using DVD/CD-R's for the transfer device, you will need *two* DVD/CD writers: one for burning DVDs from the **Journalist Workstation**, and one for reading the burned DVDs on the **SVS**. We recommend using two separate drives instead of sharing the same drive to avoid the potential risk of malware exfiltrating data by compromising the drive's firmware. We've found the DVD/CD writers from Samsung and LG to work reasonably well, you can find some examples [here](http://www.newegg.com/External-CD-DVD-Blu-Ray-Drives/SubCategory/ID-420). + +Finally, you will need a stack of blank DVD/CD-R's, which you can buy anywhere. + +### Network Firewall + +We recommend the [pfSense SG-2440](http://store.pfsense.org/SG-2440/). + +### Network Switch + +This is optional, for people who are using a firewall with less than 4 ports (the recommended firewall has 4 ports). Any old switch with more than 3 ports will do, such as the [5-port Netgear ProSafe Ethernet Switch](http://www.amazon.com/NETGEAR-ProSafe-Gigabit-Ethernet-Desktop/dp/B0000BVYT3/). + +### Printers + +Careful consideration should be given to the printer used with the SVS. Most printers today have wireless functionality (WiFi or Bluetooth connectivity) which should be **avoided** because it could be used to compromise the airgap. + +Unfortunately, it is difficult to find printers that work with Tails, and it is increasingly difficult to find non-wireless printers at all. To assist you, we have compiled the following partial list of airgap-safe printers that have been tested and are known to work with Tails: + +| Model | Testing Date | Tails Versions | Price (new) | Price (used) | Notes | +|---------------------------|--------------|----------------|------------------|------------------|------------| +| HP LaserJet 400 M401n | 06/2015 | 1.4 | $178.60 (Amazon) | $115.00 (Amazon) | Monochrome laser printer. Heavy (10 lbs.) When adding the printer in Tails, you need to set "Make and model" to "HP LaserJet 400 CUPS+Gutenprint v5.2.9". | +| HP Deskjet 6940 | 04/2015 | 1.3.2 | $639.99 (Amazon) | $196.99 (Amazon) | Monochrome Inkjet printer | + +If you know of another model of printer that fits our requirements and works with Tails, please submit a pull request to add it to this list. -This machine has USB 3.0, which is nice for booting live USBs quickly and for transferring large files. It has two available display connectors: Mini-HDMI and DisplayPort. +### Monitor, Keyboard, Mouse -#### DC3217IYE +We don't have anything specific to recommend when it comes to displays. You should make sure you know what monitor cable you need for the servers, since you will need to connect them to a monitor to do the initial Ubuntu installation. -[Amazon link w/ picture](http://www.amazon.com/Intel-Computing-Gigabit-i3-3217U-DC3217IYE/dp/B0093LINVK) +You should use a wired (USB) keyboard and mouse, not wireless. -We have been using two of these for the Application and Monitor servers (app and mon). They only have USB 2.0, which is not so bad because the Linux installation using live USB is a one-time process and you rarely transfer files directly from the servers. They also only have one available display connector: HDMI. diff --git a/docs/images/firewall/admin_workstation_static_ip_configuration.png b/docs/images/firewall/admin_workstation_static_ip_configuration.png new file mode 100755 index 0000000000..f3594c2efd Binary files /dev/null and b/docs/images/firewall/admin_workstation_static_ip_configuration.png differ diff --git a/docs/images/firewall/edit_network_connection.png b/docs/images/firewall/edit_network_connection.png new file mode 100755 index 0000000000..c7cf62a55c Binary files /dev/null and b/docs/images/firewall/edit_network_connection.png differ diff --git a/docs/images/firewall/edit_wired_connection.png b/docs/images/firewall/edit_wired_connection.png deleted file mode 100755 index 5a5c81e10a..0000000000 Binary files a/docs/images/firewall/edit_wired_connection.png and /dev/null differ diff --git a/docs/images/firewall/editing_wired_connection.png b/docs/images/firewall/editing_wired_connection.png deleted file mode 100755 index 32fd143ea4..0000000000 Binary files a/docs/images/firewall/editing_wired_connection.png and /dev/null differ diff --git a/docs/images/firewall/invoke_auto_upgrade.png b/docs/images/firewall/invoke_auto_upgrade.png new file mode 100755 index 0000000000..c710092434 Binary files /dev/null and b/docs/images/firewall/invoke_auto_upgrade.png differ diff --git a/docs/images/firewall/ip_aliases_with_opt2.png b/docs/images/firewall/ip_aliases_with_opt2.png new file mode 100755 index 0000000000..84fd875220 Binary files /dev/null and b/docs/images/firewall/ip_aliases_with_opt2.png differ diff --git a/docs/images/firewall/lan_rules.png b/docs/images/firewall/lan_rules.png index 247e534276..d2e3247e34 100644 Binary files a/docs/images/firewall/lan_rules.png and b/docs/images/firewall/lan_rules.png differ diff --git a/docs/images/firewall/lan_rules_with_opt2.png b/docs/images/firewall/lan_rules_with_opt2.png new file mode 100755 index 0000000000..e65575be18 Binary files /dev/null and b/docs/images/firewall/lan_rules_with_opt2.png differ diff --git a/docs/images/firewall/opt1_rules.png b/docs/images/firewall/opt1_rules.png index 55e15d52df..fda85d09f9 100644 Binary files a/docs/images/firewall/opt1_rules.png and b/docs/images/firewall/opt1_rules.png differ diff --git a/docs/images/firewall/opt1_rules_with_opt2.png b/docs/images/firewall/opt1_rules_with_opt2.png new file mode 100644 index 0000000000..d06e8db0e8 Binary files /dev/null and b/docs/images/firewall/opt1_rules_with_opt2.png differ diff --git a/docs/images/firewall/opt2_rules.png b/docs/images/firewall/opt2_rules.png new file mode 100644 index 0000000000..d0eb98f17c Binary files /dev/null and b/docs/images/firewall/opt2_rules.png differ diff --git a/docs/images/firewall/pfsense_update_available.png b/docs/images/firewall/pfsense_update_available.png new file mode 100755 index 0000000000..d51f9048b1 Binary files /dev/null and b/docs/images/firewall/pfsense_update_available.png differ diff --git a/docs/install.md b/docs/install.md index e4013ac659..0bf0db3f05 100644 --- a/docs/install.md +++ b/docs/install.md @@ -194,8 +194,8 @@ The Freedom of the Press Foundation Master Signing Key should have a fingerprint Verify that the current release tag was signed with the master signing key. cd securedrop/ - git checkout 0.3.4 - git tag -v 0.3.4 + git checkout 0.3.5 + git tag -v 0.3.5 You should see 'Good signature from "Freedom of the Press Foundation Master Signing Key"' in the output of `git tag`. If you do not, signature verification has failed and you *should not* proceed with the installation. If this happens, please contact us at [email protected]. diff --git a/docs/network_firewall.md b/docs/network_firewall.md index 937159975c..298967b2d8 100644 --- a/docs/network_firewall.md +++ b/docs/network_firewall.md @@ -6,57 +6,93 @@ Network Firewall Setup Guide **Table of Contents** *generated with [DocToc](http://doctoc.herokuapp.com/)* - [Before you begin](#before-you-begin) + - [3 NIC configuration](#3-nic-configuration) + - [4 NIC configuration](#4-nic-configuration) - [Initial Setup](#initial-setup) - - [Assign interfaces](#assign-interfaces) - [Initial configuration](#initial-configuration) - [Connect to the pfSense WebGUI](#connect-to-the-pfsense-webgui) - [Setup Wizard](#setup-wizard) - [Connect Interfaces and Test Connectivity](#connect-interfaces-and-test-connectivity) - [SecureDrop-specific Configuration](#securedrop-specific-configuration) - - [Set up OPT1](#set-up-opt1) - [Disable DHCP on the LAN](#disable-dhcp-on-the-lan) - [Disabling DHCP](#disabling-dhcp) - [Assigning a static IP address to the Admin Workstation](#assigning-a-static-ip-address-to-the-admin-workstation) + - [Troubleshooting: DNS servers and the Unsafe Browser](#troubleshooting-dns-servers-and-the-unsafe-browser) + - [Set up OPT1](#set-up-opt1) + - [Set up OPT2](#set-up-opt2) - [Set up the network firewall rules](#set-up-the-network-firewall-rules) - [Example Screenshots](#example-screenshots) + - [3 NICs Configuration](#3-nics-configuration) + - [4 NICs Configuration](#4-nics-configuration) + - [Keeping pfSense Up to Date](#keeping-pfsense-up-to-date) <!-- END doctoc generated TOC please keep comment here to allow auto update --> -Unfortunately, due to the wide variety of firewalls that may be used, we do not provide specific instructions to cover every type or variation in software or hardware. +Unfortunately, due to the wide variety of firewalls that may be used, we do not provide specific instructions to cover every type or variation in software or hardware. This guide is based on pfSense, and assumes your firewall hardware has at least three interfaces: WAN, LAN, and OPT1. For hardware, you can build your own network firewall (not covered in this guide) and [install pfSense](https://doc.pfsense.org/index.php/Installing_pfSense) on it. For most installations, we recommend buying a dedicated firewall appliance with pfSense pre-installed, such as the one recommended in the Hardware Guide. + +We used to recommend the 3-NIC [Netgate APU 2](http://store.netgate.com/NetgateAPU2.aspx), but it has since been discontinued. We currently recommend the [pfSense SG-2440](http://store.pfsense.org/SG-2440/), which has 4 interfaces: WAN, LAN, OPT1, and OPT2. This guide covers both the old 3-NIC configuration, for existing installs that are still using it, and the 4-NIC configuration recommended for new installs. + +If your firewall only has 3 NICs (WAN, LAN, and OPT1), you will need to use a switch on the OPT1 interface to connect the Admin Workstation for the initial installation. If your firewall has 4 NICs (WAN, LAN, OPT1, and OPT2), a switch is not necessary. -This guide will focus on pfSense, and assumes your firewall has at least three interfaces: WAN, LAN, and OPT1. These are the default interfaces on the recommended Netgate firewall, and it should be easy to configure any pfSense firewall with 3 or more NICs this way. +If you are new to pfSense or firewall management in general, we recommend the following resources: -To avoid duplication, this guide refers to sections of the [pfSense Guide](http://data.sfb.bg.ac.rs/sftp/bojan.radic/Knjige/Guide_pfsense.pdf), so you will want to have that handy. +- [Official pfSense Wiki](https://doc.pfsense.org/index.php/Main_Page) +- [pfSense: The Definitive Guide](http://www.amazon.com/pfSense-Definitive-Guide-Christopher-Buechler-ebook/dp/B004OYTMPC) + - *Note:* This guide is now slightly out of date, although we found it to be a useful reference approximately 1 year ago. To get the latest version of this book, you need to become a [pfSense Gold Member](https://www.pfsense.org/our-services/gold-membership.html). Before you begin ---------------- -First, consider how the firewall will be connected to the Internet. You need to be able to provision two unique subnets for SecureDrop: the app subnet and the monitor subnet. There are a number of possible ways to configure this, and the best way will depend on the network that you are connecting to. +First, consider how the firewall will be connected to the Internet. You will need to provision several unique subnets, which should not conflict with the network configuration on the WAN interface. If you are unsure, consult your local sysadmin. + +Note that many firewalls, including the recommended Netgate pfSense, automatically set up the LAN interface on `192.168.1.1/24`. This particular private network is also a very common choice for home and office routers. If you are connecting the firewall to a router with the same subnet (common in a small office, home, or testing environment), you will probably be unable to connect to the network at first. However, you will be able to connect from the LAN to the pfSense WebGUI configuration wizard, and from there you will be able to configure the network so it is working correctly. + +#### 3 NIC configuration + +If your firewall has 3 NICs, we will refer to them as WAN, LAN, and OPT1. WAN is used to connect to the external network. LAN and OPT1 are used for the Application and Monitor Servers, respectively. Putting them on separate interfaces allows us to use the network firewall to filter and monitor the traffic *between* them. + +In addition, you will need to be able to connect the Admin Workstation to this setup for the initial installation. Before SecureDrop is installed, the only way to connect to the servers is via SSH over the local network, so the Admin Workstation needs to be directly connected. Once it is installed, SSH will be available remotely (as an authenticated Tor Hidden Servce) and you will not necessarily need to connect the Admin Workstation directly to adminster the servers - although you will still need to connect it directly to administer the network firewall. Since there isn't another NIC to connect the Admin Workstation to, we recommend using a small switch on the LAN (the specific choice of interface doesn't matter, but we recommend using the LAN to stay consistent with the rest of this guide) so you can connect both the Admin Workstation and the Application Server. + +Depending on your network configuration, you should define the following values before continuing. For the examples in this guide, we have chosen: + +* Admin/App Gateway: `10.20.1.1` +* Admin/App Subnet: `10.20.1.0/24` +* App Server: `10.20.1.2` +* Admin Workstation: `10.20.1.3` + +<!-- --> + +* Monitor Subnet: `10.20.2.0/24` +* Monitor Gateway: `10.20.2.1` +* Monitor Server: `10.20.2.2` + +#### 4 NIC configuration + +If your firewall has 4 NICs, we refer to them as WAN, LAN, OPT1, and OPT2. In this case, we can now use a dedicated port on the network firewall for each component of SecureDrop (Application Server, Monitor Server, and Admin Workstation), so you do not need a switch like you do for the 3-NIC configuration. + +Depending on your network configuration, you should define the following values before continuing. For the examples in this guide, we have chosen: -Note that many firewalls, including the recommended Netgate pfSense, automatically set up the LAN interface on 192.168.1.1/24. The `/24` subnet is a very common choice for home routers. If you are connecting the firewall to a router with the same subnet (common in a small office, home, or testing environment), you will probably be unable to connect to the network at first. However, you will be able to connect from the LAN to the pfSense WebGUI configuration wizard, and from there you will be able to configure the network so it is working correctly. +* Admin Subnet: `10.20.1.0/24` +* Admin Gateway: `10.20.1.1` +* Admin Workstation: `10.20.1.2` -The app subnet will need at least three IP addresses: one for the gateway, one for the app server, and one for the admin workstation. The monitor subnet will need at least two IP addresses: one for the gateway and one for the monitor server. +<!-- --> -We assume that you have examined your network configuration and have selected two appropriate subnets. We will refer to your chosen subnets as "App Subnet" and "Monitor Subnet" throughout the rest of the documentation. For the examples in the documentation, we have chosen: +* App Subnet: `10.20.2.0/24` +* App Gateway: `10.20.2.1` +* App Server: `10.20.2.2` -* App Subnet: 10.20.1.0/24 -* App Gateway: 10.20.1.1 -* App Server: 10.20.1.2 -* Admin Workstation: 10.20.1.3 +<!-- --> -* Monitor Subnet: 10.20.2.0/24 -* Monitor Gateway: 10.20.2.1 -* Monitor Server: 10.20.2.2 +* Monitor Subnet: `10.20.3.0/24` +* Monitor Gateway: `10.20.3.1` +* Monitor Server: `10.20.3.2` Initial Setup ------------- Unpack the firewall, connect power, and power on. -### Assign interfaces - -Section 3.2.3, "Assigning Interfaces", of the pfSense Guide. Some firewalls, like the Netgate recommended in the Hardware Guide, have this set up already, in which case you can skip this step. - ### Initial configuration We will use the pfSense WebGUI to do the initial configuration of the network firewall. @@ -65,36 +101,43 @@ We will use the pfSense WebGUI to do the initial configuration of the network fi 1. Boot the Admin Workstation into Tails from the Admin Live USB. -2. Connect the Admin Workstation to the switch on the LAN. +2. Connect the Admin Workstation to the LAN interface. You should see + a popup notification in Tails that says "Connection Established". + + - Make sure your *only* active connections is the one you just + established with the network firewall. If you are connected to + another network at the same time (e.g. a wireless network), you + may encounter problems trying to connect the pfSense WebGUI. 3. Launch the *Unsafe Browser*, *Applications → Internet → Unsafe Browser*. - ![Launching the Unsafe Browser](images/firewall/launching_unsafe_browser.png) + ![Launching the Unsafe Browser](images/firewall/launching_unsafe_browser.png) - 1. Note that the *Unsafe Browser* is, as the name suggests, **unsafe** (its - traffic is not routed through Tor). However, it is the only option in - this context because Tails [intentionally][tails_issue_7976] disables - LAN access in the *Tor Browser*. + 1. Note that the *Unsafe Browser* is, as the name suggests, + **unsafe** (its traffic is not routed through Tor). However, it + is the only option in this context because Tails + [intentionally][tails_issue_7976] disables LAN access in the + *Tor Browser*. 2. A dialog will ask "Do you really want to launch the Unsafe Browser?". Click **Launch**. - ![You really want to launch the Unsafe Browser](images/firewall/unsafe_browser_confirmation_dialog.png) + ![You really want to launch the Unsafe Browser](images/firewall/unsafe_browser_confirmation_dialog.png) 3. You will see a pop-up notification that says "Starting the Unsafe Browser..." - ![Pop-up notification](images/firewall/starting_the_unsafe_browser.png) + ![Pop-up notification](images/firewall/starting_the_unsafe_browser.png) - 4. After a few seconds, the Unsafe Browser should launch. The window has a - bright red border to remind you to be careful when using it. You should - close it once you're done configuring the firewall and use the Tor - Browser for any other web browsing you might do on the Admin - Workstation. + 4. After a few seconds, the Unsafe Browser should launch. The + window has a bright red border to remind you to be careful when + using it. You should close it once you're done configuring the + firewall and use the Tor Browser for any other web browsing you + might do on the Admin Workstation. - ![Unsafe Browser Homepage](images/firewall/unsafe_browser.png) + ![Unsafe Browser Homepage](images/firewall/unsafe_browser.png) 4. Navigate to the pfSense GUI in the *Unsafe Browser*: `https://192.168.1.1` -5. The firewall uses a self-signed certificate, so you will see a "This Connection Is Untrusted" warning when you connect. This is expected (see Section 4.5.6 of the pfSense Guide). You can safely continue by clicking "I Understand the Risks", "Add Exception...", and "Confirm Security Exception." +5. The firewall uses a self-signed certificate, so you will see a "This Connection Is Untrusted" warning when you connect. This is expected. You can safely continue by clicking "I Understand the Risks", "Add Exception...", and "Confirm Security Exception." 6. You should see the login page for the pfSense GUI. Log in with the default username and password (admin / pfsense). @@ -102,29 +145,29 @@ We will use the pfSense WebGUI to do the initial configuration of the network fi #### Setup Wizard -If you're setting up a brand new (or recently factory reset) router, pfSense will start you on the Setup Wizard. Click next, then next again. Don't sign up for a pfSense Gold subscription. +If you're setting up a brand new (or recently factory reset) router, logging in to the pfSense WebGUI will automatically start the Setup Wizard. Click next, then next again. Don't sign up for a pfSense Gold subscription (unless you want to). -On the "General Information" page, we recommend leaving your hostname as the default (pfSense). There is no relevant domain for SecureDrop, so we recommend setting this to "securedrop.local" or something similar. Use whatever DNS servers you wish. If you don't know what DNS servers to use, we recommend using Google's DNS servers: `8.8.8.8` and `8.8.4.4`. Click Next. +On the "General Information" page, we recommend leaving your hostname as the default (pfSense). There is no relevant domain for SecureDrop, so we recommend setting this to `securedrop.local` or something similar. Use your preferred DNS servers. If you don't know what DNS servers to use, we recommend using Google's DNS servers: `8.8.8.8` and `8.8.4.4`. Click Next. Leave the defaults for "Time Server Information". Click Next. On "Configure WAN Interface", enter the appropriate configuration for your network. Consult your local sysadmin if you are unsure what to enter here. For many environments, the default of DHCP will work and the rest of the fields can be left blank. Click Next. -For "Configure LAN Interface", set the IP address and subnet mask of the Application Subnet for the LAN interface. Be sure that the CIDR prefix correctly corresponds to your subnet mask-- pfsense should automatically calculate this for you, but you should always check. In most cases, your CIDR prefix should be `/24`. Click Next. +For "Configure LAN Interface", use the IP address and subnet mask of the *gateway* for the **Admin Subnet**. Click Next. -Set a strong admin password. We recommend generating a random password with KeePassX, and saving it in the Tails Persistent folder using the provided KeePassX database template. Click Next. +Set a strong admin password. We recommend generating a strong password with KeePassX, and saving it in the Tails Persistent folder using the provided KeePassX database template. Click Next. -Click Reload. +Click Reload. Once the reload completes and the web page refreshes, click the corresponding "here" link to "continue on to the pfSense webConfigurator". -If you changed the LAN Interface settings, you will no longer be able to connect after reloading the firewall and the next request will probably time out. This is not a problem - the firewall has reloaded and is working correctly. To connect to the new LAN interface, unplug and reconnect your network cable to have a new network address assigned to you via DHCP. Note that if you used a subnet with fewer addresses than `/24`, the default DHCP configuration in pfSense may not work. In this case, you should assign the Admin Workstation a static IP address that is known to be in the subnet to continue. +At this point, since you (probably) changed the LAN subnet settings from their defaults, you will no longer be able to connect after reloading the firewall and the next request will probably time out. This is not an error - the firewall has reloaded and is working correctly. To connect to the new LAN interface, unplug and reconnect your network cable to get a new network address assigned via DHCP. Note that if you used a subnet with fewer addresses than `/24`, the default DHCP configuration in pfSense may not work. In this case, you should assign the Admin Workstation a static IP address that is known to be in the subnet to continue. -Now the WebGUI will be available on the App Gateway address. Navigate to `https://<App Gateway IP>` in the *Unsafe Browser*, and do the same dance as before to log in to the pfSense WebGUI and continue configuring the firewall. +Now the WebGUI will be available on the Admin Gateway address. Navigate to `https://<Admin Gateway IP>` in the *Unsafe Browser*, and do the same dance as before to log in to the pfSense WebGUI. Once you've logged in to the WebGUI, you are ready to continue configuring the firewall. #### Connect Interfaces and Test Connectivity -Now that the initial configuration is completed, you can connect the WAN port without potentially conflicting with the default LAN settings (as explained earlier). Connect the WAN port to the external network. You can watch the WAN entry in the Interfaces table on the pfSense WebGUI homepage to see as it changes from down (red arrow pointing down) to up (green arrow pointing up). The WAN's IP address will be shown once it comes up. +Now that the initial configuration is completed, you can connect the WAN port without potentially conflicting with the default LAN settings (as explained earlier). Connect the WAN port to the external network. You can watch the WAN entry in the Interfaces table on the pfSense WebGUI homepage to see as it changes from down (red arrow pointing down) to up (green arrow pointing up). This usually takes several seconds. The WAN's IP address will be shown once it comes up. -Finally, test connectivity to make sure you are able to connect to the Internet through the WAN. The easiest way to do this is to use ping (Diagnostics → Ping in the WebGUI). +Finally, test connectivity to make sure you are able to connect to the Internet through the WAN. The easiest way to do this is to use ping (Diagnostics → Ping in the WebGUI). Enter an external hostname or IP that you expect to be up (e.g. `google.com`) and click "Ping". SecureDrop-specific Configuration --------------------------------- @@ -136,15 +179,6 @@ SecureDrop uses the firewall to achieve two primary goals: In order to use the firewall to isolate the app and monitor servers from each other, we need to connect them to separate interfaces, and then set up firewall rules that allow them to communicate. -### Set up OPT1 - -We set up the LAN interface during the initial configuration. We now need to set up the OPT1 interface. Start by connecting the Monitor Server to the OPT1 port. Then use the WebGUI to configure the OPT1 interface. Go to `Interfaces → OPT1`, and check the box to "Enable Interface". Use these settings: - -- IPv4 Configuration Type: Static IPv4 -- IPv4 Address: Monitor Gateway - -Once again, be sure that the CIDR prefix correctly corresponds to your subnet mask (and should be `/24` in most cases). Pfsense should automatically calculate this for you, but you should always check. Leave everything else as the default. Save and Apply Changes. - ### Disable DHCP on the LAN pfSense runs a DHCP server on the LAN interface by default. At this stage in the documentation, the Admin Workstation has an IP address assigned via that DHCP server. You can easily check your current IP address by *right-clicking* the networking icon (a blue cable going in to a white jack) in the top right of the menu bar, and choosing "Connection Information". @@ -155,7 +189,7 @@ In order to tighten the firewall rules as much as possible, we recommend disabli #### Disabling DHCP -To disable DHCP, navigate to "Services → DHCP Server". Uncheck the box to "Enable DHCP servers on LAN interface", scroll down, and click the Save button. +To disable DHCP, navigate to "Services → DHCP Server". Uncheck the box to "Enable DHCP server on LAN interface", scroll down, and click the Save button. #### Assigning a static IP address to the Admin Workstation @@ -165,43 +199,96 @@ Start by *right-clicking* the networking icon in the top right of the menu bar, ![Edit Connections](images/firewall/edit_connections.png) -Select "Wired connection" from the list and click the "Edit..." button. +Select the name of the current connection from the list and click the "Edit..." button. -![Edit Wired Connection](images/firewall/edit_wired_connection.png) +![Edit Wired Connection](images/firewall/edit_network_connection.png) Change to the "IPv4 Settings" tab. Change "Method:" from "Automatic (DHCP)" to "Manual". Click the Add button and fill in the static networking information for the Admin Workstation. -![Editing Wired Connection](images/firewall/editing_wired_connection.png) +*Note:* The Unsafe Browser will not launch when using a manual network configuration if it does not have DNS servers configured. This is technically unnecessary for our use case because we are only using it to access IP addresses on the LAN, and do not need to resolve anything with DNS. Nonetheless, you should configure some DNS servers here so you can continue to use the Unsafe Browser to access the WebGUI in future sessions. We recommend keeping it simple and using the same DNS servers that you used for the network firewall in the setup wizard. + +![Admin Wokstation Static IP Configuration](images/firewall/admin_workstation_static_ip_configuration.png) Click "Save...". If the network does not come up within 15 seconds or so, try disconnecting and reconnecting your network cable to trigger the change. You will need you have succeeded in connecting with your new static IP when you see a pop-up notification that says "Tor is ready. You can now access the Internet". -### Set up the network firewall rules +##### Troubleshooting: DNS servers and the Unsafe Browser -Since there are a variety of firewalls with different configuration interfaces and underlying sets of software, we cannot provide a set of network firewall rules to match every use case. Instead, we provide a firewall rules template in `install_files/network_firewall/rules`. This template is written in the iptables format, which you will need to manually translate for your firewall and preferred configuration method. +After saving the new network configuration, you may still encounter the "No DNS servers configured" error when trying to launch the Unsafe Browser. If you encounter this issue, you can resolve it by disconnecting from the network and then reconnecting, which causes the network configuration to be reloaded. -For pfSense, see Section 6 of the pfSense Guide for information on setting up firewall rules through the WebGUI. Here are some tips on interpreting the rules template for pfSense: +To do this, click the network icon in the system toolbar, and click "Disconnect" under the bolded name of the currently active network connection. After it disconnects, click the network icon again and click the name of the connection to reconnect. You should see a popup notification that says "Connection Established", followed several seconds later by a "Tor is ready" popup notification. -1. Create aliases for the repeated values (IPs and ports). -2. pfSense is a stateful firewall, which means that you don't need corresponding rules for the iptables rules that allow incoming traffic in response to outgoing traffic (`--state ESTABLISHED,RELATED`). pfSense does this for you automatically. -3. You should create the rules on the interface where the traffic originates from. The easy way to do this is look at the sources (`-s`) of each of the iptables rules, and create that rule on the corresponding interface: +### Set up OPT1 - * `-s APP_IP` → `LAN` - * `-s MONITOR_IP` → `OPT1` +We set up the LAN interface during the initial configuration. We now need to set up the OPT1 interface for the Application Server. Start by connecting the Application Server to the OPT1 port. Then use the WebGUI to configure the OPT1 interface. Go to `Interfaces → OPT1`, and check the box to "Enable Interface". Use these settings: +- IPv4 Configuration Type: Static IPv4 +- IPv4 Address: Application Gateway + +Make sure that the CIDR routing prefix is correct. Leave everything else as the default. Save and Apply Changes. + +### Set up OPT2 + +If you have 4 NICs, you will have to enable the OPT2 interface. Go to `Interfaces → OPT2`, and check the box to "Enable Interface". OPT2 interface is set up similarly to how we set up OPT1 in the previous section. Use these settings: + +- IPv4 Configuration Type: Static IPv4 +- IPv4 Address: Monitor Gateway + +Make sure that the CIDR routing prefix is correct. Leave everything else as the default. Save and Apply Changes. + +### Set up the network firewall rules + +Since there are a variety of firewalls with different configuration interfaces and underlying sets of software, we cannot provide a set of network firewall rules to match every use case. + +This document is currently geared towards pfSense configured using the WebGUI; as a result, the easiest way to set up your firewall rules is to look at the screenshots of a correctly configured firewall below and edit the interfaces, aliases, and firewall rules on your firewall to match them. + +Here are some general tips for setting up pfSense firewall rules: + +1. Create aliases for the repeated values (IPs and ports). +2. pfSense is a stateful firewall, which means that you don't need corresponding rules to allow incoming traffic in response to outgoing traffic (like you would in, e.g. iptables with `--state ESTABLISHED,RELATED`). pfSense does this for you automatically. +3. You should create the rules *on the interface where the traffic originates*. 4. Make sure you delete the default "allow all" rule on the LAN interface. Leave the "Anti-Lockout" rule enabled. -5. Any traffic that is not explicitly passed is logged and dropped by default in pfSense, so you don't need to add explicit rules (`LOGNDROP`) for that. -6. Since some of the rules are almost identical except for whether they allow traffic from the App Server or the Monitor Server (`-s MONITOR_IP,APP_IP`), you can use the "add a new rule based on this one" button to save time creating a copy of the rule on the other interface. -7. If you are having trouble with connections, the firewall logs can be very helpful. You can find them in the WebGUI in *Status → System Logs → Firewall*. +5. Any traffic that is not explicitly passed is logged and dropped by default in pfSense, so you don't need to add explicit rules (iptables `LOGNDROP`) for that. +6. Since some of the rules are almost identical except for whether they allow traffic from the App Server or the Monitor Server, you can use the "add a new rule based on this one" button to save time creating a copy of the rule on the other interface. +7. If you are troubleshooting connectivity, the firewall logs can be very helpful. You can find them in the WebGUI in *Status → System Logs → Firewall*. -We recognize that this process is cumbersome and may be difficult for people inexperienced in managing networks to understand. We are working on automating much of this for the next SecureDrop release. If you're unsure how to set up your firewall, use the screenshots in the next section as your guide. +We recognize that this process is cumbersome and may be difficult for people inexperienced in managing a firewall. We are working on automating much of this for an upcoming SecureDrop release. If you're unsure how to set up your firewall, use the screenshots in the next section as your guide. + +For more experienced pfSense users, we have included a copy of the `.xml` backup from a correctly configured example firewall (SG-2440) in `install_files/network_firewall/pfsense_full_backup.xml`. Note that this file has been edited by hand to remove potentially sensitive information (admin password hashes and the test server's TLS private key, among other things, were replaced with `REDACTED`), so you probably won't be able to import it directly (we haven't tried). The main sections of the file that you should be interested in are `interfaces`, `filter` (the firewall rules), and `aliases` (necessary to parse the firewall rules). #### Example Screenshots Here are some example screenshots of a working pfSense firewall configuration. +##### 3 NICs Configuration + ![Firewall IP Aliases](images/firewall/ip_aliases.png) ![Firewall Port Aliases](images/firewall/port_aliases.png) ![Firewall LAN Rules](images/firewall/lan_rules.png) ![Firewall OPT1 Rules](images/firewall/opt1_rules.png) +##### 4 NICs Configuration + +![Firewall IP Aliases](images/firewall/ip_aliases_with_opt2.png) +![Firewall Port Aliases](images/firewall/port_aliases.png) +![Firewall LAN Rules](images/firewall/lan_rules_with_opt2.png) +![Firewall OPT1 Rules](images/firewall/opt1_rules_with_opt2.png) +![Firewall OPT2 Rules](images/firewall/opt2_rules.png) + Once you've set up the firewall, **exit the Unsafe Browser**, and continue with the instructions in the [Install Guide](/docs/install.md#set-up-the-servers). + + +### Keeping pfSense Up to Date + +Periodically, the pfSense project maintainers release an update to the pfSense software running on your firewall. You will be notified by the appearance of bold red text saying "Update available" in the **Version** section of the "Status: Dashboard" page (the home page of the WebGUI). + +![Update available](images/firewall/pfsense_update_available.png) + +If you see that an update is available, we recommend installing it. Most of these updates are for minor bugfixes, but occasionally they can contain important security fixes. If you are receiving support from Freedom of the Press Foundation, we will inform you when an important security update is available for your pfSense firewall. Alternatively, you can keep appraised of updates yourself by checking the ["releases" tag on the pfSense Blog](https://blog.pfsense.org/?tag=releases) (protip: use the RSS feed). + +To install the update, click the "click here" link next to "Update available". We recommend checking the "perform full backup prior to upgrade" box in case something goes wrong. Click "Invoke auto upgrade". + +![Invoke auto upgrade](images/firewall/invoke_auto_upgrade.png) + +You will see a blank page with a spinning progress indicator in the browser tab while pfSense performs the backup prior to upgrade. This typically takes a few minutes. Once that's done, you will see a page with a progress bar at the top that will periodically update as the upgrade progresses. Wait for the upgrade to complete, which may take a while depending on the speed of your network. + +*Note:* In a recent test, the progress page did not successfully update itself as the upgraded progressed. After waiting for some time, we refreshed the page and found that the upgrade had completed successfully. If your upgrade is taking longer than expected or not showing any progress, try refreshing the page. diff --git a/install_files/ansible-base/group_vars/securedrop.yml b/install_files/ansible-base/group_vars/securedrop.yml index f35a22db36..a77896719f 100644 --- a/install_files/ansible-base/group_vars/securedrop.yml +++ b/install_files/ansible-base/group_vars/securedrop.yml @@ -2,7 +2,7 @@ # Variables that apply to both the app and monitor server go in this file # If the monitor or app server need different values define the variable in # hosts_vars/app.yml or host_vars/mon.yml host_vars/development.yml -securedrop_app_code_version: "0.3.4" +securedrop_app_code_version: "0.3.5" tor_wait_for_hidden_services: yes tor_hidden_services_parent_dir: "/var/lib/tor/services" @@ -11,7 +11,9 @@ tor_DataDirectory: /var/lib/tor securedrop_tor_user: "debian-tor" disabled_kernel_modules: + - btusb - bluetooth + - iwlmvm - iwlwifi ssh_2fa_dependencies: diff --git a/install_files/network_firewall/pfsense_full_backup.xml b/install_files/network_firewall/pfsense_full_backup.xml new file mode 100755 index 0000000000..8becb439bf --- /dev/null +++ b/install_files/network_firewall/pfsense_full_backup.xml @@ -0,0 +1,781 @@ +<?xml version="1.0"?> +<pfsense> + <version>11.9</version> + <lastchange/> + <theme>pfsense_ng</theme> + <system> + <optimization>normal</optimization> + <hostname>pfSense</hostname> + <domain>securedrop.local</domain> + <dnsserver>8.8.4.4</dnsserver> + <dnsserver>8.8.8.8</dnsserver> + <dnsallowoverride>on</dnsallowoverride> + <group> + <name>all</name> + <description><![CDATA[All Users]]></description> + <scope>system</scope> + <gid>1998</gid> + <member>0</member> + </group> + <group> + <name>admins</name> + <description><![CDATA[System Administrators]]></description> + <scope>system</scope> + <gid>1999</gid> + <member>0</member> + <priv>page-all</priv> + </group> + <user> + <name>admin</name> + <descr><![CDATA[System Administrator]]></descr> + <scope>system</scope> + <groupname>admins</groupname> + <password>REDACTED</password> + <uid>0</uid> + <priv>user-shell-access</priv> + <md5-hash>REDACTED</md5-hash> + <nt-hash>REDACTED</nt-hash> + </user> + <nextuid>2000</nextuid> + <nextgid>2000</nextgid> + <timezone>Etc/UTC</timezone> + <time-update-interval>300</time-update-interval> + <timeservers>0.pfsense.pool.ntp.org</timeservers> + <webgui> + <protocol>https</protocol> + <noautocomplete/> + <ssl-certref>55afb66613c71</ssl-certref> + </webgui> + <disablenatreflection>yes</disablenatreflection> + <disablesegmentationoffloading/> + <disablelargereceiveoffloading/> + <serialspeed>115200</serialspeed> + <enableserial/> + <ipv6allow/> + <powerd_enable/> + <powerd_ac_mode>hadp</powerd_ac_mode> + <powerd_battery_mode>hadp</powerd_battery_mode> + <powerd_normal_mode>hadp</powerd_normal_mode> + <bogons> + <interval>monthly</interval> + </bogons> + <kill_states/> + <crypto_hardware>aesni</crypto_hardware> + </system> + <interfaces> + <wan> + <enable/> + <if>igb0</if> + <ipaddr>dhcp</ipaddr> + <ipaddrv6>dhcp6</ipaddrv6> + <gateway/> + <blockpriv>on</blockpriv> + <blockbogons>on</blockbogons> + <media/> + <mediaopt/> + <dhcp6-duid/> + <dhcp6-ia-pd-len>0</dhcp6-ia-pd-len> + </wan> + <lan> + <enable/> + <if>igb1</if> + <ipaddr>10.20.1.1</ipaddr> + <subnet>24</subnet> + <media/> + <mediaopt/> + </lan> + <opt1> + <if>igb2</if> + <descr><![CDATA[OPT1]]></descr> + <enable/> + <spoofmac/> + <ipaddr>10.20.2.1</ipaddr> + <subnet>24</subnet> + </opt1> + <opt2> + <if>igb3</if> + <descr><![CDATA[OPT2]]></descr> + <enable/> + <spoofmac/> + <ipaddr>10.20.3.1</ipaddr> + <subnet>24</subnet> + </opt2> + </interfaces> + <staticroutes/> + <dhcpd/> + <pptpd> + <mode/> + <redir/> + <localip/> + <remoteip/> + </pptpd> + <snmpd> + <syslocation/> + <syscontact/> + <rocommunity>public</rocommunity> + </snmpd> + <diag> + <ipv6nat> + <ipaddr/> + </ipv6nat> + </diag> + <bridge/> + <syslog/> + <nat> + <outbound> + <mode>automatic</mode> + </outbound> + </nat> + <filter> + <rule> + <id/> + <tracker>1442384610</tracker> + <type>pass</type> + <interface>lan</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp</protocol> + <source> + <address>admin_workstation</address> + </source> + <destination> + <address>local_servers</address> + </destination> + <descr><![CDATA[SSH access for initial installation (Ansible)]]></descr> + <updated> + <time>1442384610</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442384610</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442384638</tracker> + <type>pass</type> + <interface>lan</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp</protocol> + <source> + <address>admin_workstation</address> + </source> + <destination> + <any/> + </destination> + <descr><![CDATA[Tails Tor connection]]></descr> + <updated> + <time>1442384638</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442384638</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442384949</tracker> + <type>pass</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>udp</protocol> + <source> + <address>app_server</address> + </source> + <destination> + <address>monitor_server</address> + <port>OSSEC</port> + </destination> + <descr><![CDATA[OSSEC agent]]></descr> + <updated> + <time>1442384949</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442384949</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385003</tracker> + <type>pass</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp</protocol> + <source> + <address>app_server</address> + </source> + <destination> + <address>monitor_server</address> + <port>ossec_agent_auth</port> + </destination> + <descr><![CDATA[Allow OSSEC agent auth during initial install]]></descr> + <updated> + <time>1442385003</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385003</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385070</tracker> + <type>block</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <source> + <network>opt1</network> + </source> + <destination> + <network>lan</network> + </destination> + <descr><![CDATA[Block non-whitelisted traffic between OPT1 and LAN]]></descr> + <created> + <time>1442385070</time> + <username>[email protected]</username> + </created> + <updated> + <time>1442385087</time> + <username>[email protected]</username> + </updated> + </rule> + <rule> + <id/> + <tracker>1442385121</tracker> + <type>block</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <source> + <network>opt1</network> + </source> + <destination> + <network>opt2</network> + </destination> + <descr><![CDATA[Block non-whitelisted traffic between OPT1 and OPT2]]></descr> + <updated> + <time>1442385121</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385121</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385165</tracker> + <type>pass</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp</protocol> + <source> + <address>app_server</address> + </source> + <destination> + <any/> + </destination> + <descr><![CDATA[Allow TCP out on any port for Tor]]></descr> + <updated> + <time>1442385165</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385165</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385233</tracker> + <type>pass</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp/udp</protocol> + <source> + <address>app_server</address> + </source> + <destination> + <address>external_dns_servers</address> + <port>53</port> + </destination> + <descr><![CDATA[Allow DNS]]></descr> + <updated> + <time>1442385233</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385233</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385357</tracker> + <type>pass</type> + <interface>opt1</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>udp</protocol> + <source> + <address>app_server</address> + </source> + <destination> + <any/> + <port>123</port> + </destination> + <descr><![CDATA[Allow NTP]]></descr> + <created> + <time>1442385357</time> + <username>[email protected]</username> + </created> + <updated> + <time>1442386386</time> + <username>[email protected]</username> + </updated> + </rule> + <rule> + <id/> + <tracker>1442385433</tracker> + <type>block</type> + <interface>opt2</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <source> + <network>opt2</network> + </source> + <destination> + <network>lan</network> + </destination> + <descr><![CDATA[Block all non-whitelisted traffic from OPT2 and LAN]]></descr> + <updated> + <time>1442385433</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385433</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385483</tracker> + <type>block</type> + <interface>opt2</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <source> + <network>opt2</network> + </source> + <destination> + <network>opt1</network> + </destination> + <descr><![CDATA[Block all non-whitelisted traffic from OPT2 and OPT1]]></descr> + <updated> + <time>1442385483</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385483</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385530</tracker> + <type>pass</type> + <interface>opt2</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp</protocol> + <source> + <address>monitor_server</address> + </source> + <destination> + <any/> + </destination> + <descr><![CDATA[Allow TCP out on any port for Tor and SMTP]]></descr> + <updated> + <time>1442385530</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385530</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385574</tracker> + <type>pass</type> + <interface>opt2</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os/> + <protocol>tcp/udp</protocol> + <source> + <address>monitor_server</address> + </source> + <destination> + <address>external_dns_servers</address> + <port>53</port> + </destination> + <descr><![CDATA[Allow DNS]]></descr> + <updated> + <time>1442385574</time> + <username>[email protected]</username> + </updated> + <created> + <time>1442385574</time> + <username>[email protected]</username> + </created> + </rule> + <rule> + <id/> + <tracker>1442385619</tracker> + <type>pass</type> + <interface>opt2</interface> + <ipprotocol>inet</ipprotocol> + <tag/> + <tagged/> + <max/> + <max-src-nodes/> + <max-src-conn/> + <max-src-states/> + <statetimeout/> + <statetype>keep state</statetype> + <os></os> + <protocol>udp</protocol> + <source> + <address>monitor_server</address> + </source> + <destination> + <any/> + <port>123</port> + </destination> + <descr><![CDATA[Allow NTP]]></descr> + <created> + <time>1442385619</time> + <username>[email protected]</username> + </created> + <updated> + <time>1442386405</time> + <username>[email protected]</username> + </updated> + </rule> + </filter> + <shaper> + </shaper> + <ipsec> + <phase1/> + </ipsec> + <aliases> + <alias> + <name>admin_workstation</name> + <address>10.20.1.2</address> + <descr/> + <type>host</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:19:38 +0000]]></detail> + </alias> + <alias> + <name>app_server</name> + <address>10.20.2.2</address> + <descr/> + <type>host</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:19:55 +0000]]></detail> + </alias> + <alias> + <name>external_dns_servers</name> + <address>8.8.8.8 8.8.4.4</address> + <descr/> + <type>host</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:20:33 +0000||Entry added Wed, 16 Sep 2015 06:20:33 +0000]]></detail> + </alias> + <alias> + <name>local_servers</name> + <address>app_server monitor_server</address> + <descr/> + <type>host</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:21:09 +0000||Entry added Wed, 16 Sep 2015 06:21:09 +0000]]></detail> + </alias> + <alias> + <name>monitor_server</name> + <address>10.20.3.2</address> + <descr/> + <type>host</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:21:25 +0000]]></detail> + </alias> + <alias> + <name>OSSEC</name> + <address>1514</address> + <descr/> + <type>port</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:21:48 +0000]]></detail> + </alias> + <alias> + <name>ossec_agent_auth</name> + <address>1515</address> + <descr/> + <type>port</type> + <detail><![CDATA[Entry added Wed, 16 Sep 2015 06:22:01 +0000]]></detail> + </alias> + </aliases> + <proxyarp/> + <cron> + <item> + <minute>1,31</minute> + <hour>0-5</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 adjkerntz -a</command> + </item> + <item> + <minute>1</minute> + <hour>3</hour> + <mday>1</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /etc/rc.update_bogons.sh</command> + </item> + <item> + <minute>*/60</minute> + <hour>*</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 sshlockout</command> + </item> + <item> + <minute>*/60</minute> + <hour>*</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 webConfiguratorlockout</command> + </item> + <item> + <minute>1</minute> + <hour>1</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /etc/rc.dyndns.update</command> + </item> + <item> + <minute>*/60</minute> + <hour>*</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot</command> + </item> + <item> + <minute>30</minute> + <hour>12</hour> + <mday>*</mday> + <month>*</month> + <wday>*</wday> + <who>root</who> + <command>/usr/bin/nice -n20 /etc/rc.update_urltables</command> + </item> + </cron> + <wol/> + <rrd> + <enable/> + </rrd> + <load_balancer> + <monitor_type> + <name>ICMP</name> + <type>icmp</type> + <descr><![CDATA[ICMP]]></descr> + <options/> + </monitor_type> + <monitor_type> + <name>TCP</name> + <type>tcp</type> + <descr><![CDATA[Generic TCP]]></descr> + <options/> + </monitor_type> + <monitor_type> + <name>HTTP</name> + <type>http</type> + <descr><![CDATA[Generic HTTP]]></descr> + <options> + <path>/</path> + <host/> + <code>200</code> + </options> + </monitor_type> + <monitor_type> + <name>HTTPS</name> + <type>https</type> + <descr><![CDATA[Generic HTTPS]]></descr> + <options> + <path>/</path> + <host/> + <code>200</code> + </options> + </monitor_type> + <monitor_type> + <name>SMTP</name> + <type>send</type> + <descr><![CDATA[Generic SMTP]]></descr> + <options> + <send/> + <expect>220 *</expect> + </options> + </monitor_type> + </load_balancer> + <widgets> + <sequence>system_information-container:col1:show,captive_portal_status-container:col1:close,carp_status-container:col1:close,cpu_graphs-container:col1:close,gateways-container:col1:close,gmirror_status-container:col1:close,installed_packages-container:col1:close,interface_statistics-container:col1:close,interfaces-container:col2:show,ipsec-container:col2:close,load_balancer_status-container:col2:close,log-container:col2:close,picture-container:col2:close,rss-container:col2:close,services_status-container:col2:close,traffic_graphs-container:col2:close</sequence> + </widgets> + <openvpn/> + <dnshaper> + </dnshaper> + <unbound> + <enable/> + <dnssec/> + <active_interface/> + <outgoing_interface/> + <custom_options/> + <hideidentity/> + <hideversion/> + <dnssecstripped/> + </unbound> + <cert> + <refid>55afb66613c71</refid> + <descr><![CDATA[webConfigurator default (55afb66613c71)]]></descr> + <type>server</type> + <crt>REDACTED</crt> + <prv>REDACTED</prv> + </cert> + <revision> + <time>1442386405</time> + <description><![CDATA[[email protected]: /firewall_rules_edit.php made unknown change]]></description> + <username>[email protected]</username> + </revision> + <ppps/> + <gateways/> +</pfsense> + diff --git a/install_files/network_firewall/rules b/install_files/network_firewall/rules deleted file mode 100644 index e8fa881d30..0000000000 --- a/install_files/network_firewall/rules +++ /dev/null @@ -1,48 +0,0 @@ -*filter -:INPUT ACCEPT [0:0] -:FORWARD ACCEPT [0:0] -:OUTPUT ACCEPT [0:0] -:LOGNDROP - [0:0] - -# SSH access for initial installation (Ansible) --A OUTPUT -s ADMIN_IP -p tcp --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "admin ssh traffic" --A INPUT -d ADMIN_IP -p tcp --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "admin ssh traffic" - -# OSSEC agent --A OUTPUT -s APP_IP -d MONITOR_IP -p udp --dport 1514 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "OSSEC agent" --A INPUT -s MONITOR_IP -d APP_IP -p udp --sport 1514 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "OSSEC agent" - -# Allow OSSEC agent auth during initial install --A OUTPUT -s APP_IP -d MONITOR_IP -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "OSSEC agent auth" --A INPUT -s MONITOR_IP -d APP_IP -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "OSSEC agent auth" - -# Block non-whitelisted traffic between LAN and OPT1 -# If using the networks from the install documentation -# LAN_NET = 10.20.1.0/24 -# OPT1_NET = 10.20.2.0/24 --A OUTPUT -s LAN_NET -d OPT1_NET -j LOGNDROP -m comment --comment "Block non-whitelisted traffic" --A INPUT -s OPT1_NET -d LAN_NET -j LOGNDROP -m comment --comment "Block non-whitelisted traffic" - -# allow tor outbound --A OUTPUT -s ADMIN_WORKSTATION,APP_IP,MONITOR_IP -p tcp -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "Allow Tor" --A INPUT -d ADMIN_WORKSTATION,APP_IP,MONITOR_IP -p tcp -m state --state ESTBALISHED,RELATED -j ACCEPT -m comment --comment "Allow Tor" - -# allow DNS --A OUTPUT -s APP_IP,MONITOR_IP -d EXTERNAL_DNS_SERVERS -p udp --dport 53 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "Allow DNS" --A INPUT -s EXTERNAL_DNS_SERVERS -d APP_IP,MONITOR_IP -p udp --sport 53 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment ---comment "Allow DNS" - -# allow NTP --A OUTPUT -s APP_IP,MONITOR_IP -p udp --sport 123 --dport 123 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "Allow NTP" --A INPUT -d APP_IP,MONITOR_IP -p udp --sport 123 --dport 123 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "Allow NTP" - -# Drop and log all other traffic --A INPUT -j LOGNDROP -m comment --comment "Drop all other incomming traffic" --A OUTPUT -j LOGNDROP -m comment --comment "Drop all other outgoing traffic" - -# LOGNDROP everything else -# The log prefixes were added to make these match the default OSSEC rules for iptables --A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-prefix "Denied_TCP " --A LOGNDROP -p udp -m limit --limit 5/min -j LOG --log-prefix "Denied_UDP " --A LOGNDROP -p icmp -m limit --limit 5/min -j LOG --log-prefix "Denied_ICMP " --A LOGNDROP -j DROP -COMMIT diff --git a/install_files/securedrop-app-code/DEBIAN/control b/install_files/securedrop-app-code/DEBIAN/control index 347fc410e3..aecabc72c4 100644 --- a/install_files/securedrop-app-code/DEBIAN/control +++ b/install_files/securedrop-app-code/DEBIAN/control @@ -4,7 +4,7 @@ Priority: optional Maintainer: SecureDrop Team <[email protected]> Homepage: https://freedom.press/securedrop Package: securedrop-app-code -Version: 0.3.4 +Version: 0.3.5 Architecture: amd64 Depends: python-pip,apparmor-utils,gnupg2,haveged,python,python-pip,secure-delete,sqlite,apache2-mpm-worker,libapache2-mod-wsgi,libapache2-mod-xsendfile,redis-server,supervisor Description: Packages the SecureDrop application code pip dependencies and apparmor profiles. This package will put the apparmor profiles in enforce mode. This package does use pip to install the pip wheelhouse diff --git a/install_files/securedrop-app-code/usr/share/doc/securedrop-app-code/changelog.Debian b/install_files/securedrop-app-code/usr/share/doc/securedrop-app-code/changelog.Debian index f1c2910b2b..12d51826b5 100644 --- a/install_files/securedrop-app-code/usr/share/doc/securedrop-app-code/changelog.Debian +++ b/install_files/securedrop-app-code/usr/share/doc/securedrop-app-code/changelog.Debian @@ -1,3 +1,14 @@ +securedrop-app-code (0.3.5) trusty; urgency=medium + + * Use certificate verification instead of fingerprint verification by default for the OSSEC Postfix configuration (#1076) + * Fix apache2 service failing to start on Digital Ocean (#1078) + * Allow Apache to rotate its logs (#1074) + * Prevent reboots during cron-apt upgrade (#1071) + * Update documentation (#1107, #1112, #1113) + * Blacklist additional kernel modules used for wireless networking (#1116) + + -- SecureDrop Team <[email protected]> Fri, 18 Sep 2015 21:28:41 +0000 + securedrop-app-code (0.3.4) trusty; urgency=medium * Fix ineffective SSH connection throttling (iSEC-15FTC-7, #1053) diff --git a/install_files/securedrop-ossec-agent/DEBIAN/control b/install_files/securedrop-ossec-agent/DEBIAN/control index 3ab20f307f..45becadfac 100644 --- a/install_files/securedrop-ossec-agent/DEBIAN/control +++ b/install_files/securedrop-ossec-agent/DEBIAN/control @@ -4,7 +4,7 @@ Priority: optional Maintainer: SecureDrop Team <[email protected]> Homepage: https://freedom.press/securedrop Package: securedrop-ossec-agent -Version: 2.8.2+0.3.4 +Version: 2.8.2+0.3.5 Architecture: amd64 Depends: ossec-agent Replaces: ossec-agent diff --git a/install_files/securedrop-ossec-server/DEBIAN/control b/install_files/securedrop-ossec-server/DEBIAN/control index 13a9cec93d..5f8c96d3ee 100644 --- a/install_files/securedrop-ossec-server/DEBIAN/control +++ b/install_files/securedrop-ossec-server/DEBIAN/control @@ -4,7 +4,7 @@ Priority: optional Maintainer: SecureDrop Team <[email protected]> Homepage: https://freedom.press/securedrop Package: securedrop-ossec-server -Version: 2.8.2+0.3.4 +Version: 2.8.2+0.3.5 Architecture: amd64 Depends: ossec-server Replaces: ossec-server diff --git a/securedrop/version.py b/securedrop/version.py index bfeb9e74ab..40ed83d946 100644 --- a/securedrop/version.py +++ b/securedrop/version.py @@ -1 +1 @@ -__version__ = '0.3.4' +__version__ = '0.3.5'
django-wiki__django-wiki-1287
Release final 0.10 with Django 4.2 support Issue title kind of says it :) Would be nice to do this soon :+1:
[ { "content": "__version__ = \"0.10b1\"\n", "path": "src/wiki/__about__.py" } ]
[ { "content": "__version__ = \"0.10\"\n", "path": "src/wiki/__about__.py" } ]
diff --git a/docs/release_notes.rst b/docs/release_notes.rst index 53e73236b..9acb60979 100644 --- a/docs/release_notes.rst +++ b/docs/release_notes.rst @@ -5,14 +5,49 @@ Release notes Release plan ------------ - -- **0.3.x** series suppors Django 1.11. As with the upstream Django release, 0.3 was be the last series with Python 2.7 support. -- **0.4.x** supports Django 1.11 and Django 2.1 and Python 3.4+. -- **0.5.x** Remove Django 1.11 support, adds Django 2.2 and 3.x support. Python 3.5+. -- **0.6.x** Targets Bootstrap v4, if you are interested in this work, please get in touch on Github! -- **0.7.x** Removes Django 2.1 support, adds Django 3.1, 3.2 +- **0.11.x** Update bootstrap to v5 and removes Python 3.7 support. +- **0.10.x** Uses Hatch as the new build system and has upgrades for Django 4.x and Python 3.11. - **0.9.x** Definitely Removes Python 3.5 and 3.6 support +- **0.7.x** Removes Django 2.1 support, adds Django 3.1, 3.2 +- **0.6.x** Targets Bootstrap v4, if you are interested in this work, please get in touch on Github! +- **0.5.x** Remove Django 1.11 support, adds Django 2.2 and 3.x support. Python 3.5+. +- **0.4.x** supports Django 1.11 and Django 2.1 and Python 3.4+. +- **0.3.x** series suppors Django 1.11. As with the upstream Django release, 0.3 was be the last series with Python 2.7 support. + + +0.10 +---- + +Released on 2023-05-15 + +Changed +~~~~~~~ + +* Removed sqlite database and use fixtures url-issue:`1260` (Oscar Cortez) +* Improved settings and middleware for demo url-issue:`1267` (Oscar Cortez) +* Updated languages and use the new Transifex client with Docker (Benjamin Balder Bach) +* Updated django requirement from <4.2,>=2.1 to >=2.1,<4.3 :url-issue:`1275` (Oscar Cortez) +* Upgraded for Sphinx 6 on Read the Docs :url-issue:`1270` (Benjamin Balder Bach) +* Improved Read The Docs configuration :url-issue:`1283` (Oscar Cortez) + +Added +~~~~~ + +* New milestone for v0.11 (https://github.com/django-wiki/django-wiki/milestone/13) +* Added support for the latest Bleach package version :url-issue:`1264` (Oscar Cortez) +* Added environments for Python 3.11 and Django 4.1 :url-issue:`1265` (Oscar Cortez) +* Use the new Transifex client in Docker :url-issue:`1284` (Benjamin Balder Bach) +* Improve discussion forms :url-issue:`1262` (Oscar Cortez) + +Fixed +~~~~~ +* Bleach is deprecated :url-issue:`1259` (Oscar Cortez) +* TypeError: unsupported operand type(s) for +: 'frozenset' and 'list' url-issue:`1257` and :url-issue:`1251` (Oscar Cortez) +* Editing the demo without a login :url-issue:`1263` (Oscar Cortez) +* jQuery broken on docs pages :url-issue:`1281` (Benjamin Balder Bach) +* Solve yml issues and improve issue templates :url-issue:`1261` (Oscar Cortez) +* Fix Release Date for 0.10b1 :url-issue:`1282` (Thomas Rinklin) 0.10b1 ------ diff --git a/src/wiki/__about__.py b/src/wiki/__about__.py index 40b6f0473..91bf82336 100644 --- a/src/wiki/__about__.py +++ b/src/wiki/__about__.py @@ -1 +1 @@ -__version__ = "0.10b1" +__version__ = "0.10"
django-wiki__django-wiki-1337
Django 5 support Pending other dependencies * https://github.com/django-wiki/django-nyt/issues/126
[ { "content": "__version__ = \"0.10\"\n", "path": "src/wiki/__about__.py" } ]
[ { "content": "__version__ = \"0.11rc1\"\n", "path": "src/wiki/__about__.py" } ]
diff --git a/.circleci/config.yml b/.circleci/config.yml index 0099e91cb..5d2abab6c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,36 +34,58 @@ jobs: workflows: - main: + test: jobs: - hatch: + name: "Python 3.9, Django 3.2" hatch_env: "py3.9-dj3.2" python_version: "3.9" - hatch: + name: "Python 3.9, Django 4.0" hatch_env: "py3.9-dj4.0" python_version: "3.9" - hatch: + name: "Python 3.9, Django 4.1" hatch_env: "py3.9-dj4.1" python_version: "3.9" - hatch: + name: "Python 3.9, Django 4.2" hatch_env: "py3.9-dj4.2" python_version: "3.9" - hatch: + name: "Python 3.10, Django 3.2" hatch_env: "py3.10-dj3.2" python_version: "3.10" - hatch: + name: "Python 3.10, Django 4.0" hatch_env: "py3.10-dj4.0" python_version: "3.10" - hatch: + name: "Python 3.10, Django 4.1" hatch_env: "py3.10-dj4.1" python_version: "3.10" - hatch: + name: "Python 3.10, Django 4.2" hatch_env: "py3.10-dj4.2" python_version: "3.10" - hatch: + name: "Python 3.10, Django 5.0" + hatch_env: "py3.10-dj5.0" + python_version: "3.10" + - hatch: + name: "Python 3.11, Django 4.1" hatch_env: "py3.11-dj4.1" python_version: "3.11" - hatch: + name: "Python 3.11, Django 4.2" hatch_env: "py3.11-dj4.2" python_version: "3.11" + - hatch: + name: "Python 3.11, Django 5.0" + hatch_env: "py3.11-dj5.0" + python_version: "3.11" + - hatch: + name: "Python 3.12, Django 5.0" + hatch_env: "py3.12-dj5.0" + python_version: "3.12" - lint diff --git a/README.rst b/README.rst index ea9e9acb2..2c9527333 100644 --- a/README.rst +++ b/README.rst @@ -24,6 +24,13 @@ The below table explains which Django versions are supported. +------------------+----------------+--------------+ | Release | Django | Upgrade from | +==================+================+==============+ +| 0.11.x | 3.2, 4.0, 4.1, | 0.10 | +| | 4.2, 5.0 | | ++------------------+----------------+--------------+ +| 0.10.x | 2.2, 3.0, 3.1, | 0.7 | +| | 3.2, 4.0, 4.1, | | +| | 4.2 | | ++------------------+----------------+--------------+ | 0.9.x | 2.2, 3.0, 3.1, | 0.7 | | | 3.2, 4.0 | | +------------------+----------------+--------------+ diff --git a/docs/release_notes.rst b/docs/release_notes.rst index 0a9dd2682..c72a951f7 100644 --- a/docs/release_notes.rst +++ b/docs/release_notes.rst @@ -5,7 +5,8 @@ Release notes Release plan ------------ -- **0.11.x** Update bootstrap to v5 and removes Python 3.7 support. +- **0.12.x** Update bootstrap to v5 +- **0.11.x** Adds Django 5.x support and Python 3.12 - **0.10.x** Uses Hatch as the new build system and has upgrades for Django 4.x and Python 3.11. - **0.9.x** Definitely Removes Python 3.5 and 3.6 support - **0.7.x** Removes Django 2.1 support, adds Django 3.1, 3.2 @@ -14,6 +15,58 @@ Release plan - **0.4.x** supports Django 1.11 and Django 2.1 and Python 3.4+. - **0.3.x** series suppors Django 1.11. As with the upstream Django release, 0.3 was be the last series with Python 2.7 support. +0.11rc1 +------- + +Added +~~~~~ + +* ``[TOC]`` Markdown extension now accepts several arguments, ``toc_depth``, ``title`` and more :url-issue:`1304` (Ryan Henrichson) +* Pymdown-extensions support is gradually added: + + * `PyMDown Blocks <https://facelessuser.github.io/pymdown-extensions/extensions/blocks/>`__ are now supported :url-issue:`1316` (Ryan Henrichson) + * Sidebar documentation for new plugin (enable by adding ``wiki.plugins.pymdown.apps.PyMdownConfig`` to your installed apps :url-issue:`1334` (Ryan Henrichson, Benjamin Balder Bach) + +* Add support for Django 5.0 :url-issue:`1337` (Benjamin Balder Bach) +* Add support for Python 3.12 :url-issue:`1337` (Benjamin Balder Bach) +* Markdown 3.4 and 3.5 support :url-issue:`1313` (Ryan Henrichson) + +Fixed +~~~~~ + +* Fix xframe_options_sameorigin bug in MergeView :url-issue:`1294` (liuxiawei 刘夏唯) +* Read the Docs configuration, setup and theme upgraded (Benjamin Balder Bach) +* CodeCov setup restored :url-issue:`1295` (Oscar Cortez) +* Editsection plugin: Relax path regex (fixing NoReverseMatch) + tests :url-issue:`1299` (Chris Vigelius) +* Fixed extending ``WIKI_MARKDOWN_HTML_WHITELIST`` :url-issue:`1314` (Ryan Henrichson) + +Changed +~~~~~~~ + +* ``align`` attributes in ``<td>`` tags are allowed in generated HTML :url-issue:`1320` (yengip) +* Codebase linted with ruff (Black and flake8 removed) :url-issue:`1321` (Oscar Cortez) +* Dependencies bumped: + + * django-nyt 1.4 + * django-mptt 0.16 + * Markdown 3.5 + +* Dependency added: + + * pymdown-extensions 10.5 + +Translations +~~~~~~~~~~~~ + +* + + +Removed +~~~~~~~ + +* Removes support for Django 2.2, 3.0, 3.1 +* Removes support for Python 3.7, 3.8, 3.9 + 0.10 ---- diff --git a/pyproject.toml b/pyproject.toml index 8810719c0..4bd229c85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ classifiers = [ "Framework :: Django :: 3.2", "Framework :: Django :: 4.1", "Framework :: Django :: 4.2", + "Framework :: Django :: 5.0", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", @@ -30,6 +31,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", @@ -39,14 +41,14 @@ classifiers = [ ] dependencies = [ # Django version support is added one minor release at a time - "Django>=3.2,<4.3", + "Django>=3.2,<5.1", # bleach has been pretty stable, hence the loose pin "bleach[css]>=6,<7", # Pillow is used very little and has never broken "Pillow", # django-nyt is maintained by django-wiki maintainers, so we can # control breakage and pinning ourselves - "django-nyt>=1.2.2", + "django-nyt>=1.4,<1.5", # django-mptt has had several breaking changes "django-mptt>=0.13,<0.17", # django-sekizai is basically only releasing every time @@ -154,6 +156,10 @@ django = ["3.2", "4.0"] python = ["3.9", "3.10", "3.11"] django = ["4.1", "4.2"] +[[tool.hatch.envs.test.matrix]] +python = ["3.10", "3.11", "3.12"] +django = ["5.0"] + [tool.hatch.envs.transifex] dependencies = [] diff --git a/src/wiki/__about__.py b/src/wiki/__about__.py index 91bf82336..d81d225d5 100644 --- a/src/wiki/__about__.py +++ b/src/wiki/__about__.py @@ -1 +1 @@ -__version__ = "0.10" +__version__ = "0.11rc1" diff --git a/tests/core/test_models.py b/tests/core/test_models.py index 1844a0258..3583af349 100644 --- a/tests/core/test_models.py +++ b/tests/core/test_models.py @@ -129,9 +129,9 @@ def test_cache(self): ) expected = """<h1 id="wiki-toc-header">header""" """.*</h1>""" # cached content does not exist yet. this will create it - self.assertRegexpMatches(a.get_cached_content(), expected) + self.assertRegex(a.get_cached_content(), expected) # actual cached content test - self.assertRegexpMatches(a.get_cached_content(), expected) + self.assertRegex(a.get_cached_content(), expected) def test_articlerevision_presave_signals(self): a = Article.objects.create() diff --git a/tests/core/test_template_tags.py b/tests/core/test_template_tags.py index ae33b24ed..22c59e0f6 100644 --- a/tests/core/test_template_tags.py +++ b/tests/core/test_template_tags.py @@ -211,14 +211,14 @@ def test_called_with_preview_content_and_article_have_current_revision( output = wiki_render({}, article, preview_content=content) self.assertCountEqual(self.keys, output) self.assertEqual(output["article"], article) - self.assertRegexpMatches(output["content"], expected) + self.assertRegex(output["content"], expected) self.assertIs(output["preview"], True) self.assertEqual(output["plugins"], {"spam": "eggs"}) self.assertEqual(output["STATIC_URL"], django_settings.STATIC_URL) self.assertEqual(output["CACHE_TIMEOUT"], settings.CACHE_TIMEOUT) output = self.render({"article": article, "pc": content}) - self.assertRegexpMatches(output, expected) + self.assertRegex(output, expected) def test_called_with_preview_content_and_article_dont_have_current_revision( self diff --git a/tests/plugins/attachments/test_views.py b/tests/plugins/attachments/test_views.py index c6746c99e..0dbd053d8 100644 --- a/tests/plugins/attachments/test_views.py +++ b/tests/plugins/attachments/test_views.py @@ -159,12 +159,12 @@ def test_render(self): r'<span class="attachment"><a href=".*attachments/download/1/"' r' title="Click to download test\.txt">\s*test\.txt\s*</a>' ) - self.assertRegexpMatches(output, expected) + self.assertRegex(output, expected) def test_render_missing(self): output = self.get_article("[attachment:2]") expected = r'<span class="attachment attachment-deleted">\s*Attachment with ID #2 is deleted.\s*</span>' - self.assertRegexpMatches(output, expected) + self.assertRegex(output, expected) def test_render_title(self): output = self.get_article('[attachment:1 title:"Test title"]') @@ -172,7 +172,7 @@ def test_render_title(self): r'<span class="attachment"><a href=".*attachments/download/1/"' r' title="Click to download test\.txt">\s*Test title\s*</a>' ) - self.assertRegexpMatches(output, expected) + self.assertRegex(output, expected) def test_render_title_size(self): output = self.get_article('[attachment:1 title:"Test title 2" size]') @@ -180,4 +180,4 @@ def test_render_title_size(self): r'<span class="attachment"><a href=".*attachments/download/1/"' r' title="Click to download test\.txt">\s*Test title 2 \[25[^b]bytes\]\s*</a>' ) - self.assertRegexpMatches(output, expected) + self.assertRegex(output, expected) diff --git a/tests/plugins/globalhistory/test_globalhistory.py b/tests/plugins/globalhistory/test_globalhistory.py index 4fb10e18d..32e3ac07d 100644 --- a/tests/plugins/globalhistory/test_globalhistory.py +++ b/tests/plugins/globalhistory/test_globalhistory.py @@ -17,7 +17,7 @@ def test_history(self): response = self.client.get(url) expected = "(?s).*Root Article.*no log message.*" - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) URLPath.create_urlpath( URLPath.root(), @@ -30,7 +30,7 @@ def test_history(self): expected = ( "(?s).*TestHistory1.*Comment 1.*" "Root Article.*no log message.*" ) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) urlpath = URLPath.create_urlpath( URLPath.root(), @@ -45,13 +45,13 @@ def test_history(self): "Root Article.*no log message.*" ) response = self.client.get(url) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) response = self.client.get(url0) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) response = self.client.get(url1) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) response = self.client.post( reverse("wiki:edit", kwargs={"path": "testhistory2/"}), @@ -72,10 +72,10 @@ def test_history(self): "Root Article.*no log message.*" ) response = self.client.get(url) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) response = self.client.get(url0) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) expected = ( "(?s).*TestHistory2Mod.*Testing Revision.*" @@ -83,7 +83,7 @@ def test_history(self): "Root Article.*no log message.*" ) response = self.client.get(url1) - self.assertRegexpMatches(response.rendered_content, expected) + self.assertRegex(response.rendered_content, expected) def test_translation(self): # Test that translation of "List of %s changes in the wiki." exists.
cookiecutter__cookiecutter-1273
PEP257 docstrings for file "./docs/__init__.py" Cover `./docs/__init__.py` file with docstrings and follow [PEP257](https://www.python.org/dev/peps/pep-0257/). We use [pydocstyle](https://pypi.org/project/pydocstyle/) for validation. Current validation log: ``` ./docs/__init__.py:1 at module level: D104: Missing docstring in public package ``` Subtask for #742
[ { "content": "", "path": "docs/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"Main package for docs.\"\"\"\n", "path": "docs/__init__.py" } ]
diff --git a/docs/__init__.py b/docs/__init__.py index e69de29bb..e7eaad7bd 100644 --- a/docs/__init__.py +++ b/docs/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- + +"""Main package for docs."""
microsoft__Qcodes-4248
Filename collision due to case-sensitivity in Keysight folder When pulling the qcodes repository on Windows, there is a filename collision between the uppercase and lowercase Keysight folders. The error message is as follows: ```bash $ git clone https://github.com/QCoDeS/Qcodes.git Cloning into 'Qcodes'... remote: Enumerating objects: 1522, done. remote: Counting objects: 100% (1522/1522), done. remote: Compressing objects: 100% (655/655), done. Receiving objects: 100% (112398/112398), 242.65 MiB | 2.58 MiB/s, done. Resolving deltas: 100% (87395/87395), done. warning: the following paths have collided (e.g. case-sensitive paths on a case-insensitive filesystem) and only one from the same colliding group is in the working tree: 'qcodes/instrument_drivers/Keysight/__init__.py' 'qcodes/instrument_drivers/keysight/__init__.py' ``` I propose we remove the lowercase keysight folder as it has now been deprecated for over 2 years. ### System Windows/OSX and other OS's with case insensitive file systems.
[ { "content": "", "path": "qcodes/instrument_drivers/Keysight/__init__.py" } ]
[ { "content": "# Intentionally left blank\n", "path": "qcodes/instrument_drivers/Keysight/__init__.py" } ]
diff --git a/qcodes/instrument_drivers/Keysight/__init__.py b/qcodes/instrument_drivers/Keysight/__init__.py index e69de29bb2d..e484f8b84cd 100644 --- a/qcodes/instrument_drivers/Keysight/__init__.py +++ b/qcodes/instrument_drivers/Keysight/__init__.py @@ -0,0 +1 @@ +# Intentionally left blank diff --git a/qcodes/instrument_drivers/keysight/Use Upper Case Keysightfolder b/qcodes/instrument_drivers/keysight/Use Upper Case Keysightfolder deleted file mode 100644 index 99af1006b40..00000000000 --- a/qcodes/instrument_drivers/keysight/Use Upper Case Keysightfolder +++ /dev/null @@ -1,2 +0,0 @@ -In the repository there are two different Keysight folders: "Keysight" and "keysight". Under some operating systems they appear as one. If you should see two, use the upper case one. For more information go to: -https://github.com/QCoDeS/Qcodes/pull/725 diff --git a/qcodes/instrument_drivers/keysight/__init__.py b/qcodes/instrument_drivers/keysight/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000
kivy__python-for-android-2797
Python exception when using colorlog due to incomplete IO implementation in sys.stderr I am attempting to run a program which uses `TTYColoredFormatter` from [colorlog](https://pypi.org/project/colorlog/). This class formats log messages, adding ANSI escape codes _only_ if the stream it is writing to returns `True` for `stream.isatty()`. Unfortunately, python-for-android's bootstrap code replaces sys.stderr and sys.stdout with a custom `LogFile` object: https://github.com/kivy/python-for-android/blob/53d77fc26c9e37eb6ce05f8899f4dae8334842b1/pythonforandroid/bootstraps/common/build/jni/application/src/start.c#L226-L242 This object doesn't implement `isatty()` (or much else, for that matter). As a result, the program raises an exception: ``` 03-03 13:32:56.222 5806 5891 I python : Traceback (most recent call last): 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/main.py", line 3, in <module> 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/__main__.py", line 7, in main 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/activity.py", line 19, in <module> 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/kolibri_utils.py", line 13, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/android_whitenoise.py", line 11, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/__init__.py", line 10, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/utils/env.py", line 29, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/dist/colorlog/colorlog.py", line 203, in __init__ 03-03 13:32:56.223 5806 5891 I python : AttributeError: 'LogFile' object has no attribute 'isatty' ``` (For reference, we're using colorlog v3.2.0, so the code raising the exception looks like this: https://github.com/borntyping/python-colorlog/blob/v3.2.0/colorlog/colorlog.py#L191-L211). Service don t start anymore, as smallIconName extra is now mandatory https://github.com/kivy/python-for-android/blob/8cb497dd89e402478011df61f4690b963a0c96da/pythonforandroid/bootstraps/common/build/src/main/java/org/kivy/android/PythonService.java#L116 ```java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.String.equals(java.lang.Object)' on a null object reference``` We could test if null before.
[ { "content": "__version__ = '2023.02.10'\n", "path": "pythonforandroid/__init__.py" } ]
[ { "content": "__version__ = '2023.05.21'\n", "path": "pythonforandroid/__init__.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3724af62ab..98288f265d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,45 @@ # Changelog +## [v2023.05.21](https://github.com/kivy/python-for-android/tree/v2023.05.21) + +[Full Changelog](https://github.com/kivy/python-for-android/compare/v2023.02.10...v2023.05.21) + +**Closed issues:** + +- python [\#2795](https://github.com/kivy/python-for-android/issues/2795) +- Create APK from PyQt app [\#2794](https://github.com/kivy/python-for-android/issues/2794) +- psutil/\_psutil\_linux.so" is 64-bit instead of 32-bit [\#2785](https://github.com/kivy/python-for-android/issues/2785) +- pythonforandroid.toolchain.py: error: unrecognized arguments: --dir [\#2775](https://github.com/kivy/python-for-android/issues/2775) +- App [\#2774](https://github.com/kivy/python-for-android/issues/2774) +- org.kivy.android.PythonActivity$NewIntentListener is not visible from class loader java.lang.IllegalArgumentException [\#2770](https://github.com/kivy/python-for-android/issues/2770) +- Service don t start anymore, as smallIconName extra is now mandatory [\#2768](https://github.com/kivy/python-for-android/issues/2768) +- Start a background sticky service that auto-restart. [\#2767](https://github.com/kivy/python-for-android/issues/2767) +- Fail installation [\#2764](https://github.com/kivy/python-for-android/issues/2764) +- Python exception when using colorlog due to incomplete IO implementation in sys.stderr [\#2762](https://github.com/kivy/python-for-android/issues/2762) +- AttributeError: 'org.kivy.android.PythonService' object has no attribute 'getComponentName' [\#2760](https://github.com/kivy/python-for-android/issues/2760) +- https://code.videolan.org not available [\#2758](https://github.com/kivy/python-for-android/issues/2758) +- Cannot install Python-for-Android [\#2754](https://github.com/kivy/python-for-android/issues/2754) +- c/\_cffi\_backend.c:407:23: error: expression is not assignable [\#2753](https://github.com/kivy/python-for-android/issues/2753) +- not install [\#2749](https://github.com/kivy/python-for-android/issues/2749) +- APK crashes upon launch. logcat error: null pointer dereference \(occurs with imported modules\) [\#2358](https://github.com/kivy/python-for-android/issues/2358) +- Error occured while building the aplication using buildozer [\#2104](https://github.com/kivy/python-for-android/issues/2104) +- "Could Not Extract Public Data" Needs very explicit instructions or feedback to the user [\#260](https://github.com/kivy/python-for-android/issues/260) + +**Merged pull requests:** + +- Update Kivy recipe for 2.2.0 [\#2793](https://github.com/kivy/python-for-android/pull/2793) ([misl6](https://github.com/misl6)) +- Update `pyjnius` version to `1.5.0` [\#2791](https://github.com/kivy/python-for-android/pull/2791) ([misl6](https://github.com/misl6)) +- fix tools/liblink: syntax error [\#2771](https://github.com/kivy/python-for-android/pull/2771) ([SomberNight](https://github.com/SomberNight)) +- fix \#2768 smallIconName null can t be compared to String [\#2769](https://github.com/kivy/python-for-android/pull/2769) ([brvier](https://github.com/brvier)) +- android\_api to integer [\#2765](https://github.com/kivy/python-for-android/pull/2765) ([kuzeyron](https://github.com/kuzeyron)) +- Use io.IOBase for LogFile [\#2763](https://github.com/kivy/python-for-android/pull/2763) ([dylanmccall](https://github.com/dylanmccall)) +- Home app functionality [\#2761](https://github.com/kivy/python-for-android/pull/2761) ([kuzeyron](https://github.com/kuzeyron)) +- Add debug loggings for identifying a matching dist [\#2751](https://github.com/kivy/python-for-android/pull/2751) ([BitcoinWukong](https://github.com/BitcoinWukong)) +- Add PyAV recipe [\#2750](https://github.com/kivy/python-for-android/pull/2750) ([DexerBR](https://github.com/DexerBR)) +- Merge master into develop [\#2748](https://github.com/kivy/python-for-android/pull/2748) ([misl6](https://github.com/misl6)) +- Add support for Python 3.10 and make it the default while building hostpython3 and python3 [\#2577](https://github.com/kivy/python-for-android/pull/2577) ([misl6](https://github.com/misl6)) + + ## [v2023.02.10](https://github.com/kivy/python-for-android/tree/v2023.02.10) (2023-02-10) [Full Changelog](https://github.com/kivy/python-for-android/compare/v2023.01.28...v2023.02.10) diff --git a/pythonforandroid/__init__.py b/pythonforandroid/__init__.py index 3e337f8ce6..ec30da5902 100644 --- a/pythonforandroid/__init__.py +++ b/pythonforandroid/__init__.py @@ -1 +1 @@ -__version__ = '2023.02.10' +__version__ = '2023.05.21'
OCHA-DAP__hdx-ckan-1038
Update the version number on the logo and footer. For sprint 25, we will increment to 0.3.2
[ { "content": "hdx_version='v0.3.1'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version='v0.3.2'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index 13e6c49927..a1e294d081 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version='v0.3.1' \ No newline at end of file +hdx_version='v0.3.2' \ No newline at end of file
OCHA-DAP__hdx-ckan-770
remove text from home page Please remove this text from homepage 'This is an early version of the HDX Repository. Initially, you will be able to find global datasets relevant to humanitarian work as well as local datasets from our three pilot locations - Colombia, Kenya and Yemen. You can also create an account and add your own data to the repository to share privately or publicly. Please have a look around and send us your feedback!' this will be covered in the about page. Not sure if yumi will want to adjusts the centering of the remaining HDX and tagline but we can ask her
[ { "content": "hdx_version='v0.2.6'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version='v0.3.0'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css b/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css index 3561f27e3b..428a547358 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css @@ -1014,6 +1014,7 @@ for example: in the user dashboard when the user has no organizations */ width: inherit; top: 30%; margin-left: -220px; + color: #000; } .hdx-modal .controls { diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/home/index.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/home/index.html index 614dfe6757..898be4ad97 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/home/index.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/home/index.html @@ -25,17 +25,19 @@ <div class="row"> <div class="span6"> <h1> - {{ _("The humanitarian data exchange") }} + {{ _("The Humanitarian Data Exchange") }} </h1> <h3> {{ _("Where your data comes to life") }} </h3> <p class="hdxDescription"> + {# {{ _("This is an early version of the HDX Repository. Initially, you will be able to find global datasets relevant to humanitarian work as well as local datasets from our three pilot locations - Colombia, Kenya and Yemen. You can also create an account and add your own data to the repository to share privately or publicly. Please have a look around and send us your feedback!") }} + #} </p> </div> </div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/bulk_process.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/bulk_process.html index dd2af199de..f63a9abea2 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/bulk_process.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/bulk_process.html @@ -89,7 +89,7 @@ <h3 class="dataset-heading"> </table> </form> {% else %} - <p class="empty">{{ _('This organization has no datasets associated to it') }}</p> + <p class="empty">{{ _('This organisation has no datasets associated to it') }}</p> {% endif %} {% endblock %} </div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit.html index 33a27be764..741c239956 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit.html @@ -1,9 +1,9 @@ {% extends "organization/base_form_page.html" %} -{% block subtitle %}{{ _('Edit Organization') }}{% endblock %} +{% block subtitle %}{{ _('Edit Organisation') }}{% endblock %} {% block page_heading_class %}hide-heading{% endblock %} -{% block page_heading %}{{ _('Edit Organization') }}{% endblock %} +{% block page_heading %}{{ _('Edit Organisation') }}{% endblock %} {% block primary %} <div class="create-org"> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit_base.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit_base.html index 37811c9ba1..5ba305c8ce 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit_base.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/edit_base.html @@ -5,7 +5,7 @@ {% block subtitle %}{{ organization.display_name }}{% endblock %} {% block breadcrumb_content %} - <li>{% link_for _('Organizations'), controller='organization', action='index' %}</li> + <li>{% link_for _('Organisations'), controller='organization', action='index' %}</li> {% block breadcrumb_content_inner %} <li>{% link_for organization.display_name|truncate(35), controller='organization', action='read', id=organization.name %}</li> <li class="active">{% link_for _('Admin'), controller='organization', action='edit', id=organization.name %}</li> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/index.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/index.html index cb4a57ac36..21c1ad42a2 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/index.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/index.html @@ -1,9 +1,9 @@ {% extends "page.html" %} -{% block subtitle %}{{ _('Organizations') }}{% endblock %} +{% block subtitle %}{{ _('Organisations') }}{% endblock %} {% block breadcrumb_content %} - <li class="active">{% link_for _('Organizations'), controller='organization', action='index' %}</li> + <li class="active">{% link_for _('Organisations'), controller='organization', action='index' %}</li> {% endblock %} {% block page_header %}{% endblock %} @@ -11,9 +11,9 @@ {% block page_primary_action %} {% if h.check_access('organization_create') %} {% if c.userobj.sysadmin %} - {% link_for _('Add Organization'), controller='organization', action='new', class_='btn btn-primary', icon='plus-sign-alt' %} + {% link_for _('Add Organisation'), controller='organization', action='new', class_='btn btn-primary', icon='plus-sign-alt' %} {% else %} - <a href="/organization/request_new?from=organization" class="btn btn-primary">{{ _("Request New Organization")}}</a> + <a href="/organization/request_new?from=organization" class="btn btn-primary">{{ _("Request New Organisation")}}</a> {% endif %} @@ -21,17 +21,17 @@ {% endblock %} {% block primary_content_inner %} - <h1 class="hide-heading">{% block page_heading %}{{ _('Organizations') }}{% endblock %}</h1> + <h1 class="hide-heading">{% block page_heading %}{{ _('Organisations') }}{% endblock %}</h1> {% block organizations_search_form %} {% set sorting_option = c.sort_by_selected or 'name asc' %} - {% snippet 'snippets/search_form.html', type='organization', query=c.q, sorting_selected=sorting_option, count=c.page.item_count, placeholder=_('Search organizations...'), show_empty=request.params %} + {% snippet 'snippets/search_form.html', type='organization', query=c.q, sorting_selected=sorting_option, count=c.page.item_count, placeholder=_('Search organisations...'), show_empty=request.params %} {% endblock %} {% block organizations_list %} {% if c.page.items or request.params %} {% snippet "organization/snippets/organization_list.html", organizations=c.page.items %} {% else %} <p class="empty"> - {{ _('There are currently no organizations for this site') }}. + {{ _('There are currently no organisations for this site') }}. {% if h.check_access('organization_create') %} {% link_for _('How about creating one?'), controller='organization', action='new' %}</a>. {% endif %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/members.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/members.html index 0300d80838..3f9b269396 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/members.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/members.html @@ -4,7 +4,7 @@ {% set authorized = h.check_access('organization_update', {'id': c.group_dict.id}) %} {% block breadcrumb_content %} - <li>{% link_for _('Organizations'), controller='organization', action='index' %}</li> + <li>{% link_for _('Organisations'), controller='organization', action='index' %}</li> <li>{% link_for c.group_dict.display_name|truncate(35), controller='organization', action='read', id=c.group_dict.name %}</li> <li class="active">{% link_for _('Members'), controller='organization', action='members', id=c.group_dict.name %}</li> {% endblock %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new.html index 84c79919cc..5a039bc014 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new.html @@ -1,25 +1,25 @@ {% extends "organization/base_form_page.html" %} -{% block subtitle %}{{ _('Create an Organization') }}{% endblock %} +{% block subtitle %}{{ _('Create an Organisation') }}{% endblock %} {% block breadcrumb_link %} - {{ h.nav_link(_('Create an Organization'), controller='organization', action='edit', id=c.organization.name) }} + {{ h.nav_link(_('Create an Organisation'), controller='organization', action='edit', id=c.organization.name) }} {% endblock %} -{# block page_heading %}{{ _('Create an Organization') }}{% endblock #} +{# block page_heading %}{{ _('Create an Organisation') }}{% endblock #} {# block page_header %}{% endblock #} {% block breadcrumb_content %} {% block breadcrumb_content_inner %} - <li class="active">{% link_for _('Add Organization'), controller='organization', action='new'%}</li> + <li class="active">{% link_for _('Add Organisation'), controller='organization', action='new'%}</li> {% endblock %} {% endblock %} {% block toolbar %} {{ super() }} - {% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Creating an organization is easy.'), - explanation=_('Organizations don\'t have to be legal entities - they can be informal groups that users create to share data publicily or privately.') %} + {% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Creating an organisation is easy.'), + explanation=_('Organisations don\'t have to be legal entities - they can be informal groups that users create to share data publicily or privately.') %} {% endblock %} {% block primary %} @@ -38,7 +38,7 @@ <h1 class="h1-title uppercase"> {% block page_heading %}1. {{ _('Basic Details') }}{% endblock %} </h1> - <p>{{ _('Tell us some basic details about your organization.') }}</p> + <p>{{ _('Tell us some basic details about your organisation.') }}</p> {% block form %} {{ c.form | safe }} {% endblock %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new_organization_form.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new_organization_form.html index 2abefc2231..9711955049 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new_organization_form.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/new_organization_form.html @@ -16,7 +16,7 @@ {% block save_text %} {%- if action == "edit" -%} - {{ _('Update Organization') }} + {{ _('Update Organisation') }} {%- else -%} {{ _('Submit') }} {%- endif -%} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/organization_preselector.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/organization_preselector.html index 80a32151cc..9b876c7016 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/organization_preselector.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/organization_preselector.html @@ -8,13 +8,13 @@ {{ super() }} {% set find_org_url = h.url_for(controller='organization', action='index') %} {% set create_org_url = h.url_for(controller='organization', action='new') if c.am_sysadmin - else h.url_for(controller='organization', action='index') %} + else h.url_for(controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization') %} {% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Thanks for contributing - you rock.'), - explanation=_('First, please select the organization you will be contributing for. If you are not currently a member - of the organization you\'d like to contribute to, - <a href="'+find_org_url+'">find and request membership to the organization</a> + explanation=_('First, please select the organisation you will be contributing for. If you are not currently a member + of the organisation you\'d like to contribute to, + <a href="'+find_org_url+'">find and request membership to the organisation</a> or - <a href="'+create_org_url+'">add a new organization</a>.') %} + <a href="'+create_org_url+'">add a new organisation</a>.') %} {% endblock %} {% block primary %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html index da45b3f495..e1361a1ac3 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html @@ -61,7 +61,7 @@ {% else %} <div class="big-message"> <p> - {{ _('There are no datasets currently uploaded to this organization.') }} + {{ _('There are no datasets currently uploaded to this organisation.') }} </p> <p> {% if h.check_access('package_create', {'organization_id': c.group_dict.id}) %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read_base.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read_base.html index becb31029d..6ee2d7b9dd 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read_base.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read_base.html @@ -3,7 +3,7 @@ {% block subtitle %}{{ c.group_dict.display_name }}{% endblock %} {% block breadcrumb_content %} - <li>{% link_for _('Organizations'), controller='organization', action='index' %}</li> + <li>{% link_for _('Organisations'), controller='organization', action='index' %}</li> <li>{% link_for c.group_dict.display_name|truncate(35), controller='organization', action='read', id=c.group_dict.name %}</li> {% endblock %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_mem_or_org.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_mem_or_org.html index a115aa1faf..eb8244ceda 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_mem_or_org.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_mem_or_org.html @@ -3,7 +3,7 @@ <div> {% block primary_content %} -{% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Thanks for contributing - you rock.'), explanation=_('You don\'t currently belong to any organizations. You need to belong to an organization before you can contribute a dataset. Please choose an option below:') %} +{% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Thanks for contributing - you rock.'), explanation=_('You don\'t currently belong to any organisations. You need to belong to an organisation before you can contribute a dataset. Please choose an option below:') %} {% snippet "organization/snippets/mem_or_org.html", parent_route='dataset_preselect' %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_new.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_new.html index 2a13cd9215..5ec7a95bb8 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_new.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_new.html @@ -1,10 +1,10 @@ {% extends "organization/new.html" %} {% block toolbar %} - {% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Create new organization'), + {% snippet "snippets/greeting_message.html", show_small_message=True, greeting=_('Create new organisation'), explanation=_('We try to get the highest quality data in our system so we review - new organization requests before they are posted on the site. Once you fill out a request form - we try and accept the new organization request within 24h.') %} + new organisation requests before they are posted on the site. Once you fill out a request form + we try and accept the new organisation request within 24h.') %} {% endblock %} {% block primary_content_inner %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_organization_form.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_organization_form.html index 4614b32301..355f37700e 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_organization_form.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/request_organization_form.html @@ -16,7 +16,7 @@ <h1 class="h1-title uppercase"> {% block page_heading %}1. {{ _('Basic Details') }}{% endblock %} </h1> - <p>{{ _('Tell us some basic details about your organization.') }}</p> + <p>{{ _('Tell us some basic details about your organisation.') }}</p> {{ super() }} </div> </div> @@ -50,7 +50,7 @@ <h1 class="h1-title uppercase"> {{ form.input('your_email', label=_('Your Email'), id='your-email', type='email', placeholder=_('[email protected]'), value=data.your_email, error=errors.your_email, classes=['control-full','org-control','field-with-info', 'mandatory']) }} <div class="org-control-info info-field"> - <div class="org-info-label">{{_('This email should be related to the organization.')}}</div> + <div class="org-info-label">{{_('This email should be related to the organisation.')}}</div> </div> </div> </div> @@ -63,7 +63,7 @@ <h1 class="h1-title uppercase"> <div class="offset1 span9"> <div class="create-org form-actions"> <button class="btn btn-primary create-org-btn" name="save" type="submit"> - {% block save_text %}{{ _('Request New Organization') }}{% endblock %} + {% block save_text %}{{ _('Request New Organisation') }}{% endblock %} </button> </div> </div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/mem_or_org.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/mem_or_org.html index e988796512..e54a38c2ca 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/mem_or_org.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/mem_or_org.html @@ -1,15 +1,15 @@ {# {% snippet "organization/snippets/mem_or_org.html", parent_route='user_dashboard_organizations' %} #} {% set parent_route = parent_route if parent_route else 'user_dashboard_organizations' %} <div class="row row-fluid"> - <div class="span6 lined"><div class="header-user-message header-user-message-med">1. {{ _("REQUEST MEMBERSHIP IN AN ORGANIZATION")}}</div> - <p>{{ _("There's a good chance that your organization is already a part of our database. You can find your organization and request membership. Once the admin has accepted your request, you should be able to upload datasets. Note that you have to be either an editor or admin role in order to upload datasets, so make sure to request one of those roles.")}}</p> + <div class="span6 lined"><div class="header-user-message header-user-message-med">1. {{ _("REQUEST MEMBERSHIP IN AN ORGANISATION")}}</div> + <p>{{ _("There's a good chance that your organisation is already a part of our database. You can find your organisation and request membership. Once the admin has accepted your request, you should be able to upload datasets. Note that you have to be either an editor or admin role in order to upload datasets, so make sure to request one of those roles.")}}</p> <br> - <center><a href="/organization" class="btn">{{ _("Find Your Organization")}}</a></center> + <center><a href="/organisation" class="btn">{{ _("Find Your Organisation")}}</a></center> </div> - <div class="span6 lined"><div class="header-user-message header-user-message-med">2. {{ _("CREATE A NEW ORGANIZATION")}}</div> -<p>{{ _("If you don't see your organization in our organizations list, you can request the creation of a new organization. We try to get the highest quality data in our system, so we review new org requests before they are posted to the site. Once you fill out a request form, however, we try and accept the new org request within 24 hours.")}}</p> + <div class="span6 lined"><div class="header-user-message header-user-message-med">2. {{ _("CREATE A NEW ORGANISATION")}}</div> +<p>{{ _("If you don't see your organisation in our organisations list, you can request the creation of a new organisation. We try to get the highest quality data in our system, so we review new org requests before they are posted to the site. Once you fill out a request form, however, we try and accept the new org request within 24 hours.")}}</p> <br> - <center><a href="/organization/request_new?from={{ parent_route }}" class="btn">{{ _("Request New Organization")}}</a></center> + <center><a href="/organization/request_new?from={{ parent_route }}" class="btn">{{ _("Request New Organisation")}}</a></center> </div> </div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/member_item.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/member_item.html index ec0cd85c06..06a8ccf121 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/member_item.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/member_item.html @@ -20,7 +20,7 @@ <h3 class="list-items dataset-heading"> <div class="list-items counter"> <span class="count"> {{ h.hdx_show_singular_plural(member.ds_num, _('Dataset'), _('Datasets')) }}</span> - - <span class="count"> {{ h.hdx_show_singular_plural(member.org_num, _('Organization'), _('Organizations')) }}</span> + <span class="count"> {{ h.hdx_show_singular_plural(member.org_num, _('Organisation'), _('Organisations')) }}</span> - <span class="count"> {{ h.hdx_show_singular_plural(member.grp_num, _('Countries'), _('Countries')) }}</span> </div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html index 56b4a987f4..5c36615c10 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html @@ -8,7 +8,7 @@ {% block basic_fields %} {% set attrs = {'data-module': 'slug-preview-target', 'type':'hidden'} %} <div class="org-control-container"> - {{ form.input('title', label=_('Name of Organization'), id='field-title', value=data.title, error=errors.title, classes=['control-full', 'org-control', 'mandatory', 'field-with-info'], attrs=attrs) }} + {{ form.input('title', label=_('Name of Organisation'), id='field-title', value=data.title, error=errors.title, classes=['control-full', 'org-control', 'mandatory', 'field-with-info'], attrs=attrs) }} {# Perhaps these should be moved into the controller? #} {% set prefix = h.url_for(controller='organization', action='read', id='') %} @@ -19,19 +19,19 @@ <div class="org-info-label">{{_('Be as specific as possible (i.e. don\'t just say WFP, say WFP-Colombia)')}}</div> </div> </div> - {{ form.prepend('name', label=_('URL'), prepend=prefix, id='field-url', placeholder=_('my-organization'), value=data.name, error=errors.name, attrs=attrs) }} + {{ form.prepend('name', label=_('URL'), prepend=prefix, id='field-url', placeholder=_('my-organisation'), value=data.name, error=errors.name, attrs=attrs) }} <div class="org-control-container"> - {{ form.input('org_url', label=_('URL of Organization'), id='field-image-url', type='url', placeholder=_('http://example.com/about'), value=data.image_url, error=errors.image_url, classes=['control-full','org-control','field-with-info']) }} + {{ form.input('org_url', label=_('URL of Organisation'), id='field-image-url', type='url', placeholder=_('http://example.com/about'), value=data.image_url, error=errors.image_url, classes=['control-full','org-control','field-with-info']) }} <div class="org-control-info info-field"> - <div class="org-info-label">{{_('Where can people go to find more about this organization?')}}</div> + <div class="org-info-label">{{_('Where can people go to find more about this organisation?')}}</div> </div> </div> <div class="org-control-container"> - {{ form.textarea('description', label=_('Description of Organization'), id='field-description', value=data.description, error=errors.description, classes=['org-control', 'mandatory','field-with-info']) }} + {{ form.textarea('description', label=_('Description of Organisation'), id='field-description', value=data.description, error=errors.description, classes=['org-control', 'mandatory','field-with-info']) }} <div class="org-control-info-large info-field"> <div class="org-info-label-large"> - {{_('Brief overview of what organization is for.')}}</div> + {{_('Brief overview of what organisation is for.')}}</div> </div> </div> @@ -88,8 +88,8 @@ {% block action_buttons %} <div class="create-org form-actions"> {% block delete_button %} - {% if h.check_access('organization_delete', {'id': data.id}) %} - {% set locale = h.dump_json({'content': _('Are you sure you want to delete this Organization? This will delete all the public and private datasets belonging to this organization.')}) %} + {% if h.check_access('organisation_delete', {'id': data.id}) %} + {% set locale = h.dump_json({'content': _('Are you sure you want to delete this Organisation? This will delete all the public and private datasets belonging to this organisation.')}) %} <a class="btn btn-danger pull-left" href="{% url_for controller='organization', action='delete', id=data.id %}" data-module="confirm-action" data-module-i18n="{{ locale }}">{% block delete_button_text %}{{ _('Delete') }}{% endblock %}</a> {% endif %} {% endblock %} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_preselector_item.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_preselector_item.html index d29ea56b9a..0110ef3d44 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_preselector_item.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_preselector_item.html @@ -25,7 +25,7 @@ <div> {% if not has_add_dataset_rights %} - {% set confirm_delete_message = _('Looks like you\'re not an editor or an admin of this organization, so you can\'t add a dataset. Click the button below to send a request to the admin to make you an editor and close this box.') %} + {% set confirm_delete_message = _('Looks like you\'re not an editor or an admin of this organisation, so you can\'t add a dataset. Click the button below to send a request to the admin to make you an editor and close this box.') %} {% snippet 'snippets/confirmation_post.html', form_url=h.url_for('request_editing_rights',org_id=name, from='dataset_preselect'), header = _('Oops!'), confirm_btn_label = _('Send a request to the admin'), diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/req_membership.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/req_membership.html index a13a0be548..468fd59922 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/req_membership.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/req_membership.html @@ -11,11 +11,11 @@ <div id="{{ modal_div_id }}" class="modal hide hdx-modal" tabindex="-1" role="dialog" aria-labelledby="{{ modal_div_id }}-label" aria-hidden="true"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-hidden="true">x</button> - <h3 id="{{ modal_div_id }}-label">{{ _('Add Member') }}</h3> + <h3 id="{{ modal_div_id }}-label">{{ _('Request Membership') }}</h3> </div> <div class="modal-body"> {% set format_attrs = {'style':'width: 350px;'} %} - {{ form.input('message', label=_('Message'), placeholder=_('Please add me to this organization'), value=user, classes=['control-medium'], attrs=format_attrs) }} + {{ form.textarea('message', label=_('Message'), placeholder=_('Please add me to this organisation'), value=user, classes=['control-medium'], attrs=format_attrs) }} </div> <div class="modal-footer"> <button class="btn hdx-btn hdx-submit-btn">{{ _('Submit') }}</button> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/read_base.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/read_base.html index e249646ba5..b34d637e76 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/read_base.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/read_base.html @@ -22,8 +22,8 @@ {% block content_primary_nav %} {% set num_of_new_related_items = h.hdx_num_of_new_related_items() %} - {#% set num_of_new_related_items_msg = '({num} '.format(num=num_of_new_related_items) + _('New') + '!)' %#} - {% set num_of_new_related_items_msg = '({num}'.format(num=num_of_new_related_items) + ')' %} + {% set num_of_new_related_items_msg = '({num} '.format(num=num_of_new_related_items) + _('New') + '!)' %} + {#% set num_of_new_related_items_msg = '({num}'.format(num=num_of_new_related_items) + ')' %#} {{ h.build_nav_icon('dataset_read', _('Dataset'), id=pkg.name, class_='hdx-tab-button') }} {{ h.build_nav_icon('dataset_activity', _('Activity Stream'), id=pkg.name, class_='hdx-tab-button') }} {{ h.hdx_build_nav_icon_with_message('related_list', _('Related'), id=pkg.name, class_='hdx-tab-button', diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index cd3b74a13b..f4a724cd76 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version='v0.2.6' \ No newline at end of file +hdx_version='v0.3.0' \ No newline at end of file diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/templates/package/package_onepage.html b/ckanext-metadata_fields/ckanext/metadata_fields/templates/package/package_onepage.html index 8ff910210f..dce3f145bd 100644 --- a/ckanext-metadata_fields/ckanext/metadata_fields/templates/package/package_onepage.html +++ b/ckanext-metadata_fields/ckanext/metadata_fields/templates/package/package_onepage.html @@ -61,7 +61,7 @@ iconAnchor: [15,35] }); - L.tileLayer('https://{s}.tiles.mapbox.com/v3/reliefweb.wrl_hdx/{z}/{x}/{y}.png', { + L.tileLayer('https://{s}.tiles.mapbox.com/v3/reliefweb.im6jg6a0/{z}/{x}/{y}.png', { attribution: '<a href="http://www.mapbox.com/about/maps/" target="_blank">Terms &amp; Feedback</a>', maxZoom: 5 }).addTo(map);
OCHA-DAP__hdx-ckan-1082
Update version number Sprint 26 will be 0.3.3
[ { "content": "hdx_version='v0.3.2'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version='v0.3.3'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index a1e294d081..a55927af9a 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version='v0.3.2' \ No newline at end of file +hdx_version='v0.3.3' \ No newline at end of file
OCHA-DAP__hdx-ckan-1737
Shrink the map and related divs ![image](https://cloud.githubusercontent.com/assets/1654485/4994413/0a14a7e0-69b9-11e4-8329-f12ef8957fa6.png)
[ { "content": "hdx_version = 'v0.4.8'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.4.9'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-page.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-page.js index 46f21c5eda..0a2fa32894 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-page.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-page.js @@ -1,8 +1,7 @@ $(document).ready(function() { map = L.map('ebola-map', null, { zoomControl:false }); - L.tileLayer('http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', { - attribution: '<a class="mR45" href="http://www.mapbox.com/about/maps/" target="_blank">Terms &amp; Feedback</a>', + L.tileLayer($('#crisis-map-url-div').text(), { maxZoom: 10 }).addTo(map); diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css index b02a3ea5cc..bdfdfcd8ce 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css @@ -1,12 +1,12 @@ #ebola-map { - height: 450px; + height: 350px; } .ebola-map-title { - height:100px; + height:50px; background-color: rgba(255, 255, 255, 0.4); position: absolute; - top: 350px; + top: 300px; width: 100%; } @@ -25,7 +25,6 @@ font-size: 16px; letter-spacing: 0.01em; margin-bottom: 40px; - text-transform: capitalize; } .item-info .item-info-number { @@ -62,12 +61,18 @@ font-weight: bold; font-size: 28px; letter-spacing: 0.01em; - line-height: 100px; + line-height: 50px; text-transform: capitalize; } /* Dataset search results on crisis page */ .crisis-list-header.list-header { background-color: inherit; } +/* END - Dataset search results on crisis page */ -/* END - Dataset search results on crisis page */ \ No newline at end of file +/* Position Leaflet copyright div on top right*/ +.leaflet-control-attribution.leaflet-control{ + position: relative; + top: -335px; + left: 4px; +} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/header.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/header.css index ba0f58fbeb..6f934f8e30 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/header.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/header.css @@ -541,6 +541,9 @@ .mTop35{ margin-top: 35px; } + .mTop25{ + margin-top: 25px; + } .mTop20{ margin-top: 20px; } diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html index 57badd24fa..f6dddc1af6 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html @@ -10,11 +10,12 @@ {% block primary_content %} <div class="row paddingRowHack" style="position: relative;"> + <div id="crisis-map-url-div" style="display: none;">{{ h.hdx_get_ckan_config('hdx.crisismap.url') }}</div> <div id="ebola-map"> </div> <div class="ebola-map-title"> <span class="mL45 crisisTitle"> West Africa: Ebola outbreak </span> - <span class="mR45 pull-right" style="color: black; line-height: 80px; font-weight: bold; font-size: 30px;"> + <span class="mR45 pull-right" style="line-height: 50px;"> <a class="btn hdx-btn org-btn" data-module-placement="left" data-module="bs_popover" data-module-social_div_id="dataset_social" data-module-social_wrapper_div_id="dataset_social_wrapper" title="Share page">Share</a> <div id="dataset_social_wrapper" class="popover-wrapper"></div> @@ -47,7 +48,7 @@ </span> </div> </div> - <div class="row mTop70"> + <div class="row mTop25"> {% block top_line_figures %} {% for item in c.top_line_items %} <div class="col-xs-4"> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index 4bfd2bf505..e388f91cf8 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.4.8' +hdx_version = 'v0.4.9' diff --git a/common-config-ini.txt b/common-config-ini.txt index 905886f17f..cff76f529e 100644 --- a/common-config-ini.txt +++ b/common-config-ini.txt @@ -154,6 +154,7 @@ hdx.cache.onstartup = true hdx.homepage.extrasources = 13 hdx.datapreview.url = //data.hdx.rwlabs.org/dataproxy hdx.previewmap.url = http://otile{s}.mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.png +hdx.crisismap.url = http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png hdx.rest.indicator.endpoint = https://manage.hdx.rwlabs.org/hdx/public/api2/values hdx.rest.indicator.endpoint.facets = https://manage.hdx.rwlabs.org/hdx/public/api2
OCHA-DAP__hdx-ckan-1779
Ebola Page>Map: disable scroll wheel zoom CJ - The specific property is here: https://github.com/OCHA-DAP/hdx-design/blob/gh-pages/js/country.js line 111: map.scrollWheelZoom.disable();
[ { "content": "hdx_version = 'v0.5.1'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.5.2'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js index 561fb9369a..908fe8a3f0 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js @@ -1,6 +1,6 @@ $(document).ready(function() { map = L.map('ebola-map', { attributionControl: false }); - + map.scrollWheelZoom.disable(); L.tileLayer($('#crisis-map-url-div').text(), { attribution: ' © <a href="http://www.openstreetmap.org/copyright" target="_blank">OpenStreetMap</a> contributors', maxZoom: 10 diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index 262e989c5c..f5970bc825 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.5.1' +hdx_version = 'v0.5.2'
OCHA-DAP__hdx-ckan-2135
Browse Page Map: opening a country link has different behaviors From the map: open in new tab From the list: open in same tab We should make it the same: open in same tab (unless there was some specification that it should be a new tab that I'm not remembering. Graphic in Colombia page: instead of line (time-series) make it a bar graph. CJ added current action for this issue: - Change "Number of IDPs" graph **from** bar graph **to** line graph. -----------------Original issue text follows--------------------- I think the graph **Number of people with access constrains** would look better if it was a bar graph instead of a line, time-series: ![screen shot 2014-12-10 at 12 50 45 pm](https://cloud.githubusercontent.com/assets/953118/5381033/61da09f2-806b-11e4-8cba-532c10734cc7.png) The reason I think that is that the lines give the impression the indicator changes significantly every month, but in a continuum of time. Bar graphs will help the user compare months as nearly independent measurements, which is influences better consumption of the data in my opinion. I chatted with the Data Team about this (including @JavierTeran) and they've approved this suggestion.
[ { "content": "hdx_version = 'v0.6.1'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.6.2'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/browse/browse.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/browse/browse.js index f911871a34..db2258503f 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/browse/browse.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/browse/browse.js @@ -56,7 +56,7 @@ function prepareMap(){ var closeTooltip, country, countryLayer, country_id, feature, featureClicked, first_letter, getStyle, highlightFeature, k, line, map, mapID, onEachFeature, openURL, popup, resetFeature, topLayer, topPane, v, _i, _j, _len, _len1, _ref; //mapID = 'yumiendo.ijchbik8'; openURL = function(url) { - return window.open(url, '_blank').focus(); + return window.open(url, '_self').focus(); }; closeTooltip = window.setTimeout(function() { return map.closePopup(); @@ -173,7 +173,7 @@ function prepareCount() { for (var i in data){ var item = data[i]; - var code = item.id.toUpperCase(); + var code = item.name.toUpperCase(); var newItem = {}; newItem.title = item.title; newItem.dataset_count = item.dataset_count; diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js index 14d0597867..ba02a59c05 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js @@ -103,7 +103,7 @@ function drawGraph2() { names: { "Persons": "Number of People with Access Constraints" }, - type: 'area' + type: 'bar' }, legend:{ show: false diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/public/homepage.css b/ckanext-hdx_theme/ckanext/hdx_theme/public/homepage.css index f8593346cf..8b3dd06b2d 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/public/homepage.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/public/homepage.css @@ -37,7 +37,7 @@ background-position: center; font-family: 'Gotham-Bold',sans-serif; font-weight: 400; - overflow: hidden; + /*overflow: hidden;*/ /*min-width: 1380px;*/ } @@ -194,13 +194,20 @@ margin-left: 34px; } - .homepage-main .rightContent .countRow .span2{ - margin: 0; - } + /* Ugly split for count items*/ - .homepage-main .rightContent .countRow .span2:first-child{ - margin-right: 30px; - } + .homepage-main .rightContent .countRow .span6{ + width: 510px; + } + + .homepage-main .rightContent .countRow .span2{ + margin: 0; + } + + .homepage-main .rightContent .countRow .span2:first-child{ + margin-right: 30px; + } + /**/ .homepageHeaderFooterBackground{ background-color: #ffffff; diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index b3bfb54a64..5f7bc490cf 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.6.1' +hdx_version = 'v0.6.2'
OCHA-DAP__hdx-ckan-1748
Shrink the spacing on the top line numbers Proposed spacings shown here: ![image](https://cloud.githubusercontent.com/assets/1654485/5008984/58c2d9f8-6a62-11e4-9827-75750791e7f9.png) modified css: .item-info { border-top: 1px solid #cccccc; border-bottom: 1px solid #cccccc; padding: 20px 0; margin-top: -1px; color: #333333; } .item-info .item-info-title { font-family: 'Gotham-Bold', sans-serif; font-weight: 400; font-size: 16px; letter-spacing: 0.01em; margin-bottom: 20px; } .item-info .item-info-number { font-family: 'Gotham-Light', sans-serif; font-size: 74px; line-height: 1; letter-spacing: 0.01em; margin-bottom: 20px; }
[ { "content": "hdx_version = 'v0.4.9'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.4.10'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css index 7f4c17f9df..dcc221eba9 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/crisis-page.css @@ -14,7 +14,7 @@ .item-info { border-top: 1px solid #cccccc; border-bottom: 1px solid #cccccc; - padding: 40px 0; + padding: 20px 0; margin-top: -1px; color: #333333; } @@ -24,7 +24,7 @@ font-weight: 400; font-size: 16px; letter-spacing: 0.01em; - margin-bottom: 40px; + margin-bottom: 20px; } .item-info .item-info-number { @@ -32,7 +32,7 @@ font-size: 74px; line-height: 1; letter-spacing: 0.01em; - margin-bottom: 34px; + margin-bottom: 20px; } .item-info .item-info-number span.small { @@ -40,7 +40,7 @@ font-size: 37px; line-height: 1; letter-spacing: 0.01em; - margin-bottom: 34px; + margin-bottom: 20px; margin-left: -15px; } diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index e388f91cf8..945394e5ab 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.4.9' +hdx_version = 'v0.4.10'
OCHA-DAP__hdx-ckan-1401
The MailChimp subscribe field could use a little bit more padding-left Right now the input text is too close to the left border. It would be nice to add some padding there. ![screen shot 2014-10-01 at 10 23 58 am](https://cloud.githubusercontent.com/assets/953118/4476520/c03a50be-4976-11e4-9ea2-05e0d2a872f9.png)
[ { "content": "hdx_version = 'v0.3.9'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.3.10'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/public/mailchimp.js b/ckanext-hdx_theme/ckanext/hdx_theme/public/mailchimp.js index 2375baeb2d..e6e0374755 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/public/mailchimp.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/public/mailchimp.js @@ -44,7 +44,7 @@ function mce_init_form(){ var options = { errorClass: 'mce_inline_error', errorElement: 'div', onkeyup: function(){}, onfocusout:function(){}, onblur:function(){} }; var mce_validator = $("#mc-embedded-subscribe-form").validate(options); $("#mc-embedded-subscribe-form").unbind('submit');//remove the validator so we can get into beforeSubmit on the ajaxform, which then calls the validator - options = { url: 'http://unocha.us2.list-manage.com/subscribe/post-json?u=83487eb1105d72ff2427e4bd7&id=6fd988326c&c=?', type: 'GET', dataType: 'json', contentType: "application/json; charset=utf-8", + options = { url: '//unocha.us2.list-manage.com/subscribe/post-json?u=83487eb1105d72ff2427e4bd7&id=6fd988326c&c=?', type: 'GET', dataType: 'json', contentType: "application/json; charset=utf-8", beforeSubmit: function(){ $('#mce_tmp_error_msg').remove(); $('.datefield','#mc_embed_signup').each( diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/footer.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/footer.html index e38ca6d0f1..94ed084990 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/footer.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/footer.html @@ -31,7 +31,7 @@ <form action="//unocha.us2.list-manage.com/subscribe/post?u=83487eb1105d72ff2427e4bd7&amp;id=6fd988326c" method="post" id="mc-embedded-subscribe-form" name="mc-embedded-subscribe-form" class="validate" target="_blank" novalidate="" _lpchecked="1"> <div class="six columns left" style="padding-left: 0px;"> - <input style="width: 340px; margin: 0; padding: 0;" type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL"> + <input style="width: 340px; margin: 0; padding: 5;" type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL"> <input style="height: 44px; margin-left: 20px;" type="submit" value="Subscribe" name="subscribe" id="mc-embedded-subscribe" class="btn btn-primary"> </div> <div id="mce-responses" class="clear"> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index e560071d6a..33b7a48515 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.3.9' +hdx_version = 'v0.3.10'
OCHA-DAP__hdx-ckan-2071
Update data on the Ebola map
[ { "content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.5.14'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js index 2d59b49d31..e6b87b6d36 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js @@ -117,7 +117,7 @@ function drawDistricts(map){ } var medicalCentresStyle = function(feature){ - if(feature.properties.Status == "Functional"){ + if(feature.properties.Status == "Open"){ return {radius: 5, fillColor: "#1ebfb3", color: "#000", @@ -147,16 +147,16 @@ function drawDistricts(map){ if (!L.Browser.ie && !L.Browser.opera) { layer.bringToFront(); } - var name = feature.properties["Centre Name"]; + var name = feature.properties["ECF_Name"]; if (name == null) name = ""; - var type = feature.properties["Type1"]; + var type = feature.properties["Type"]; if (type == null) type = ""; var status = feature.properties["Status"]; if (status == null) status = ""; - var organisation = feature.properties["Primary Organisation"]; + var organisation = feature.properties["Partner"]; if (organisation == null) organisation = ""; diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js index a519f1f72b..75c1e4c3b8 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js @@ -2,87 +2,87 @@ var totalCases = {"GIN001001":"26", "GIN001002":"1", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"265", -"GIN003001":"5", +"GIN002001":"373", +"GIN003001":"10", "GIN003002":"1", -"GIN003003":"17", -"GIN003004":"11", -"GIN004001":"11", -"GIN004002":"136", -"GIN004003":"5", +"GIN003003":"63", +"GIN003004":"103", +"GIN004001":"30", +"GIN004002":"161", +"GIN004003":"19", "GIN004004":"0", -"GIN004005":"20", -"GIN005001":"67", -"GIN005002":"22", -"GIN005003":"22", +"GIN004005":"31", +"GIN005001":"160", +"GIN005002":"74", +"GIN005003":"47", "GIN005004":"0", -"GIN005005":"17", -"GIN005006":"26", +"GIN005005":"81", +"GIN005006":"42", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", "GIN006004":"0", "GIN006005":"0", -"GIN007001":"12", +"GIN007001":"9", "GIN007002":"0", "GIN007003":"8", -"GIN008001":"38", -"GIN008002":"371", +"GIN008001":"45", +"GIN008002":"383", "GIN008003":"0", -"GIN008004":"655", -"GIN008005":"194", +"GIN008004":"746", +"GIN008005":"248", "GIN008006":"11", -"LBR01":"284", -"LBR02":"302", -"LBR03":"17", -"LBR04":"141", -"LBR05":"92", +"LBR01":"296", +"LBR02":"562", +"LBR03":"34", +"LBR04":"177", +"LBR05":"203", "LBR06":"11", -"LBR07":"34", -"LBR08":"641", -"LBR09":"1192", -"LBR10":"18", -"LBR11":"3854", -"LBR12":"328", +"LBR07":"36", +"LBR08":"649", +"LBR09":"1247", +"LBR10":"23", +"LBR11":"4335", +"LBR12":"330", "LBR13":"19", -"LBR14":"44", -"LBR15":"32", -"SLE0101":"605", -"SLE0102":"591", -"SLE0103":"104", -"SLE0201":"723", -"SLE0202":"60", -"SLE0203":"89", -"SLE0204":"625", -"SLE0205":"286", -"SLE0301":"260", -"SLE0302":"5", -"SLE0303":"152", -"SLE0304":"38", -"SLE0401":"614", -"SLE0402":"810" +"LBR14":"43", +"LBR15":"52", +"SLE0101":"650", +"SLE0102":"646", +"SLE0103":"315", +"SLE0201":"1114", +"SLE0202":"153", +"SLE0203":"196", +"SLE0204":"1490", +"SLE0205":"542", +"SLE0301":"483", +"SLE0302":"16", +"SLE0303":"285", +"SLE0304":"50", +"SLE0401":"1229", +"SLE0402":"2277", }; -var totalDeaths = {"GIN001001":"16", +var totalDeaths = {"GIN001001":"18", "GIN001002":"0", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"112", -"GIN003001":"5", +"GIN002001":"168", +"GIN003001":"10", "GIN003002":"1", -"GIN003003":"12", -"GIN003004":"9", -"GIN004001":"1", -"GIN004002":"70", -"GIN004003":"5", +"GIN003003":"49", +"GIN003004":"64", +"GIN004001":"17", +"GIN004002":"98", +"GIN004003":"12", "GIN004004":"0", -"GIN004005":"7", -"GIN005001":"33", -"GIN005002":"8", -"GIN005003":"14", +"GIN004005":"19", +"GIN005001":"81", +"GIN005002":"28", +"GIN005003":"29", "GIN005004":"0", -"GIN005005":"2", -"GIN005006":"10", +"GIN005005":"50", +"GIN005006":"17", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", @@ -91,128 +91,128 @@ var totalDeaths = {"GIN001001":"16", "GIN007001":"2", "GIN007002":"0", "GIN007003":"4", -"GIN008001":"20", -"GIN008002":"309", +"GIN008001":"26", +"GIN008002":"316", "GIN008003":"0", -"GIN008004":"402", -"GIN008005":"129", -"GIN008006":"6", -"LBR01":"153", -"LBR02":"88", -"LBR03":"5", -"LBR04":"54", -"LBR05":"52", -"LBR06":"4", -"LBR07":"25", -"LBR08":"375", -"LBR09":"527", -"LBR10":"14", -"LBR11":"1548", -"LBR12":"50", +"GIN008004":"496", +"GIN008005":"175", +"GIN008006":"5", +"LBR01":"167", +"LBR02":"137", +"LBR03":"12", +"LBR04":"79", +"LBR05":"111", +"LBR06":"6", +"LBR07":"27", +"LBR08":"387", +"LBR09":"573", +"LBR10":"19", +"LBR11":"1796", +"LBR12":"57", "LBR13":"8", -"LBR14":"24", -"LBR15":"11", +"LBR14":"26", +"LBR15":"18", "SLE0101":"267", -"SLE0102":"260", -"SLE0103":"39", -"SLE0201":"190", -"SLE0202":"28", -"SLE0203":"44", -"SLE0204":"162", -"SLE0205":"80", -"SLE0301":"112", -"SLE0302":"3", -"SLE0303":"62", +"SLE0102":"267", +"SLE0103":"109", +"SLE0201":"306", +"SLE0202":"61", +"SLE0203":"74", +"SLE0204":"406", +"SLE0205":"150", +"SLE0301":"157", +"SLE0302":"7", +"SLE0303":"86", "SLE0304":"21", -"SLE0401":"101", -"SLE0402":"110" +"SLE0401":"332", +"SLE0402":"515", }; var totalCasesPerArea = {"GIN001001":"5", "GIN001002":"0", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"658", -"GIN003001":"1", +"GIN002001":"927", +"GIN003001":"2", "GIN003002":"0", -"GIN003003":"1", -"GIN003004":"2", -"GIN004001":"1", -"GIN004002":"14", -"GIN004003":"0", +"GIN003003":"5", +"GIN003004":"17", +"GIN004001":"2", +"GIN004002":"17", +"GIN004003":"1", "GIN004004":"0", -"GIN004005":"1", -"GIN005001":"59", -"GIN005002":"6", -"GIN005003":"5", +"GIN004005":"2", +"GIN005001":"140", +"GIN005002":"19", +"GIN005003":"11", "GIN005004":"0", -"GIN005005":"2", -"GIN005006":"3", +"GIN005005":"9", +"GIN005006":"6", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", "GIN006004":"0", "GIN006005":"0", -"GIN007001":"4", +"GIN007001":"3", "GIN007002":"0", "GIN007003":"2", -"GIN008001":"3", -"GIN008002":"88", +"GIN008001":"4", +"GIN008002":"90", "GIN008003":"0", -"GIN008004":"81", -"GIN008005":"47", +"GIN008004":"92", +"GIN008005":"60", "GIN008006":"3", -"LBR01":"147", -"LBR02":"35", -"LBR03":"2", -"LBR04":"18", -"LBR05":"19", +"LBR01":"153", +"LBR02":"64", +"LBR03":"3", +"LBR04":"23", +"LBR05":"42", "LBR06":"1", "LBR07":"9", "LBR08":"65", -"LBR09":"444", -"LBR10":"8", -"LBR11":"2035", +"LBR09":"464", +"LBR10":"10", +"LBR11":"2290", "LBR12":"28", "LBR13":"4", "LBR14":"8", -"LBR15":"3", -"SLE0101":"153", -"SLE0102":"93", -"SLE0103":"19", -"SLE0201":"87", -"SLE0202":"20", -"SLE0203":"7", -"SLE0204":"104", -"SLE0205":"44", -"SLE0301":"47", -"SLE0302":"1", -"SLE0303":"22", -"SLE0304":"9", -"SLE0401":"1001", -"SLE0402":"9940" +"LBR15":"5", +"SLE0101":"164", +"SLE0102":"101", +"SLE0103":"58", +"SLE0201":"135", +"SLE0202":"51", +"SLE0203":"16", +"SLE0204":"249", +"SLE0205":"84", +"SLE0301":"88", +"SLE0302":"4", +"SLE0303":"41", +"SLE0304":"12", +"SLE0401":"2003", +"SLE0402":"27942", }; var totalDeathsPerArea = {"GIN001001":"3", "GIN001002":"0", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"278", -"GIN003001":"1", +"GIN002001":"417", +"GIN003001":"2", "GIN003002":"0", -"GIN003003":"1", -"GIN003004":"1", -"GIN004001":"0", -"GIN004002":"7", -"GIN004003":"0", +"GIN003003":"4", +"GIN003004":"10", +"GIN004001":"1", +"GIN004002":"10", +"GIN004003":"1", "GIN004004":"0", -"GIN004005":"0", -"GIN005001":"29", -"GIN005002":"2", -"GIN005003":"3", +"GIN004005":"1", +"GIN005001":"71", +"GIN005002":"7", +"GIN005003":"7", "GIN005004":"0", -"GIN005005":"0", -"GIN005006":"1", +"GIN005005":"6", +"GIN005006":"2", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", @@ -222,127 +222,127 @@ var totalDeathsPerArea = {"GIN001001":"3", "GIN007002":"0", "GIN007003":"1", "GIN008001":"2", -"GIN008002":"73", +"GIN008002":"75", "GIN008003":"0", -"GIN008004":"50", -"GIN008005":"31", +"GIN008004":"61", +"GIN008005":"43", "GIN008006":"2", -"LBR01":"79", -"LBR02":"10", +"LBR01":"86", +"LBR02":"16", "LBR03":"1", -"LBR04":"7", -"LBR05":"11", -"LBR06":"0", -"LBR07":"6", -"LBR08":"38", -"LBR09":"196", -"LBR10":"6", -"LBR11":"818", -"LBR12":"4", +"LBR04":"10", +"LBR05":"23", +"LBR06":"1", +"LBR07":"7", +"LBR08":"39", +"LBR09":"213", +"LBR10":"8", +"LBR11":"949", +"LBR12":"5", "LBR13":"2", -"LBR14":"4", -"LBR15":"1", +"LBR14":"5", +"LBR15":"2", "SLE0101":"67", -"SLE0102":"41", -"SLE0103":"7", -"SLE0201":"23", -"SLE0202":"9", -"SLE0203":"4", -"SLE0204":"27", -"SLE0205":"12", -"SLE0301":"20", -"SLE0302":"1", -"SLE0303":"9", +"SLE0102":"42", +"SLE0103":"20", +"SLE0201":"37", +"SLE0202":"20", +"SLE0203":"6", +"SLE0204":"68", +"SLE0205":"23", +"SLE0301":"29", +"SLE0302":"2", +"SLE0303":"12", "SLE0304":"5", -"SLE0401":"165", -"SLE0402":"1350" +"SLE0401":"541", +"SLE0402":"6320", }; var totalCasesPerPop = {"GIN001001":"13.4", "GIN001002":"0.2", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"23.8", -"GIN003001":"3.3", +"GIN002001":"33.5", +"GIN003001":"6.5", "GIN003002":"0.6", -"GIN003003":"9.3", -"GIN003004":"4.1", -"GIN004001":"3", -"GIN004002":"54.4", -"GIN004003":"2.6", +"GIN003003":"34.4", +"GIN003004":"38.7", +"GIN004001":"8.2", +"GIN004002":"64.5", +"GIN004003":"9.9", "GIN004004":"0", -"GIN004005":"4.8", -"GIN005001":"18.8", -"GIN005002":"16.8", -"GIN005003":"5.9", +"GIN004005":"7.5", +"GIN005001":"45", +"GIN005002":"56.3", +"GIN005003":"12.6", "GIN005004":"0", -"GIN005005":"3.8", -"GIN005006":"10.2", +"GIN005005":"18", +"GIN005006":"16.5", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", "GIN006004":"0", "GIN006005":"0", -"GIN007001":"6.8", +"GIN007001":"5.1", "GIN007002":"0", "GIN007003":"3.1", -"GIN008001":"17.9", -"GIN008002":"53.8", +"GIN008001":"21.2", +"GIN008002":"55.5", "GIN008003":"0", -"GIN008004":"137.4", -"GIN008005":"52.9", +"GIN008004":"156.5", +"GIN008005":"67.6", "GIN008006":"3.9", -"LBR01":"337.6", -"LBR02":"90.6", -"LBR03":"20.4", -"LBR04":"63.6", -"LBR05":"72.4", +"LBR01":"351.9", +"LBR02":"168.5", +"LBR03":"40.8", +"LBR04":"79.8", +"LBR05":"159.7", "LBR06":"8.8", -"LBR07":"58.7", -"LBR08":"231.5", -"LBR09":"567.8", -"LBR10":"13.2", -"LBR11":"344.6", -"LBR12":"71", +"LBR07":"62.2", +"LBR08":"234.4", +"LBR09":"594", +"LBR10":"16.9", +"LBR11":"387.7", +"LBR12":"71.4", "LBR13":"28.4", -"LBR14":"61.5", -"LBR15":"31.3", -"SLE0101":"130.1", -"SLE0102":"90.5", -"SLE0103":"32", -"SLE0201":"146.3", -"SLE0202":"17.6", -"SLE0203":"33.5", -"SLE0204":"112", -"SLE0205":"65.8", -"SLE0301":"39.7", -"SLE0302":"3", -"SLE0303":"54.7", -"SLE0304":"11.3", -"SLE0401":"232.9", -"SLE0402":"77.8" +"LBR14":"60.1", +"LBR15":"50.8", +"SLE0101":"139.8", +"SLE0102":"98.9", +"SLE0103":"96.9", +"SLE0201":"225.4", +"SLE0202":"44.8", +"SLE0203":"73.8", +"SLE0204":"267", +"SLE0205":"124.6", +"SLE0301":"73.8", +"SLE0302":"9.5", +"SLE0303":"102.5", +"SLE0304":"14.9", +"SLE0401":"466.2", +"SLE0402":"218.8", }; -var totalDeathsPerPop = {"GIN001001":"8.3", +var totalDeathsPerPop = {"GIN001001":"9.3", "GIN001002":"0", "GIN001003":"0", "GIN001004":"0", -"GIN002001":"10.1", -"GIN003001":"3.3", +"GIN002001":"15.1", +"GIN003001":"6.5", "GIN003002":"0.6", -"GIN003003":"6.6", -"GIN003004":"3.4", -"GIN004001":"0.3", -"GIN004002":"28", -"GIN004003":"2.6", +"GIN003003":"26.7", +"GIN003004":"24", +"GIN004001":"4.7", +"GIN004002":"39.2", +"GIN004003":"6.2", "GIN004004":"0", -"GIN004005":"1.7", -"GIN005001":"9.3", -"GIN005002":"6.1", -"GIN005003":"3.8", +"GIN004005":"4.6", +"GIN005001":"22.8", +"GIN005002":"21.3", +"GIN005003":"7.8", "GIN005004":"0", -"GIN005005":"0.4", -"GIN005006":"3.9", +"GIN005005":"11.1", +"GIN005006":"6.7", "GIN006001":"0", "GIN006002":"0", "GIN006003":"0", @@ -351,41 +351,41 @@ var totalDeathsPerPop = {"GIN001001":"8.3", "GIN007001":"1.1", "GIN007002":"0", "GIN007003":"1.5", -"GIN008001":"9.4", -"GIN008002":"44.8", +"GIN008001":"12.2", +"GIN008002":"45.8", "GIN008003":"0", -"GIN008004":"84.3", -"GIN008005":"35.2", -"GIN008006":"2.1", -"LBR01":"181.9", -"LBR02":"26.4", -"LBR03":"6", -"LBR04":"24.4", -"LBR05":"40.9", -"LBR06":"3.2", -"LBR07":"43.2", -"LBR08":"135.4", -"LBR09":"251", -"LBR10":"10.3", -"LBR11":"138.4", -"LBR12":"10.8", +"GIN008004":"104.1", +"GIN008005":"47.7", +"GIN008006":"1.8", +"LBR01":"198.5", +"LBR02":"41.1", +"LBR03":"14.4", +"LBR04":"35.6", +"LBR05":"87.3", +"LBR06":"4.8", +"LBR07":"46.6", +"LBR08":"139.8", +"LBR09":"273", +"LBR10":"14", +"LBR11":"160.6", +"LBR12":"12.3", "LBR13":"12", -"LBR14":"33.6", -"LBR15":"10.7", +"LBR14":"36.4", +"LBR15":"17.6", "SLE0101":"57.4", -"SLE0102":"39.8", -"SLE0103":"12", -"SLE0201":"38.5", -"SLE0202":"8.2", -"SLE0203":"16.6", -"SLE0204":"29", -"SLE0205":"18.4", -"SLE0301":"17.1", -"SLE0302":"1.8", -"SLE0303":"22.3", +"SLE0102":"40.9", +"SLE0103":"33.5", +"SLE0201":"61.9", +"SLE0202":"17.9", +"SLE0203":"27.8", +"SLE0204":"72.8", +"SLE0205":"34.5", +"SLE0301":"24", +"SLE0302":"4.1", +"SLE0303":"30.9", "SLE0304":"6.3", -"SLE0401":"38.3", -"SLE0402":"10.6" +"SLE0401":"125.9", +"SLE0402":"49.5", }; diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js index 31fcba0cde..dc773d6368 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js @@ -1,116 +1,74 @@ -var medicalCentres = { +var medicalCentres ={ "type": "FeatureCollection", "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } }, "features": [ -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Tubmanburg Government Hospital - Holding Centre", "Primary Organisation": "MOH Liberia", "Type1": "Triage", "Latitude": 6.868703, "Longitude": -10.824815 }, "geometry": { "type": "Point", "coordinates": [ -10.8248146, 6.8687027 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "IOM", "Type1": "ETC", "Latitude": 6.867340, "Longitude": -10.830940 }, "geometry": { "type": "Point", "coordinates": [ -10.83094, 6.86734 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Bong County ETC", "Primary Organisation": "IMC", "Type1": "ETC", "Latitude": 7.004720, "Longitude": -9.555000 }, "geometry": { "type": "Point", "coordinates": [ -9.555, 7.00472 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Phebe Hospital", "Primary Organisation": "No Partner Identified", "Type1": "ETC", "Latitude": 7.027940, "Longitude": -9.553290 }, "geometry": { "type": "Point", "coordinates": [ -9.55329, 7.02794 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Bong 2 Totota", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.809806, "Longitude": -9.941350 }, "geometry": { "type": "Point", "coordinates": [ -9.9413504, 6.8098065 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 7.066670, "Longitude": -10.487500 }, "geometry": { "type": "Point", "coordinates": [ -10.4875, 7.06667 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Arcelor Mittal Hospital (India)", "Primary Organisation": "IOM", "Type1": "ETC", "Latitude": 5.875710, "Longitude": -10.020040 }, "geometry": { "type": "Point", "coordinates": [ -10.02004, 5.87571 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "IOM", "Type1": "ETC", "Latitude": 6.816390, "Longitude": -11.138950 }, "geometry": { "type": "Point", "coordinates": [ -11.13895, 6.81639 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.752570, "Longitude": -11.366950 }, "geometry": { "type": "Point", "coordinates": [ -11.36695, 6.75257 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "Partners in Health", "Type1": "ETC", "Latitude": 6.068584, "Longitude": -8.133906 }, "geometry": { "type": "Point", "coordinates": [ -8.1339058, 6.0685838 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 4.679722, "Longitude": -8.233889 }, "geometry": { "type": "Point", "coordinates": [ -8.233889, 4.679722 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Foya Case Management Centre (CMC Foya MSF)", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 8.378911, "Longitude": -10.205757 }, "geometry": { "type": "Point", "coordinates": [ -10.205757, 8.378911 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Borma Hospital", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 8.354540, "Longitude": -10.200370 }, "geometry": { "type": "Point", "coordinates": [ -10.20037, 8.35454 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Telewowan Hospital", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 8.422770, "Longitude": -9.753226 }, "geometry": { "type": "Point", "coordinates": [ -9.7532264, 8.42277 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 8.412640, "Longitude": -9.769380 }, "geometry": { "type": "Point", "coordinates": [ -9.76938, 8.41264 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 7.778799, "Longitude": -9.428166 }, "geometry": { "type": "Point", "coordinates": [ -9.4281657, 7.7787995 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Firestone Medical Center", "Primary Organisation": "Firestone Company", "Type1": "ETC", "Latitude": 6.353060, "Longitude": -10.469720 }, "geometry": { "type": "Point", "coordinates": [ -10.46972, 6.35306 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "Save the Children (build)", "Type1": "ETC", "Latitude": 6.562500, "Longitude": -10.321940 }, "geometry": { "type": "Point", "coordinates": [ -10.32194, 6.5625 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.532326, "Longitude": -10.348807 }, "geometry": { "type": "Point", "coordinates": [ -10.3488074, 6.532326 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.605368, "Longitude": -10.175843 }, "geometry": { "type": "Point", "coordinates": [ -10.1758429, 6.6053676 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Airport JTFPO", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.237896, "Longitude": -10.356300 }, "geometry": { "type": "Point", "coordinates": [ -10.3563, 6.237896 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "EBK Barracks", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.209466, "Longitude": -10.565051 }, "geometry": { "type": "Point", "coordinates": [ -10.565051, 6.209466 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "eMed at Airport", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.228494, "Longitude": -10.361782 }, "geometry": { "type": "Point", "coordinates": [ -10.361782, 6.228494 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Pleebo Health Center", "Primary Organisation": null, "Type1": "Triage", "Latitude": 4.585086, "Longitude": -7.674787 }, "geometry": { "type": "Point", "coordinates": [ -7.6747874, 4.5850862 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 4.376073, "Longitude": -7.707692 }, "geometry": { "type": "Point", "coordinates": [ -7.7076917, 4.3760732 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Unity Convention Centre", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.389060, "Longitude": -10.792860 }, "geometry": { "type": "Point", "coordinates": [ -10.79286, 6.38906 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "ELWA1 Hospital", "Primary Organisation": "Samaritans Purse", "Type1": "ETC", "Latitude": 6.239381, "Longitude": -10.696514 }, "geometry": { "type": "Point", "coordinates": [ -10.6965137, 6.2393809 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "ELWA2 Hospital", "Primary Organisation": "MOH Liberia", "Type1": "ETC", "Latitude": 6.239650, "Longitude": -10.696000 }, "geometry": { "type": "Point", "coordinates": [ -10.696, 6.23965 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "ELWA3 Hospital", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 6.244440, "Longitude": -10.700280 }, "geometry": { "type": "Point", "coordinates": [ -10.70028, 6.24444 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "ELWA4 Hospital", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.243060, "Longitude": -10.698890 }, "geometry": { "type": "Point", "coordinates": [ -10.69889, 6.24306 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Island Clinic (Oniyama Specialist Hospital), Location: Bushrod Island", "Primary Organisation": "MOH Liberia", "Type1": "ETC", "Latitude": 6.384372, "Longitude": -10.787114 }, "geometry": { "type": "Point", "coordinates": [ -10.7871135, 6.3843716 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Ministry of Health", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.264720, "Longitude": -10.712780 }, "geometry": { "type": "Point", "coordinates": [ -10.71278, 6.26472 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Redemption Hospital", "Primary Organisation": "MOH Liberia", "Type1": "Triage", "Latitude": 6.369708, "Longitude": -10.791974 }, "geometry": { "type": "Point", "coordinates": [ -10.7919744, 6.3697076 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "West Point holding unit", "Primary Organisation": "MOH Liberia", "Type1": "Triage", "Latitude": 6.324450, "Longitude": -10.807300 }, "geometry": { "type": "Point", "coordinates": [ -10.8073, 6.32445 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Childfund International", "Primary Organisation": "Childfund International", "Type1": "Triage", "Latitude": 6.327981, "Longitude": -10.797770 }, "geometry": { "type": "Point", "coordinates": [ -10.79777, 6.3279815 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "ATS Stadium", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.308920, "Longitude": -10.804110 }, "geometry": { "type": "Point", "coordinates": [ -10.80411, 6.30892 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "WFP ETU", "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.269690, "Longitude": -10.734700 }, "geometry": { "type": "Point", "coordinates": [ -10.7347, 6.26969 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "AFL\/PAK Camp", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.424114, "Longitude": -10.549386 }, "geometry": { "type": "Point", "coordinates": [ -10.549386, 6.424114 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "BTC Site for FP 300", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.305482, "Longitude": -10.803725 }, "geometry": { "type": "Point", "coordinates": [ -10.803725, 6.305482 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Camp Ware Facilities", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.425222, "Longitude": -10.550534 }, "geometry": { "type": "Point", "coordinates": [ -10.550534, 6.425222 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Camp Ware Main", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.430120, "Longitude": -10.543521 }, "geometry": { "type": "Point", "coordinates": [ -10.543521, 6.43012 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Samuel K Stadium (SKD Stadium)", "Primary Organisation": "IRC", "Type1": "ETC", "Latitude": 6.256602, "Longitude": -10.701962 }, "geometry": { "type": "Point", "coordinates": [ -10.701962, 6.256602 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Samuel K Stadium Parking Lot (SKD Stadium)", "Primary Organisation": "IRC", "Type1": "ETC", "Latitude": 6.256762, "Longitude": -10.703646 }, "geometry": { "type": "Point", "coordinates": [ -10.703646, 6.256762 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Springgs Payne Airfield", "Primary Organisation": "US Department of Defence", "Type1": "ETC", "Latitude": 6.287109, "Longitude": -10.760994 }, "geometry": { "type": "Point", "coordinates": [ -10.760994, 6.287109 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Monrovia, Congo Town - Old Ministry of Defence ETU 1", "Primary Organisation": "Cuba", "Type1": "ETC", "Latitude": 6.270280, "Longitude": -10.734170 }, "geometry": { "type": "Point", "coordinates": [ -10.73417, 6.27028 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Monrovia, Congo Town - Old Ministry of Defence ETU 2", "Primary Organisation": "Cuba", "Type1": "ETC", "Latitude": 6.270280, "Longitude": -10.734170 }, "geometry": { "type": "Point", "coordinates": [ -10.73417, 6.27028 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Ganta Hospital", "Primary Organisation": "No Partner Identified", "Type1": "ETC", "Latitude": 7.220300, "Longitude": -8.981300 }, "geometry": { "type": "Point", "coordinates": [ -8.9813, 7.2203 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "JFD Hospital", "Primary Organisation": null, "Type1": "Hospital", "Latitude": 6.493410, "Longitude": -8.844448 }, "geometry": { "type": "Point", "coordinates": [ -8.8444476, 6.4934103 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "Partners in Health", "Type1": "ETC", "Latitude": 7.242310, "Longitude": -8.979000 }, "geometry": { "type": "Point", "coordinates": [ -8.979, 7.24231 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 6.509830, "Longitude": -8.859990 }, "geometry": { "type": "Point", "coordinates": [ -8.85999, 6.50983 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "G. W. Harley", "Primary Organisation": "No Partner Identified", "Type1": "ETC", "Latitude": 7.352610, "Longitude": -8.717330 }, "geometry": { "type": "Point", "coordinates": [ -8.71733, 7.35261 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 5.197430, "Longitude": -7.875519 }, "geometry": { "type": "Point", "coordinates": [ -7.8755193, 5.1974297 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 5.436940, "Longitude": -9.562220 }, "geometry": { "type": "Point", "coordinates": [ -9.56222, 5.43694 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "F.J. Grante Hospital Temporary Holding Centre", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 5.011063, "Longitude": -9.039804 }, "geometry": { "type": "Point", "coordinates": [ -9.0398036, 5.0110631 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 5.009560, "Longitude": -9.038350 }, "geometry": { "type": "Point", "coordinates": [ -9.03835, 5.00956 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Donka Hospital", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 9.535640, "Longitude": -13.683200 }, "geometry": { "type": "Point", "coordinates": [ -13.6832, 9.53564 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Dabola", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 10.742070, "Longitude": -11.106460 }, "geometry": { "type": "Point", "coordinates": [ -11.10646, 10.74207 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "H?pital de Kissidougou", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 9.198440, "Longitude": -10.103520 }, "geometry": { "type": "Point", "coordinates": [ -10.10352, 9.19844 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": null, "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 10.383369, "Longitude": -9.304758 }, "geometry": { "type": "Point", "coordinates": [ -9.3047576, 10.3833686 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "M?Balia", "Primary Organisation": null, "Type1": "ETC", "Latitude": 9.270569, "Longitude": -9.007620 }, "geometry": { "type": "Point", "coordinates": [ -9.0076196, 9.2705687 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Wonkifong Centre", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 9.709040, "Longitude": -13.388956 }, "geometry": { "type": "Point", "coordinates": [ -13.388956, 9.7090404 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Forecariah", "Primary Organisation": null, "Type1": "Transit Centre", "Latitude": 9.431214, "Longitude": -13.091866 }, "geometry": { "type": "Point", "coordinates": [ -13.0918656, 9.4312135 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Forecariah", "Primary Organisation": null, "Type1": "ETC", "Latitude": 9.431214, "Longitude": -13.091866 }, "geometry": { "type": "Point", "coordinates": [ -13.0918656, 9.4312135 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 10.691328, "Longitude": -12.251260 }, "geometry": { "type": "Point", "coordinates": [ -12.2512595, 10.6913277 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 10.374074, "Longitude": -12.083560 }, "geometry": { "type": "Point", "coordinates": [ -12.0835598, 10.3740744 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Guinee Forestiere (CMC Gueckedou MSF)", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 8.552229, "Longitude": -10.120623 }, "geometry": { "type": "Point", "coordinates": [ -10.120623, 8.552229 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Macenta Transit Centre", "Primary Organisation": "MOH Guinea", "Type1": "Transit Centre", "Latitude": 8.536880, "Longitude": -9.464810 }, "geometry": { "type": "Point", "coordinates": [ -9.46481, 8.53688 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Nzerekore Transit Centre", "Primary Organisation": null, "Type1": "Transit Centre", "Latitude": 7.762100, "Longitude": -8.814300 }, "geometry": { "type": "Point", "coordinates": [ -8.8143, 7.7621 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Nzerekore", "Primary Organisation": "Ailema (?)", "Type1": "ETC", "Latitude": 7.762100, "Longitude": -8.814300 }, "geometry": { "type": "Point", "coordinates": [ -8.8143, 7.7621 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "Croix-Rouge fran?aise", "Type1": "ETC", "Latitude": 8.500000, "Longitude": -9.416700 }, "geometry": { "type": "Point", "coordinates": [ -9.4167, 8.5 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 7.569579, "Longitude": -9.259479 }, "geometry": { "type": "Point", "coordinates": [ -9.2594791, 7.5695786 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 8.689848, "Longitude": -8.648345 }, "geometry": { "type": "Point", "coordinates": [ -8.648345, 8.6898481 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Yomou Transit Centre", "Primary Organisation": "N\/A", "Type1": "Transit Centre", "Latitude": 7.569579, "Longitude": -9.259749 }, "geometry": { "type": "Point", "coordinates": [ -9.2597491, 7.5695786 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "CMC Kailahun MSF", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 8.297871, "Longitude": -10.556686 }, "geometry": { "type": "Point", "coordinates": [ -10.556686, 8.297871 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Buedu", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 8.279709, "Longitude": -10.371495 }, "geometry": { "type": "Point", "coordinates": [ -10.3714952, 8.2797087 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Daru", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 7.991518, "Longitude": -10.845919 }, "geometry": { "type": "Point", "coordinates": [ -10.845919, 7.991518 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Koindu", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 8.461645, "Longitude": -10.336319 }, "geometry": { "type": "Point", "coordinates": [ -10.3363192, 8.4616449 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Kenema Government Hospital (KGH)", "Primary Organisation": "MOH Sierra Leone", "Type1": "ETC", "Latitude": 7.875170, "Longitude": -11.184730 }, "geometry": { "type": "Point", "coordinates": [ -11.18473, 7.87517 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Rural Kenema Field Hospital ETC", "Primary Organisation": "IFRC", "Type1": "ETC", "Latitude": 8.005088, "Longitude": -11.115603 }, "geometry": { "type": "Point", "coordinates": [ -11.115603, 8.005088 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Koidu Town", "Primary Organisation": "Wellbody", "Type1": "ETC", "Latitude": 8.642281, "Longitude": -10.971631 }, "geometry": { "type": "Point", "coordinates": [ -10.9716307, 8.6422812 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Arab Hospital", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 9.323289, "Longitude": -12.195500 }, "geometry": { "type": "Point", "coordinates": [ -12.1955, 9.323289 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Paramedical School", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 9.323289, "Longitude": -12.195500 }, "geometry": { "type": "Point", "coordinates": [ -12.1955, 9.323289 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Regional Government Hospital", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 9.323289, "Longitude": -12.195500 }, "geometry": { "type": "Point", "coordinates": [ -12.1955, 9.323289 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "Addax Bio Energy Sierra Leone", "Type1": "ETC", "Latitude": 8.888280, "Longitude": -12.043914 }, "geometry": { "type": "Point", "coordinates": [ -12.0439135, 8.8882804 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Port Loko Hospital", "Primary Organisation": null, "Type1": "ETC", "Latitude": 8.765740, "Longitude": -12.785030 }, "geometry": { "type": "Point", "coordinates": [ -12.78503, 8.76574 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Lunsar ETU", "Primary Organisation": "IMC", "Type1": "ETC", "Latitude": 8.686834, "Longitude": -12.543584 }, "geometry": { "type": "Point", "coordinates": [ -12.543584, 8.6868342 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": "GOAL", "Type1": "ETC", "Latitude": 8.763889, "Longitude": -12.779903 }, "geometry": { "type": "Point", "coordinates": [ -12.7799027, 8.7638892 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Bo Town SLRA", "Primary Organisation": "MSF", "Type1": "ETC", "Latitude": 7.962030, "Longitude": -11.736477 }, "geometry": { "type": "Point", "coordinates": [ -11.7364775, 7.9620299 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Gondama Referral Center", "Primary Organisation": "MSF", "Type1": "Triage", "Latitude": 7.865680, "Longitude": -11.708370 }, "geometry": { "type": "Point", "coordinates": [ -11.70837, 7.86568 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Bo Government Hospital", "Primary Organisation": null, "Type1": "Triage", "Latitude": 7.963986, "Longitude": -11.741801 }, "geometry": { "type": "Point", "coordinates": [ -11.7418008, 7.963986 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": null, "Primary Organisation": null, "Type1": "ETC", "Latitude": 8.159153, "Longitude": -12.431437 }, "geometry": { "type": "Point", "coordinates": [ -12.4314374, 8.159153 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Hasting Police Centre (Police Training School)", "Primary Organisation": "MOH Sierra Leone", "Type1": "ETC", "Latitude": 8.387303, "Longitude": -13.138720 }, "geometry": { "type": "Point", "coordinates": [ -13.1387201, 8.387303 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Newton Police Centre", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 8.336255, "Longitude": -13.007144 }, "geometry": { "type": "Point", "coordinates": [ -13.0071441, 8.3362545 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Rokupa Hospital", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 8.459444, "Longitude": -13.174167 }, "geometry": { "type": "Point", "coordinates": [ -13.174167, 8.459444 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Sierra Leone-China Friendship Hospital (Jui Hospital)", "Primary Organisation": "Chinese CDC", "Type1": "Holding Centre", "Latitude": 8.409225, "Longitude": -13.133401 }, "geometry": { "type": "Point", "coordinates": [ -13.1334013, 8.4092254 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Kerry Town ETC", "Primary Organisation": "Save the Children", "Type1": "ETC", "Latitude": 8.269334, "Longitude": -13.087852 }, "geometry": { "type": "Point", "coordinates": [ -13.0878517, 8.2693344 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Kerry Town ETC - Section for care of Health Care Workers", "Primary Organisation": "UK MoD", "Type1": "ETC", "Latitude": 8.269334, "Longitude": -13.087852 }, "geometry": { "type": "Point", "coordinates": [ -13.0878517, 8.2693344 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Lakka Isolation Hospital", "Primary Organisation": "EMERGENCY Italian NGO", "Type1": "Holding Centre", "Latitude": 8.397367, "Longitude": -13.263550 }, "geometry": { "type": "Point", "coordinates": [ -13.2635501, 8.3973667 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "34 Military Hospital", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 8.475363, "Longitude": -13.263585 }, "geometry": { "type": "Point", "coordinates": [ -13.2635852, 8.4753631 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Connaught Hospital", "Primary Organisation": "King's Health Partners UK", "Type1": "Holding Centre", "Latitude": 8.488230, "Longitude": -13.238400 }, "geometry": { "type": "Point", "coordinates": [ -13.2384, 8.48823 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Macauley Street Hospital", "Primary Organisation": "No Partner Identified", "Type1": "Holding Centre", "Latitude": 8.479002, "Longitude": -13.268016 }, "geometry": { "type": "Point", "coordinates": [ -13.2680158, 8.4790017 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "PCMH (Princess Christian Maternity) Hospital", "Primary Organisation": null, "Type1": "Holding Centre", "Latitude": 8.490200, "Longitude": -13.218900 }, "geometry": { "type": "Point", "coordinates": [ -13.2189, 8.4902 ] } }, -{ "type": "Feature", "properties": { "Status": "Functional", "Centre Name": "Ola During Children?s Hospital", "Primary Organisation": "Cap Anamur", "Type1": "Triage", "Latitude": 8.490000, "Longitude": -13.218700 }, "geometry": { "type": "Point", "coordinates": [ -13.2187, 8.49 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Lakka 2", "Primary Organisation": "South Africa FMT", "Type1": "ETC", "Latitude": 8.479002, "Longitude": -13.268016 }, "geometry": { "type": "Point", "coordinates": [ -13.2680158, 8.4790017 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Goderich", "Primary Organisation": "EMERGENCY", "Type1": "ETC", "Latitude": 8.432966, "Longitude": -13.288871 }, "geometry": { "type": "Point", "coordinates": [ -13.2888708, 8.4329664 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "AHS - Waterloo Hospital", "Primary Organisation": "MOH Sierra Leone", "Type1": "ETC", "Latitude": 8.338977, "Longitude": -13.069573 }, "geometry": { "type": "Point", "coordinates": [ -13.0695732, 8.3389767 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Hastings 2", "Primary Organisation": null, "Type1": "ETC", "Latitude": 8.479002, "Longitude": -13.268016 }, "geometry": { "type": "Point", "coordinates": [ -13.2680158, 8.4790017 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Hastings", "Primary Organisation": "RSLAF", "Type1": "ETC", "Latitude": 8.479002, "Longitude": -13.268016 }, "geometry": { "type": "Point", "coordinates": [ -13.2680158, 8.4790017 ] } }, -{ "type": "Feature", "properties": { "Status": "Pending", "Centre Name": "Centre pour le D?veloppement des Vaccins (CVD-Mali)", "Primary Organisation": null, "Type1": "Triage", "Latitude": 12.650008, "Longitude": -8.000001 }, "geometry": { "type": "Point", "coordinates": [ -8.0000014, 12.6500083 ] } } +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.1", "the_geom": "POINT (-13.280407348999859 8.424321007000174)", "OBJECTID": 0, "ECF_Code": "SLE_0001", "ECF_Name": "Goderich", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 0, "Beds_Plan": 100, "Open_Date": "2014-12-15T00:00:00", "Partner": "EMERGENCY", "Lead_Donor": "DFID", "Latitude": 8.424321, "Longitude": -13.280407, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.280407, 8.424321 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.2", "the_geom": "POINT (-13.140651562999949 8.382141677999982)", "OBJECTID": 1, "ECF_Code": "SLE_0002", "ECF_Name": "Hastings Eastside", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 0, "Beds_Plan": 100, "Open_Date": "2014-12-20T00:00:00", "Partner": "RSLAF\/KINGS", "Lead_Donor": "DFID", "Latitude": 8.382142, "Longitude": -13.140652, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.140652, 8.382142 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.3", "the_geom": "POINT (-13.125850928999967 8.382704501000092)", "OBJECTID": 2, "ECF_Code": "SLE_0004", "ECF_Name": "Police Training Sch-Hastings 1", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 120, "Beds_Plan": 120, "Open_Date": "2014-12-01T00:00:00", "Partner": "MOH \/ RSLAF", "Lead_Donor": "GOSL", "Latitude": 8.382705, "Longitude": -13.125851, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.125851, 8.382705 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.4", "the_geom": "POINT (-13.086370684999906 8.25709548399999)", "OBJECTID": 3, "ECF_Code": "SLE_0005", "ECF_Name": "Kerry Town", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 51, "Beds_Plan": 80, "Open_Date": "2014-12-01T00:00:00", "Partner": "Save the Children", "Lead_Donor": "DFID", "Latitude": 8.257095, "Longitude": -13.086371, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.086371, 8.257095 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.5", "the_geom": "POINT (-12.762389999999982 8.776650000000132)", "OBJECTID": 4, "ECF_Code": "SLE_0006", "ECF_Name": "Port Loko", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0204", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-11-30T00:00:00", "Partner": "GOAL \/ UK NHS \/ DEMA", "Lead_Donor": "DFID", "Latitude": 8.776650, "Longitude": -12.762390, "Accuracy": "CDC Checked Coordinate", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.76239, 8.77665 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.6", "the_geom": "POINT (-12.576559999999972 8.698540000000037)", "OBJECTID": 5, "ECF_Code": "SLE_0007", "ECF_Name": "Lunsar, Port Loko", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0204", "Beds_Open": 9, "Beds_Plan": 75, "Open_Date": "2014-12-01T00:00:00", "Partner": "IMC", "Lead_Donor": "DFID\/ECHO\/OFDA", "Latitude": 8.698540, "Longitude": -12.576560, "Accuracy": "CDC Checked Coordinate", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.57656, 8.69854 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.7", "the_geom": "POINT (-12.412037406999957 8.176575114000116)", "OBJECTID": 6, "ECF_Code": "SLE_0008", "ECF_Name": "Moyamba", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0303", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-12-14T00:00:00", "Partner": "MDM \/ Norway \/ UK NHS", "Lead_Donor": "DFID", "Latitude": 8.176575, "Longitude": -12.412037, "Accuracy": null, "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.412037, 8.176575 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.8", "the_geom": "POINT (-12.030972037999845 8.798044033000053)", "OBJECTID": 7, "ECF_Code": "SLE_0009", "ECF_Name": "Makeni", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0201", "Beds_Open": 0, "Beds_Plan": 100, "Open_Date": "2014-11-26T00:00:00", "Partner": "IMC", "Lead_Donor": "DFID", "Latitude": 8.798044, "Longitude": -12.030972, "Accuracy": null, "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.030972, 8.798044 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.9", "the_geom": "POINT (-13.149548493999873 8.386860908000187)", "OBJECTID": 8, "ECF_Code": "SLE_0010", "ECF_Name": "Jui Hospital - Freetown", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 12, "Beds_Plan": 22, "Open_Date": "2014-12-17T00:00:00", "Partner": "Chinese CDC", "Lead_Donor": "China", "Latitude": 8.386861, "Longitude": -13.149548, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.149548, 8.386861 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.10", "the_geom": "POINT (-13.26499999999993 8.403000000000077)", "OBJECTID": 9, "ECF_Code": "SLE_0011", "ECF_Name": "Lakka Hospital ETU", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 12, "Beds_Plan": 12, "Open_Date": "2014-12-01T00:00:00", "Partner": "Emergency", "Lead_Donor": null, "Latitude": 8.403000, "Longitude": -13.265000, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.265, 8.403 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.11", "the_geom": "POINT (-10.55759999999998 8.298460000000034)", "OBJECTID": 10, "ECF_Code": "SLE_0012", "ECF_Name": "Kailahun MSF", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0101", "Beds_Open": 72, "Beds_Plan": 100, "Open_Date": "2014-12-01T00:00:00", "Partner": "Canadian Outfit", "Lead_Donor": "Canadian Outfit", "Latitude": 8.298460, "Longitude": -10.557600, "Accuracy": "MSF GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -10.5576, 8.29846 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.12", "the_geom": "POINT (-11.115599999999972 8.005088000000057)", "OBJECTID": 11, "ECF_Code": "SLE_0013", "ECF_Name": "Rural Kenema Field Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0102", "Beds_Open": 25, "Beds_Plan": 25, "Open_Date": "2014-12-01T00:00:00", "Partner": "IFRC", "Lead_Donor": "IFRC", "Latitude": 8.005088, "Longitude": -11.115600, "Accuracy": "British Red Cross GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -11.1156, 8.005088 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.13", "the_geom": "POINT (-10.95946422999998 8.63389924200004)", "OBJECTID": 12, "ECF_Code": "SLE_0014", "ECF_Name": "Kono - Koidu Town", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0103", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-11-30T00:00:00", "Partner": "Well Body\/ Partners in Health", "Lead_Donor": "GOSL", "Latitude": 8.633899, "Longitude": -10.959464, "Accuracy": "WHO provided", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -10.959464, 8.633899 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.14", "the_geom": "POINT (-11.669699999999978 7.9410400000001005)", "OBJECTID": 13, "ECF_Code": "SLE_0015", "ECF_Name": "Bo Town SLRA", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0301", "Beds_Open": 50, "Beds_Plan": 50, "Open_Date": "2014-12-01T00:00:00", "Partner": "MSF Amsterdam", "Lead_Donor": "CDC", "Latitude": 7.941040, "Longitude": -11.669700, "Accuracy": "MSF GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -11.6697, 7.94104 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.15", "the_geom": "POINT (-13.07354799999996 8.33562100000006)", "OBJECTID": 14, "ECF_Code": "SLE_0017", "ECF_Name": "AHS - Waterloo Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 62, "Beds_Plan": 62, "Open_Date": "2014-11-16T00:00:00", "Partner": "MOH \/ Cuban Brigade \/ Cap Anamur", "Lead_Donor": "GOSL", "Latitude": 8.335621, "Longitude": -13.073548, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.073548, 8.335621 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.16", "the_geom": "POINT (-12.058447000000001 8.846110000000067)", "OBJECTID": 15, "ECF_Code": "SLE_0020", "ECF_Name": "Magbenteh Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0201", "Beds_Open": 100, "Beds_Plan": 110, "Open_Date": "2014-11-15T00:00:00", "Partner": "MoH\/AU\/WHO", "Lead_Donor": "GOSL", "Latitude": 8.846110, "Longitude": -12.058447, "Accuracy": null, "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.058447, 8.84611 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.17", "the_geom": "POINT (-13.008915000000002 8.335112000000038)", "OBJECTID": 16, "ECF_Code": "SLE_0021", "ECF_Name": "HIM - Newton", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 0, "Beds_Plan": 120, "Open_Date": "2015-01-15T00:00:00", "Partner": "Unknown", "Lead_Donor": "UNMEER", "Latitude": 8.335112, "Longitude": -13.008915, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.008915, 8.335112 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.18", "the_geom": "POINT (-13.125850928999967 8.382704501000092)", "OBJECTID": 17, "ECF_Code": "SLE_0022", "ECF_Name": "Police Training Sch-Hastings 2", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 200, "Beds_Plan": 200, "Open_Date": "2014-12-18T00:00:00", "Partner": "MOH \/ RSLAF \/ Cuban Brigade", "Lead_Donor": "GOSL", "Latitude": 8.382705, "Longitude": -13.125851, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.125851, 8.382705 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.19", "the_geom": "POINT (-12.772400000000005 8.749840000000006)", "OBJECTID": 18, "ECF_Code": "SLE_0023", "ECF_Name": "Maforki ETU", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0204", "Beds_Open": 44, "Beds_Plan": 80, "Open_Date": "2014-12-01T00:00:00", "Partner": "MoH \/ Plan \/ Cuban Brigade", "Lead_Donor": "GOSL", "Latitude": 8.749840, "Longitude": -12.772400, "Accuracy": "CDC Checked Coordinate", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -12.7724, 8.74984 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.20", "the_geom": "POINT (-13.261241999999868 8.464217000000076)", "OBJECTID": 19, "ECF_Code": "SLE_0109", "ECF_Name": "34 Military Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0402", "Beds_Open": 30, "Beds_Plan": 30, "Open_Date": "2014-12-01T00:00:00", "Partner": "RSLAF", "Lead_Donor": "MOD\/RSLAF", "Latitude": 8.464217, "Longitude": -13.261242, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.261242, 8.464217 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.21", "the_geom": "POINT (-13.086370684999906 8.25709548399999)", "OBJECTID": 21, "ECF_Code": "SLE_0005", "ECF_Name": "Kerry Town - Section for Health Care Workers", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0401", "Beds_Open": 12, "Beds_Plan": 20, "Open_Date": "2014-12-01T00:00:00", "Partner": "DFID", "Lead_Donor": "DFID", "Latitude": 8.270000, "Longitude": -13.086371, "Accuracy": "WAERC Confirmed Coordinates", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.086371, 8.27 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.22", "the_geom": "POINT (-13.223000000000013 8.480999999999995)", "OBJECTID": 22, "ECF_Code": "SLE_0111", "ECF_Name": "Freetown - SA Build", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0402", "Beds_Open": 0, "Beds_Plan": 40, "Open_Date": "2015-01-31T00:00:00", "Partner": "Right To Care", "Lead_Donor": null, "Latitude": 8.481000, "Longitude": -13.223000, "Accuracy": "WFP ETC Overview document", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.223, 8.481 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.23", "the_geom": "POINT (-13.216999999999985 8.483000000000061)", "OBJECTID": 23, "ECF_Code": "SLE_0112", "ECF_Name": "Freetown - UK Build", "Status": "Completed", "Type": "ETU", "AdminCode": "SLE0402", "Beds_Open": 0, "Beds_Plan": 100, "Open_Date": "2014-12-15T00:00:00", "Partner": "Emergency \/ MoH", "Lead_Donor": "DFID", "Latitude": 8.483000, "Longitude": -13.213000, "Accuracy": "WFP ETC Overview document", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.213, 8.483 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.24", "the_geom": "POINT (-13.245000000000005 8.489000000000033)", "OBJECTID": 24, "ECF_Code": "SLE_0113", "ECF_Name": "Prince of Wales School", "Status": "Open", "Type": "ETU", "AdminCode": "SLE0402", "Beds_Open": 8, "Beds_Plan": 64, "Open_Date": "2014-12-01T00:00:00", "Partner": "Swiss MSF", "Lead_Donor": null, "Latitude": 8.489000, "Longitude": -13.244000, "Accuracy": "UNMEER schools dataset", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.244, 8.489 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.25", "the_geom": "POINT (-13.185999999999979 8.461999999999989)", "OBJECTID": 25, "ECF_Code": "SLE_0114", "ECF_Name": "Methodist Boys High School", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0402", "Beds_Open": 0, "Beds_Plan": 60, "Open_Date": "2014-12-29T00:00:00", "Partner": "Spain MST", "Lead_Donor": null, "Latitude": 8.462000, "Longitude": -13.185000, "Accuracy": "UNMEER schools dataset", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.185, 8.462 ] } }, +{ "type": "Feature", "properties": { "FID": "sle_heal_pt_unmeer_ebolacarefacilities.26", "the_geom": "POINT (-13.178610999999876 8.61611100000016)", "OBJECTID": 26, "ECF_Code": "SLE_0115", "ECF_Name": "Hastings Airfield", "Status": "Under Construction", "Type": "ETU", "AdminCode": "SLE0204", "Beds_Open": 0, "Beds_Plan": 5, "Open_Date": "2014-12-17T00:00:00", "Partner": "Aspen Medical", "Lead_Donor": null, "Latitude": 8.616000, "Longitude": -13.178000, "Accuracy": "UNMEER airfields dataset", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Sierra Leone" }, "geometry": { "type": "Point", "coordinates": [ -13.178, 8.616 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.1", "the_geom": "POINT (-9.46558200499993 8.518240000000048)", "OBJECTID": 0, "ECF_Code": "GIN_0011", "ECF_Name": "Macenta", "Status": "Open", "Type": "ETU", "AdminCode": "GIN00800308", "Beds_Open": 70, "Beds_Plan": 70, "Open_Date": "2014-12-05T00:00:00", "Partner": "FRENCH RED CROSS", "Lead_Donor": null, "Latitude": 8.541000, "Longitude": -9.471000, "Accuracy": "WHO - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -9.471, 8.541 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.2", "the_geom": "POINT (-10.120963604999929 8.552846000000045)", "OBJECTID": 1, "ECF_Code": "GIN_0010", "ECF_Name": "Gueckedou", "Status": "Open", "Type": "ETU", "AdminCode": "GIN00800603", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-12-05T00:00:00", "Partner": "MSF", "Lead_Donor": null, "Latitude": 8.562000, "Longitude": -10.133000, "Accuracy": "WHO - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -10.133, 8.562 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.3", "the_geom": "POINT (-13.68319299999996 9.535421000000042)", "OBJECTID": 2, "ECF_Code": "GIN_0001", "ECF_Name": "Conakry Donka Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "GIN00200101", "Beds_Open": 70, "Beds_Plan": 70, "Open_Date": "2014-12-05T00:00:00", "Partner": "MSF", "Lead_Donor": null, "Latitude": 9.535000, "Longitude": -13.683000, "Accuracy": "WHO - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "Yes", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -13.683, 9.535 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.4", "the_geom": "POINT (-13.420806149999976 9.730112226000074)", "OBJECTID": 3, "ECF_Code": "GIN_0003", "ECF_Name": "Coyah", "Status": "Under Construction", "Type": "ETU", "AdminCode": "GIN00500103", "Beds_Open": 0, "Beds_Plan": 70, "Open_Date": "2014-12-01T00:00:00", "Partner": "Cuba?", "Lead_Donor": null, "Latitude": 9.730000, "Longitude": -13.421000, "Accuracy": "Needs location verified", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -13.421, 9.73 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.5", "the_geom": "POINT (-8.648371163999968 8.689988011000025)", "OBJECTID": 4, "ECF_Code": "GIN_0009", "ECF_Name": "Beyla", "Status": "Under Construction", "Type": "ETU", "AdminCode": "GIN00800101", "Beds_Open": 0, "Beds_Plan": 70, "Open_Date": "2014-12-17T00:00:00", "Partner": "WAHA", "Lead_Donor": null, "Latitude": 8.690000, "Longitude": -8.648000, "Accuracy": "Needs location verified", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -8.648, 8.69 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.6", "the_geom": "POINT (-8.819621349999977 7.755827287000045)", "OBJECTID": 5, "ECF_Code": "GIN_0007", "ECF_Name": "Nzerekore", "Status": "Open", "Type": "ETU", "AdminCode": "GIN00400203", "Beds_Open": 0, "Beds_Plan": 70, "Open_Date": "2014-12-01T00:00:00", "Partner": "ALIMA", "Lead_Donor": null, "Latitude": 7.756000, "Longitude": -8.820000, "Accuracy": "Needs location verified", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -8.82, 7.756 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.7", "the_geom": "POINT (-9.00967686 9.271138659999998)", "OBJECTID": 6, "ECF_Code": "GIN_0054", "ECF_Name": "Kerouane", "Status": "Under Construction", "Type": "ETU", "AdminCode": "GIN00400203", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-17T00:00:00", "Partner": "French Red Cross", "Lead_Donor": null, "Latitude": 9.274100, "Longitude": -9.003260, "Accuracy": "Approximate", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -9.00326, 9.2741 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.8", "the_geom": "POINT (-13.420806149999919 9.730112226000188)", "OBJECTID": 7, "ECF_Code": "GIN_0058", "ECF_Name": "Wonkifong", "Status": "Under Construction", "Type": "ETU", "AdminCode": "GIN005001", "Beds_Open": 0, "Beds_Plan": 70, "Open_Date": "2014-12-22T00:00:00", "Partner": null, "Lead_Donor": null, "Latitude": 9.730112, "Longitude": -13.420810, "Accuracy": "WHO supplied", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -13.42081, 9.730112 ] } }, +{ "type": "Feature", "properties": { "FID": "gin_heal_pt_unmeer_ebolacarefacilities.9", "the_geom": "POINT (-9.305 10.383)", "OBJECTID": 8, "ECF_Code": "GIN_0059", "ECF_Name": "Kankan", "Status": "Under Construction", "Type": "ETU", "AdminCode": "GN001004", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": null, "Partner": "MSF", "Lead_Donor": null, "Latitude": 10.383000, "Longitude": -9.305000, "Accuracy": "Needs location supplied", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Guinea" }, "geometry": { "type": "Point", "coordinates": [ -9.305, 10.383 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.1", "the_geom": "POINT (-8.234894946337647 4.676541494899709)", "OBJECTID": null, "ECF_Code": "LBR_0001", "ECF_Name": "Barclayville", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR18", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-25T00:00:00", "Partner": "PAE\/Aspen", "Lead_Donor": null, "Latitude": 4.676541, "Longitude": -8.234895, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.23489495, 4.67654149 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.2", "the_geom": "POINT (-10.490690797999946 7.063901776000023)", "OBJECTID": null, "ECF_Code": "LBR_0002", "ECF_Name": "Bopulu", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR45", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-25T00:00:00", "Partner": "PAE\/Aspen", "Lead_Donor": null, "Latitude": 7.063902, "Longitude": -10.490691, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.4906908, 7.06390178 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.3", "the_geom": "POINT (-10.033222554436634 5.888103365665643)", "OBJECTID": null, "ECF_Code": "LBR_0003", "ECF_Name": "Buchanan", "Status": "Open", "Type": "ETU", "AdminCode": "LBR09", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-06T00:00:00", "Partner": "IOM", "Lead_Donor": null, "Latitude": 5.888103, "Longitude": -10.033223, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.03322255, 5.88810337 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.4", "the_geom": "POINT (-8.859992908999914 6.489959099000089)", "OBJECTID": null, "ECF_Code": "LBR_0026", "ECF_Name": "Tappita", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR33", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-14T00:00:00", "Partner": "Heart to Heart\/PAE", "Lead_Donor": null, "Latitude": 6.489959, "Longitude": -8.859993, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.85999291, 6.4899591 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.5", "the_geom": "POINT (-9.033332999999857 5.016666999999984)", "OBJECTID": null, "ECF_Code": "LBR_0010", "ECF_Name": "Greenville", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR39", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2004-12-10T00:00:00", "Partner": "PAE\/MSB", "Lead_Donor": null, "Latitude": 5.498000, "Longitude": -8.660000, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.66, 5.498 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.6", "the_geom": "POINT (-7.692520015999946 4.393410511000127)", "OBJECTID": null, "ECF_Code": "LBR_0011", "ECF_Name": "Harper", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR27", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-20T00:00:00", "Partner": "Partners in Health\/PAE", "Lead_Donor": null, "Latitude": 4.393411, "Longitude": -7.692520, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -7.69252002, 4.39341051 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.7", "the_geom": "POINT (-7.755559999999946 5.196390000000122)", "OBJECTID": null, "ECF_Code": "LBR_0006", "ECF_Name": "Fish Town", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR42", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-14T00:00:00", "Partner": "ARC", "Lead_Donor": null, "Latitude": 5.196390, "Longitude": -7.876000, "Accuracy": "WFP", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -7.876, 5.19639 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.8", "the_geom": "POINT (-11.133826269999872 6.816054550000047)", "OBJECTID": null, "ECF_Code": "LBR_0025", "ECF_Name": "Sinje", "Status": "Open", "Type": "ETU", "AdminCode": "LBR12", "Beds_Open": 50, "Beds_Plan": 50, "Open_Date": "2014-12-06T00:00:00", "Partner": "IOM", "Lead_Donor": null, "Latitude": 6.816055, "Longitude": -11.133826, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -11.13382627, 6.81605455 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.9", "the_geom": "POINT (-8.138060197999948 6.064957047000064)", "OBJECTID": null, "ECF_Code": "LBR_0030", "ECF_Name": "Zwedru", "Status": "Open", "Type": "ETU", "AdminCode": "LBR15", "Beds_Open": 0, "Beds_Plan": 60, "Open_Date": "2015-01-12T00:00:00", "Partner": "Partners in Health\/PAE", "Lead_Donor": null, "Latitude": 6.064957, "Longitude": -8.138060, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.1380602, 6.06495705 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.10", "the_geom": "POINT (-9.74996812199987 8.416583628000126)", "OBJECTID": null, "ECF_Code": "LBR_0028", "ECF_Name": "Voinjama", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR21", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-14T00:00:00", "Partner": "WAHA\/GOAL\/PAE\/Aspen", "Lead_Donor": null, "Latitude": 8.416584, "Longitude": -9.749968, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -9.74996812, 8.41658363 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.11", "the_geom": "POINT (-10.773249999999962 6.287230000000079)", "OBJECTID": null, "ECF_Code": "LBR_0012", "ECF_Name": "JFK hospital", "Status": "Closed", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": null, "Partner": "MoH\/WHO", "Lead_Donor": null, "Latitude": 6.287230, "Longitude": -10.773250, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.77325, 6.28723 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.12", "the_geom": "POINT (-10.700455938999937 6.256756063000125)", "OBJECTID": null, "ECF_Code": "LBR_0021", "ECF_Name": "SKD 1", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-15T00:00:00", "Partner": "IRC", "Lead_Donor": null, "Latitude": 6.256756, "Longitude": -10.700456, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.70045594, 6.25675606 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.13", "the_geom": "POINT (-10.700525704999961 6.257445946000075)", "OBJECTID": null, "ECF_Code": "LBR_0022", "ECF_Name": "SKD 2", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 50, "Beds_Plan": 50, "Open_Date": "2014-12-22T00:00:00", "Partner": "German RC\/German Armed Forces", "Lead_Donor": null, "Latitude": 6.257446, "Longitude": -10.700526, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.7005257, 6.25744595 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.14", "the_geom": "POINT (-10.731190999999853 6.268152000000043)", "OBJECTID": null, "ECF_Code": "LBR_0017", "ECF_Name": "Congo Town 2 (old MoD 2)", "Status": "Closed", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": null, "Partner": "MBS Sweden", "Lead_Donor": null, "Latitude": 6.268152, "Longitude": -10.731191, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.731191, 6.268152 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.15", "the_geom": "POINT (-10.342221360999929 6.53993904400005)", "OBJECTID": null, "ECF_Code": "LBR_0014", "ECF_Name": "Kakata II", "Status": "Closed", "Type": "ETU", "AdminCode": "LBR24", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": "2014-12-02T00:00:00", "Partner": null, "Lead_Donor": null, "Latitude": 6.539939, "Longitude": -10.342221, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.34222136, 6.53993904 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.16", "the_geom": "POINT (-11.364704647999929 6.752012309999998)", "OBJECTID": null, "ECF_Code": "LBR_0024", "ECF_Name": "Robertsport", "Status": "Closed", "Type": "ETU", "AdminCode": "LBR12", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": null, "Partner": null, "Lead_Donor": null, "Latitude": 6.752012, "Longitude": -11.364705, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -11.36470465, 6.75201231 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.17", "the_geom": "POINT (-8.977700000000027 7.235110000000134)", "OBJECTID": null, "ECF_Code": "LBR_0048", "ECF_Name": "Ganta", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR33", "Beds_Open": 0, "Beds_Plan": 10, "Open_Date": "2014-12-13T00:00:00", "Partner": "Nimba CHT \/ PCI", "Lead_Donor": null, "Latitude": 7.237500, "Longitude": -8.981400, "Accuracy": "WHO provided", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.9814, 7.2375 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.18", "the_geom": "POINT (-10.700279999999964 6.244439994000118)", "OBJECTID": null, "ECF_Code": "LBR_0019", "ECF_Name": "ELWA 3", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 60, "Beds_Plan": 60, "Open_Date": "2014-12-02T00:00:00", "Partner": "MSF Brussels", "Lead_Donor": null, "Latitude": 6.244440, "Longitude": -10.700280, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.70028, 6.24443999 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.19", "the_geom": "POINT (-10.695999998999923 6.239650002000076)", "OBJECTID": null, "ECF_Code": "LBR_0018", "ECF_Name": "ELWA 2", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-12-02T00:00:00", "Partner": "MoH\/WHO", "Lead_Donor": null, "Latitude": 6.239650, "Longitude": -10.696000, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.696, 6.23965 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.20", "the_geom": "POINT (-10.703363224695579 6.258009053209321)", "OBJECTID": null, "ECF_Code": "LBR_0110", "ECF_Name": "SKD 3 - Chinese", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-12-02T00:00:00", "Partner": "Chinese FMT", "Lead_Donor": null, "Latitude": 6.258009, "Longitude": -10.703363, "Accuracy": "MGRS Coordinate Provided by US DoD", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.70336322, 6.25800905 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.21", "the_geom": "POINT (-10.469719989999874 6.353059977000157)", "OBJECTID": null, "ECF_Code": "LBR_0005", "ECF_Name": "Firestone", "Status": "Open", "Type": "ETU", "AdminCode": "LBR24", "Beds_Open": 31, "Beds_Plan": 31, "Open_Date": "2014-12-09T00:00:00", "Partner": "Firestone", "Lead_Donor": null, "Latitude": 6.353060, "Longitude": -10.469720, "Accuracy": "British Red Cross - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.46971999, 6.35305998 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.22", "the_geom": "POINT (-10.820409883999957 6.868750729000169)", "OBJECTID": null, "ECF_Code": "LBR_0027", "ECF_Name": "Tubmanburg", "Status": "Open", "Type": "ETU", "AdminCode": "LBR03", "Beds_Open": 100, "Beds_Plan": 100, "Open_Date": "2014-12-09T00:00:00", "Partner": "IOM", "Lead_Donor": null, "Latitude": 6.868751, "Longitude": -10.820410, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.82040988, 6.86875073 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.23", "the_geom": "POINT (-10.792832569999916 6.389035218000004)", "OBJECTID": null, "ECF_Code": "LBR_0023", "ECF_Name": "Unity Conference Centre", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 50, "Beds_Plan": 50, "Open_Date": "2014-12-09T00:00:00", "Partner": "MoH", "Lead_Donor": null, "Latitude": 6.389035, "Longitude": -10.792833, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.79283257, 6.38903522 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.24", "the_geom": "POINT (-9.55514399999987 7.004669000000035)", "OBJECTID": null, "ECF_Code": "LBR_0009", "ECF_Name": "Suakoko \/ Gbarnga", "Status": "Open", "Type": "ETU", "AdminCode": "LBR06", "Beds_Open": 71, "Beds_Plan": 71, "Open_Date": "2014-12-09T00:00:00", "Partner": "IMC", "Lead_Donor": null, "Latitude": 7.004669, "Longitude": -9.555144, "Accuracy": "British Red Cross - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -9.555144, 7.004669 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.25", "the_geom": "POINT (-10.787113499999919 6.384371600000179)", "OBJECTID": null, "ECF_Code": "LBR_0020", "ECF_Name": "Island Clinic", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 150, "Beds_Plan": 150, "Open_Date": "2014-12-09T00:00:00", "Partner": "MoH\/WHO\/AU", "Lead_Donor": null, "Latitude": 6.384372, "Longitude": -10.787114, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.7871135, 6.3843716 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.26", "the_geom": "POINT (-10.801388999999972 6.313333000000171)", "OBJECTID": null, "ECF_Code": "LBR_0015", "ECF_Name": "Monrovia Medical Unit", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 22, "Beds_Plan": 22, "Open_Date": "2014-12-09T00:00:00", "Partner": "USPHS", "Lead_Donor": null, "Latitude": 6.313333, "Longitude": -10.801389, "Accuracy": "Needs location verified", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.801389, 6.313333 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.27", "the_geom": "POINT (-10.205756652999867 8.37891129600007)", "OBJECTID": null, "ECF_Code": "LBR_0007", "ECF_Name": "Foya", "Status": "Closed", "Type": "ETU", "AdminCode": "LBR21", "Beds_Open": 0, "Beds_Plan": 0, "Open_Date": "2014-12-09T00:00:00", "Partner": "MSF", "Lead_Donor": null, "Latitude": 8.378911, "Longitude": -10.205757, "Accuracy": "British Red Cross - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.20575665, 8.3789113 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.28", "the_geom": "POINT (-10.734170003999964 6.27027999500018)", "OBJECTID": null, "ECF_Code": "LBR_0016", "ECF_Name": "Congo Town 1 (old MoD)", "Status": "Open", "Type": "ETU", "AdminCode": "LBR30", "Beds_Open": 40, "Beds_Plan": 40, "Open_Date": "2014-12-02T00:00:00", "Partner": "AU\/Cuban FMT\/MoH", "Lead_Donor": null, "Latitude": 6.270280, "Longitude": -10.734170, "Accuracy": "British Red Cross - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.73417, 6.27028 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.29", "the_geom": "POINT (-10.360384231999944 6.527523558000041)", "OBJECTID": null, "ECF_Code": "LBR_0013", "ECF_Name": "Kakata 1", "Status": "Open", "Type": "ETU", "AdminCode": "LBR24", "Beds_Open": 50, "Beds_Plan": 50, "Open_Date": "2014-12-02T00:00:00", "Partner": "IMC", "Lead_Donor": null, "Latitude": 6.527524, "Longitude": -10.360384, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -10.36038423, 6.52752356 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.30", "the_geom": "POINT (-8.981322865999914 7.237326592000045)", "OBJECTID": null, "ECF_Code": "LBR_0008", "ECF_Name": "Ganta United Methodist Hospital", "Status": "Open", "Type": "ETU", "AdminCode": "LBR33", "Beds_Open": 36, "Beds_Plan": 36, "Open_Date": "2014-12-18T00:00:00", "Partner": "Methodist Church \/ PCI", "Lead_Donor": null, "Latitude": 7.237327, "Longitude": -8.981323, "Accuracy": "WFP - GPS", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -8.98132287, 7.23732659 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.31", "the_geom": "POINT (-9.648000000000025 5.687000000000069)", "OBJECTID": null, "ECF_Code": "LBR_0116", "ECF_Name": "Gbediah Town", "Status": "Under Construction", "Type": "ETU", "AdminCode": "LBR36", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-25T00:00:00", "Partner": "PAE\/Aspen", "Lead_Donor": null, "Latitude": 5.483000, "Longitude": -9.588000, "Accuracy": "WHO", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -9.588, 5.483 ] } }, +{ "type": "Feature", "properties": { "FID": "lbr_heal_pt_unmeer_ebolacarefacilities.32", "the_geom": "POINT (-9.428165700000022 7.778799499999991)", "OBJECTID": null, "ECF_Code": "LBR_0119", "ECF_Name": "Zorzor", "Status": "Planned", "Type": "ETU", "AdminCode": "LBR21", "Beds_Open": 0, "Beds_Plan": 50, "Open_Date": "2014-12-19T00:00:00", "Partner": "PAE \/ Aspend", "Lead_Donor": null, "Latitude": 7.776000, "Longitude": -9.423000, "Accuracy": "OSM settlement", "Updated": "2014-12-15T00:00:00", "LabPresent": "No", "Country": "Liberia" }, "geometry": { "type": "Point", "coordinates": [ -9.423, 7.776 ] } } ] }; diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index 99df3352ce..9015506077 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.5.13' +hdx_version = 'v0.5.14'
OCHA-DAP__hdx-ckan-2012
Title of the Colombia page should be "Colombia Country Page" or alike Right now the title of the [Colombia country page](https://data.hdx.rwlabs.org/group/col) is "Colombia crisis page". I think it should read "Colombia Country Page" or similar. Any ideas? ![screen shot 2014-12-11 at 12 39 13 pm](https://cloud.githubusercontent.com/assets/953118/5398732/e5a76cee-8132-11e4-9f55-900e1c156f6a.png)
[ { "content": "hdx_version = 'v0.5.10'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
[ { "content": "hdx_version = 'v0.5.11'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py" } ]
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/indicator.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/indicator.css index ebd7d5e178..82efcaf264 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/indicator.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/css/indicator.css @@ -352,7 +352,7 @@ /* Actions */ .indicator-actions a.btn.hdx-btn { - margin-right: 12px; + margin-left: 12px; } /* .indicator-actions { margin-top: 10px; @@ -420,10 +420,4 @@ width: 600px; } -.align-bottom-right { - position:absolute; - bottom: 0px; - right: 0px; -} - /* END - Dataset specific styles */ diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html index b49fe5cdb2..2fcfcaa96a 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html @@ -1,6 +1,6 @@ {% extends "crisis-base.html" %} -{% block subtitle %}{{ _("Colombia - Humanitarian Data Exchange") }}{% endblock %} +{% block subtitle %}{{ _("Colombia") }}{% endblock %} {% block crisis_title %} {{ _("Colombia") }} {% endblock %} {% block breadcrumb_content %} <li>{% link_for _('Groups'), controller='group', action='index' %}</li> @@ -55,13 +55,13 @@ <div class="col-xs-7"> <div class="row"> <div class="col-xs-12 graph-title"> - Number of Internally Displaced People <span>OCHA - <a href="/dataset/idps-data-by-year">data</a></span> + Number of Internally Displaced People <span>OCHA - <a href="/dataset/idps-data-by-year">Data</a></span> </div> <div class="col-xs-12"> <div id="graph1"></div> </div> <div class="col-xs-12 graph-title"> - Number of People with Access Constraints <span>OCHA - <a href="/dataset/restricciones-de-acceso">data</a></span> + Number of People with Access Constraints <span>OCHA - <a href="/dataset/restricciones-de-acceso">Data</a></span> </div> <div class="col-xs-12"> <div id="graph2"></div> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/indicator/snippets/share_button.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/indicator/snippets/share_button.html index 99fa42a937..85cadd3f86 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/indicator/snippets/share_button.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/indicator/snippets/share_button.html @@ -31,7 +31,7 @@ {% endif %} </a> {# DON'T CHANGE THE XXXX IT'S A PLACEHOLDER THE JS NEEDS #} -<div id="{{ dataset_social_wrapper }}" class="popover-wrapper"></div> +<span id="{{ dataset_social_wrapper }}" class="popover-wrapper"></span> <div style="display:none" id="{{ dataset_social }}"> <div class="resource-social"> <a href="https://plus.google.com/share?url=XXXX" target="_blank"> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/hdx_read.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/hdx_read.html index 60a004f02a..57d60b9e0e 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/hdx_read.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/package/hdx_read.html @@ -24,15 +24,17 @@ <h1 class="itemTitle"> </div> </div> <div class="row mBottom15"> - <div class="col-xs-12"> - {% if c.pkg_notes_formatted %} - <div class="notes embedded-content dataset-notes"> - {{ c.pkg_notes_formatted }} - </div> - {% endif %} - {% snippet "indicator/snippets/indicator_actions_menu.html", pkg = pkg, is_indicator=False, classes='indicator-actions followButtonContainer align-bottom-right' %} - </div> - </div> + <div class="col-xs-12"> + {% if c.pkg_notes_formatted %} + <div class="notes embedded-content"> + {{ c.pkg_notes_formatted }} + </div> + {% endif %} + </div> + <div class="col-xs-12 align-bottom-right"> + {% snippet "indicator/snippets/indicator_actions_menu.html", pkg = pkg, is_indicator=False, classes='indicator-actions followButtonContainer pull-right' %} + </div> + </div> </div> <div class="col-xs-12"> diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py index 741c11dd25..4e130f2f92 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.5.10' +hdx_version = 'v0.5.11'
pwndbg__pwndbg-381
Broken `entry` command ### Description The `entry` command pass arguments differently then the `run` command. ### Steps to reproduce ``` [dc@dc:pwndbg|dev *$%]$ gdb python Loaded 113 commands. Type pwndbg [filter] for a list. Reading symbols from python...(no debugging symbols found)...done. pwndbg> set exception-verbose on Set whether to print a full stacktracefor exceptions raised in Pwndbg commands to True pwndbg> run -c "print(1); print(2)" Starting program: /usr/bin/python -c "print(1); print(2)" [Thread debugging using libthread_db enabled] Using host libthread_db library "/usr/lib/libthread_db.so.1". 1 2 [Inferior 1 (process 20590) exited normally] pwndbg> entry -c "print(1); print(2)" ('-c', 'print(1); print(2)') Running '%s' run -c print(1); print(2) /bin/bash: -c: line 0: syntax error near unexpected token `(' /bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)' Traceback (most recent call last): File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 100, in __call__ return self.function(*args, **kwargs) File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 181, in _OnlyWithFile return function(*a, **kw) File "/home/dc/installed/pwndbg/pwndbg/commands/start.py", line 72, in entry gdb.execute(run, from_tty=False) gdb.error: During startup program exited with code 1. If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues (Please don't forget to search if it hasn't been reported before) PS: Pull requests are welcome ``` ### My version ``` pwndbg> version Gdb: GNU gdb (GDB) 8.0.1 Python: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0] Pwndbg: 1.0.0 build: 5811010 ```
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.color as C\nimport pwndbg.events\nimport pwndbg.gdbutils\nimport pwndbg.memoize\n\nfuncs_list_str = ', '.join(C.purple('$' + f.name) for f in pwndbg.gdbutils.functions.functions)\n\nhint_lines = (\n 'loaded %i commands. Type %s for a list.' % (len(pwndbg.commands.commands), C.purple('pwndbg [filter]')),\n 'created %s gdb functions (can be used with print/break)' % funcs_list_str\n)\n\nfor line in hint_lines:\n print(C.light_red(pwndbg.color.bold('pwndbg: ') + line))\n\ncur = (gdb.selected_inferior(), gdb.selected_thread())\n\n\ndef prompt_hook(*a):\n global cur\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.events.after_reload(start=False)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\n\ngdb.prompt_hook = prompt_hook\n", "path": "pwndbg/prompt.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.color as C\nimport pwndbg.events\nimport pwndbg.gdbutils\nimport pwndbg.memoize\n\nfuncs_list_str = ', '.join(C.purple('$' + f.name) for f in pwndbg.gdbutils.functions.functions)\n\nhint_lines = (\n 'loaded %i commands. Type %s for a list.' % (len(pwndbg.commands.commands), C.purple('pwndbg [filter]')),\n 'created %s gdb functions (can be used with print/break)' % funcs_list_str\n)\n\nfor line in hint_lines:\n print(C.light_red(pwndbg.color.bold('pwndbg: ') + line))\n\ncur = (gdb.selected_inferior(), gdb.selected_thread())\n\n\ndef prompt_hook(*a):\n global cur\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.events.after_reload(start=False)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\n\n\ngdb.prompt_hook = prompt_hook\n", "path": "pwndbg/prompt.py" } ]
diff --git a/pwndbg/prompt.py b/pwndbg/prompt.py index 79ff5c41925..b19e22cd3bd 100644 --- a/pwndbg/prompt.py +++ b/pwndbg/prompt.py @@ -43,4 +43,5 @@ def prompt_hook_on_stop(*a): + gdb.prompt_hook = prompt_hook
google__pytype-251
Add test_data to MANIFEST.in This PR also needs to be imported and re-exported rather than merged directly. I'm planning to use this one to test the import process fix I sent you. Fixes https://github.com/google/pytype/issues/245.
[ { "content": "# pylint: skip-file\n__version__ = '2019.02.13'\n", "path": "pytype/__version__.py" } ]
[ { "content": "# pylint: skip-file\n__version__ = '2019.03.01'\n", "path": "pytype/__version__.py" } ]
diff --git a/CHANGELOG b/CHANGELOG index ea9c93ba0..6fd775fcc 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,4 +1,4 @@ -Version (upcoming) +Version 2019.03.01 * Make pytype's type stubs loadable in Python 3.4. Version 2019.02.13 diff --git a/MANIFEST.in b/MANIFEST.in index dc42ced06..b300bb485 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,4 @@ include DESCRIPTION.rst include LICENSE include pytype/pyi/* +include pytype/test_data/* diff --git a/pytype/__version__.py b/pytype/__version__.py index 243ad403b..78817060a 100644 --- a/pytype/__version__.py +++ b/pytype/__version__.py @@ -1,2 +1,2 @@ # pylint: skip-file -__version__ = '2019.02.13' +__version__ = '2019.03.01' diff --git a/pytype/pyi/parser.yy b/pytype/pyi/parser.yy index 7e26ae217..fb2dbee67 100644 --- a/pytype/pyi/parser.yy +++ b/pytype/pyi/parser.yy @@ -13,7 +13,7 @@ %define parse.error verbose -%name-prefix "pytype" +%define api.prefix {pytype} %define api.namespace {pytype} %code requires {
sbi-dev__sbi-31
Move tests to top folder
[ { "content": "# content of test_compute.py\n\n\ndef test_compute(param1):\n assert param1 < 4", "path": "sbi/dummytests/whateverstuff.py" } ]
[ { "content": null, "path": "sbi/dummytests/whateverstuff.py" } ]
diff --git a/lfi/tests/linearGaussian_test.py b/lfi/tests/linearGaussian_test.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/sbi/dummytests/conftest.py b/sbi/dummytests/conftest.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py deleted file mode 100644 index 19064de0b..000000000 --- a/sbi/dummytests/whateverstuff.py +++ /dev/null @@ -1,5 +0,0 @@ -# content of test_compute.py - - -def test_compute(param1): - assert param1 < 4 \ No newline at end of file
django__channels-1548
Version 3.0 TODOs: - [x] v3 Consumers need as_view() equivalent. (Issue: #1531 PR: #1532) - [x] Update documentation. - [x] Deprecate `AsgiHandler` & not specifying `"http"` key to `ProtocolTypeHandler` #1541 - [x] Simplify BaseMiddleware #1535 - [x] Daphe https://github.com/django/daphne/pull/275 ? - [x] channels_redis https://github.com/django/channels_redis/pull/228 - [ ] What else...?
[ { "content": "__version__ = \"2.4.0\"\n\ntry:\n import django\n\n if django.VERSION < (3, 2):\n default_app_config = \"channels.apps.ChannelsConfig\"\nexcept ModuleNotFoundError:\n pass\n\nDEFAULT_CHANNEL_LAYER = \"default\"\n", "path": "channels/__init__.py" } ]
[ { "content": "__version__ = \"3.0.0\"\n\ntry:\n import django\n\n if django.VERSION < (3, 2):\n default_app_config = \"channels.apps.ChannelsConfig\"\nexcept ModuleNotFoundError:\n pass\n\nDEFAULT_CHANNEL_LAYER = \"default\"\n", "path": "channels/__init__.py" } ]
diff --git a/CHANGELOG.txt b/CHANGELOG.txt index e15a783b7..01dd716ec 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -1,13 +1,14 @@ Full release notes, with more details and upgrade information, are available at: https://channels.readthedocs.io/en/latest/releases -3.0.0 (UNRELEASED) +3.0.0 (2020-10-30) ------------------ -* Added support for Python 3.9. - -* Dropped support for Python 3.5, which is now end-of-life. +Updated to ASGI v3, and added support for Django 3.0+. +This is a major version change requiring updates to consumers and middleware. +Please see the full `Version 3.0.0 release notes +<https://channels.readthedocs.io/en/latest/releases/3.0.0.html>`_ for details. 2.4.0 (2019-12-18) diff --git a/channels/__init__.py b/channels/__init__.py index 7fb817ca4..b2a4c9b51 100644 --- a/channels/__init__.py +++ b/channels/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.4.0" +__version__ = "3.0.0" try: import django
django__channels-1560
type in deploying.rst I believe there is a typo in deploying.rst after "http": get_asgi_application() there should be a comma ``` import os from channels.auth import AuthMiddlewareStack from channels.routing import ProtocolTypeRouter, URLRouter from django.conf.urls import url from django.core.asgi import get_asgi_application from chat.consumers import AdminChatConsumer, PublicChatConsumer os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings") application = ProtocolTypeRouter({ # Django's ASGI application to handle traditional HTTP requests "http": get_asgi_application() ------ syntax error in dictionary? ---- # WebSocket chat handler "websocket": AuthMiddlewareStack( URLRouter([ url(r"^chat/admin/$", AdminChatConsumer.as_asgi()), url(r"^chat/$", PublicChatConsumer.as_asgi()), ]) ), }) ```
[ { "content": "__version__ = \"3.0.0\"\n\ntry:\n import django\n\n if django.VERSION < (3, 2):\n default_app_config = \"channels.apps.ChannelsConfig\"\nexcept ModuleNotFoundError:\n pass\n\nDEFAULT_CHANNEL_LAYER = \"default\"\n", "path": "channels/__init__.py" } ]
[ { "content": "__version__ = \"3.0.1\"\n\ntry:\n import django\n\n if django.VERSION < (3, 2):\n default_app_config = \"channels.apps.ChannelsConfig\"\nexcept ModuleNotFoundError:\n pass\n\nDEFAULT_CHANNEL_LAYER = \"default\"\n", "path": "channels/__init__.py" } ]
diff --git a/CHANGELOG.txt b/CHANGELOG.txt index 01dd716ec..1f983942b 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -1,6 +1,13 @@ Full release notes, with more details and upgrade information, are available at: https://channels.readthedocs.io/en/latest/releases +3.0.1 (2020-11-4) +----------------- + +* Fixes a bug in Channels 3.0 where ``SessionMiddleware`` would not correctly + isolate per-instance scopes. + + 3.0.0 (2020-10-30) ------------------ diff --git a/channels/__init__.py b/channels/__init__.py index b2a4c9b51..e6632b5f4 100644 --- a/channels/__init__.py +++ b/channels/__init__.py @@ -1,4 +1,4 @@ -__version__ = "3.0.0" +__version__ = "3.0.1" try: import django diff --git a/docs/deploying.rst b/docs/deploying.rst index 9d40c2787..d60317f44 100644 --- a/docs/deploying.rst +++ b/docs/deploying.rst @@ -33,7 +33,7 @@ Here's an example of what that ``asgi.py`` might look like: application = ProtocolTypeRouter({ # Django's ASGI application to handle traditional HTTP requests - "http": get_asgi_application() + "http": get_asgi_application(), # WebSocket chat handler "websocket": AuthMiddlewareStack( diff --git a/docs/releases/3.0.1.rst b/docs/releases/3.0.1.rst new file mode 100644 index 000000000..a544a6eba --- /dev/null +++ b/docs/releases/3.0.1.rst @@ -0,0 +1,10 @@ +3.0.1 Release Notes +=================== + +Channels 3.0.1 fixes a bug in Channels 3.0. + +Bugfixes +-------- + +* Fixes a bug in Channels 3.0 where ``SessionMiddleware`` would not correctly + isolate per-instance scopes. diff --git a/docs/releases/index.rst b/docs/releases/index.rst index 0b2d583ae..64124ff96 100644 --- a/docs/releases/index.rst +++ b/docs/releases/index.rst @@ -30,3 +30,4 @@ Release Notes 2.3.0 2.4.0 3.0.0 + 3.0.1
RedHatInsights__insights-core-3114
Futures python module is included in Python3 Insights-core currently installs the [futures module](https://pypi.org/project/futures/) in all cases for the [development] target in [setup.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/setup.py#L64). This module is only necessary for Python2 since it is included in Python3. This is only used in one place in [collect.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/insights/collect.py#L286). The `futures` module states: > It **does not** work on Python 3 due to Python 2 syntax being used in the codebase. Python 3 users should not attempt to install it, since the package is already included in the standard library. When installed it causes the latest version of `pip` to fail when installing into a virtual environment: ```python Installing build dependencies ... error ERROR: Command errored out with exit status 1: command: /home/bfahr/work/insights/insights-core/venv36/bin/python3.6 /home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip install --ignore-installed --no-user --prefix /tmp/pip-build-env-vujizkqz/overlay --no-warn-script-location --no-binary :none: --only-binary :none: -i https://pypi.org/simple -- 'setuptools>=40.8.0' wheel cwd: None Complete output (29 lines): Traceback (most recent call last): File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/__main__.py", line 29, in <module> from pip._internal.cli.main import main as _main File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main.py", line 9, in <module> from pip._internal.cli.autocompletion import autocomplete File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/autocompletion.py", line 10, in <module> from pip._internal.cli.main_parser import create_main_parser File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main_parser.py", line 8, in <module> from pip._internal.cli import cmdoptions File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/cmdoptions.py", line 23, in <module> from pip._internal.cli.parser import ConfigOptionParser File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/parser.py", line 12, in <module> from pip._internal.configuration import Configuration, ConfigurationError File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/configuration.py", line 27, in <module> from pip._internal.utils.misc import ensure_dir, enum File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/utils/misc.py", line 38, in <module> from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_vendor/tenacity/__init__.py", line 35, in <module> from concurrent import futures File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/__init__.py", line 8, in <module> from concurrent.futures._base import (FIRST_COMPLETED, File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/_base.py", line 357 raise type(self._exception), self._exception, self._traceback ^ SyntaxError: invalid syntax ---------------------------------------- ``` It was only used to create a thread pool for parallel collection in the client. We don't currently use this feature and since `futures` is not installed by the client RPM it would never be used. It is included in the default python on RHEL8 so it could be used if so desired, but again we don't currently use it.
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2<=2.11.3',\n])\n\nif (sys.version_info < (2, 7)):\n runtime.add('pyyaml>=3.10,<=3.13')\nelse:\n runtime.add('pyyaml')\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'python-gnupg==0.4.6',\n 'oyaml'\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'wheel',\n])\n\ndocs = set([\n 'docutils',\n 'Sphinx',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2<=2.11.3',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py" } ]
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2<=2.11.3',\n])\n\nif (sys.version_info < (2, 7)):\n runtime.add('pyyaml>=3.10,<=3.13')\nelse:\n runtime.add('pyyaml')\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'python-gnupg==0.4.6',\n 'oyaml'\n])\n\ndevelop = set([\n 'wheel',\n])\n\ndocs = set([\n 'docutils',\n 'Sphinx',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2<=2.11.3',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3f99b04179..22227bd564 100644 --- a/setup.py +++ b/setup.py @@ -61,7 +61,6 @@ def maybe_require(pkg): ]) develop = set([ - 'futures==3.0.5', 'wheel', ])
horovod__horovod-3745
No module named 'packaging' when install horovod It seems that the horovod v0.26.0 has some dependency problems. How long does it take for a new patch version to be released or should I pin the horovod version? ^_^ ![image](https://user-images.githubusercontent.com/32220263/195746141-2a0050f5-1eaf-4f7b-9a62-50fd6b13f2ff.png)
[ { "content": "from horovod.runner import run\n\n__version__ = '0.26.0'\n", "path": "horovod/__init__.py" } ]
[ { "content": "from horovod.runner import run\n\n__version__ = '0.26.1'\n", "path": "horovod/__init__.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ddb15c327..ef684bee93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,8 +16,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Fixed -- Fixed packaging import during install to occur after install_requires. ([#3741](https://github.com/horovod/horovod/pull/3741)) +## [v0.26.1] - 2022-10-14 + +### Fixed + +- Fixed packaging import during install to occur after install_requires. ([#3741](https://github.com/horovod/horovod/pull/3741)) ## [v0.26.0] - 2022-10-13 diff --git a/horovod/__init__.py b/horovod/__init__.py index a4a77a42a5..07eb3e8edb 100644 --- a/horovod/__init__.py +++ b/horovod/__init__.py @@ -1,3 +1,3 @@ from horovod.runner import run -__version__ = '0.26.0' +__version__ = '0.26.1'
hpcaitech__ColossalAI-2007
[BUG]: ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer' ### 🐛 Describe the bug I install colossalAI with the command `pip install colossalai==0.1.11rc3+torch1.12cu11.3 -f https://release.colossalai.org` But I get an error when follow https://github.com/hpcaitech/ColossalAI/tree/main/examples/tutorial#-run-opt-finetuning-and-inference, I just run `bash ./run_clm_synthetic.sh` and get an error as follows: ```shell ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /home/he.yan/ColossalAI/examples/tutorial/opt/opt/run_clm.py:46 in <module> │ │ │ │ 43 from colossalai.core import global_context as gpc │ │ 44 from colossalai.logging import disable_existing_loggers, get_dist_logger │ │ 45 from colossalai.nn.optimizer import HybridAdam │ │ ❱ 46 from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer │ │ 47 from colossalai.nn.parallel import ZeroDDP │ │ 48 from colossalai.tensor import ProcessGroup │ │ 49 from colossalai.utils import get_current_device, get_dataloader │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer' ``` ### Environment Python 3.8.15 torch1.12cu11.3
[ { "content": "from .initialize import (\n get_default_parser,\n initialize,\n launch,\n launch_from_openmpi,\n launch_from_slurm,\n launch_from_torch,\n)\n\n__version__ = '0.1.11rc2'\n", "path": "colossalai/__init__.py" } ]
[ { "content": "from .initialize import (\n get_default_parser,\n initialize,\n launch,\n launch_from_openmpi,\n launch_from_slurm,\n launch_from_torch,\n)\n\n__version__ = '0.1.11rc4'\n", "path": "colossalai/__init__.py" } ]
diff --git a/colossalai/__init__.py b/colossalai/__init__.py index 91df73fa97f3..ff65f0f9c5ed 100644 --- a/colossalai/__init__.py +++ b/colossalai/__init__.py @@ -7,4 +7,4 @@ launch_from_torch, ) -__version__ = '0.1.11rc2' +__version__ = '0.1.11rc4' diff --git a/version.txt b/version.txt index 30e1f7f599a0..beab45ccd574 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.11rc3 +0.1.11rc4
Zeroto521__my-data-toolkit-543
PERF: `to_set` speeds up especial for large data <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [ ] closes #xxxx - [x] whatsnew entry | data | `set(s)` | `set(s.unique())` | | -------------------- | ---------------- | ----------------- | | small, `list(range(10)` | 1.83 µs ± 31.6 ns | 1.17 ms ± 144 µs | | large, `list(range(10)*1000` | 9.67 µs ± 564 ns | 255 µs ± 14.9 µs |
[ { "content": "import pandas as pd\n\nfrom dtoolkit.accessor.register import register_index_method\n\n\n@register_index_method\ndef to_set(index: pd.Index) -> set:\n \"\"\"\n Return a :keyword:`set` of the values.\n\n A sugary syntax wraps :keyword:`set`::\n\n set(index)\n\n Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`.\n\n Returns\n -------\n set\n\n See Also\n --------\n pandas.Index.unique\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> i = pd.Index([1, 2, 2])\n >>> i\n Int64Index([1, 2, 2], dtype='int64')\n >>> i.to_set()\n {1, 2}\n \"\"\"\n\n return set(index)\n", "path": "dtoolkit/accessor/index/to_set.py" } ]
[ { "content": "import pandas as pd\n\nfrom dtoolkit.accessor.register import register_index_method\n\n\n@register_index_method\ndef to_set(index: pd.Index) -> set:\n \"\"\"\n Return a :keyword:`set` of the values.\n\n A sugary syntax wraps :keyword:`set`::\n\n set(index)\n\n Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`.\n\n Returns\n -------\n set\n\n See Also\n --------\n pandas.Index.unique\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> i = pd.Index([1, 2, 2])\n >>> i\n Int64Index([1, 2, 2], dtype='int64')\n >>> i.to_set()\n {1, 2}\n \"\"\"\n\n return set(index.unique())\n", "path": "dtoolkit/accessor/index/to_set.py" } ]
diff --git a/dtoolkit/accessor/index/to_set.py b/dtoolkit/accessor/index/to_set.py index eac96e5d6..4c990fe2a 100644 --- a/dtoolkit/accessor/index/to_set.py +++ b/dtoolkit/accessor/index/to_set.py @@ -33,4 +33,4 @@ def to_set(index: pd.Index) -> set: {1, 2} """ - return set(index) + return set(index.unique())
nautobot__nautobot-3317
Remove legacy `manage.py` <!-- NOTE: This template is for use by maintainers only. Please do not submit an issue using this template unless you have been specifically asked to do so. --> ### Proposed Changes Simply remove `manage.py` from the project root. <!-- Provide justification for the proposed change(s). --> ### Justification This was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed.
[ { "content": "#!/usr/bin/env python3\n\nimport sys\n\nfrom nautobot.core.cli import main\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manage.py" } ]
[ { "content": null, "path": "manage.py" } ]
diff --git a/changes/1634.removed b/changes/1634.removed new file mode 100644 index 00000000000..65b8eff5805 --- /dev/null +++ b/changes/1634.removed @@ -0,0 +1 @@ +Removed unnecessary legacy `manage.py` file from Nautobot repository. diff --git a/manage.py b/manage.py deleted file mode 100755 index 9a4e2b483b3..00000000000 --- a/manage.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -from nautobot.core.cli import main - - -if __name__ == "__main__": - main() diff --git a/nautobot/docs/development/getting-started.md b/nautobot/docs/development/getting-started.md index b4bf4f99ebc..f24b678fa9b 100644 --- a/nautobot/docs/development/getting-started.md +++ b/nautobot/docs/development/getting-started.md @@ -42,10 +42,14 @@ ls nautobot/ Example output: ```no-highlight -CHANGELOG.md README.md docs nautobot.code-workspace site -CONTRIBUTING.md contrib manage.py poetry.lock tasks.py -LICENSE.txt development mkdocs.yml pyproject.toml upgrade.sh -NOTICE dist nautobot scripts +CHANGELOG.md development nautobot.code-workspace +CODE_OF_CONDUCT.md docker poetry.lock +CONTRIBUTING.md docs pyproject.toml +LICENSE.txt examples renovate.json +NOTICE install.sh scripts +README.md invoke.yml.example tasks.py +SECURITY.md mkdocs.yml +changes nautobot ``` ### About Remote Repos
netket__netket-214
module 'netket' has no attribute 'MPI' With the merge #193 we have lost the MPI module
[ { "content": "# Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom . import (\n _C_netket,\n dynamics,\n exact,\n graph,\n hilbert,\n layer,\n machine,\n operator,\n optimizer,\n output,\n sampler,\n stats,\n supervised,\n unsupervised,\n utils,\n variational,\n)\n", "path": "netket/__init__.py" } ]
[ { "content": "# Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom . import (\n _C_netket,\n dynamics,\n exact,\n graph,\n hilbert,\n layer,\n machine,\n operator,\n optimizer,\n output,\n sampler,\n stats,\n supervised,\n unsupervised,\n utils,\n variational,\n)\nfrom ._C_netket import MPI, LookupReal, LookupComplex\n", "path": "netket/__init__.py" } ]
diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e13776db7..d640c75c9f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -222,7 +222,8 @@ add_library(netket MODULE Sources/pynetket.cc Sources/Dynamics/py_dynamics.cpp Sources/Machine/rbm_multival.cc Sources/Machine/rbm_spin_phase.cc Sources/Machine/rbm_spin_real.cc Sources/Machine/rbm_spin_symm.cc Sources/Machine/jastrow.cc Sources/Machine/jastrow_symm.cc - Sources/Machine/mps_periodic.cc Sources/Machine/py_machine.cc Sources/Utils/mpi_interface.cc) + Sources/Machine/mps_periodic.cc Sources/Machine/py_machine.cc + Sources/Utils/mpi_interface.cc Sources/Utils/json_utils.cc Sources/Utils/py_utils.cc) target_link_libraries(netket PUBLIC netket_lib pybind11) set_target_properties(netket PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "${PYTHON_MODULE_EXTENSION}") diff --git a/Sources/Dynamics/TimeStepper/abstract_time_stepper.hpp b/Sources/Dynamics/TimeStepper/abstract_time_stepper.hpp index 65b6cca845..ea87546b34 100644 --- a/Sources/Dynamics/TimeStepper/abstract_time_stepper.hpp +++ b/Sources/Dynamics/TimeStepper/abstract_time_stepper.hpp @@ -4,8 +4,6 @@ #include <cassert> #include <functional> -#include "Utils/json_helper.hpp" - namespace netket { namespace ode { @@ -13,13 +11,6 @@ struct TimeRange { double tmin; double tmax; double dt; - - static TimeRange FromJson(const json& pars) { - double tmin = FieldVal(pars, "StartTime"); - double tmax = FieldVal(pars, "EndTime"); - double dt = FieldVal(pars, "TimeStep"); - return {tmin, tmax, dt}; - } }; /** diff --git a/Sources/Graph/graph.hpp b/Sources/Graph/graph.hpp index 13e00c3496..1cfb5665c5 100644 --- a/Sources/Graph/graph.hpp +++ b/Sources/Graph/graph.hpp @@ -15,14 +15,9 @@ #ifndef NETKET_GRAPH_HPP #define NETKET_GRAPH_HPP -#include <array> -#include <unordered_map> -#include <vector> -#include "Utils/json_utils.hpp" -#include "Utils/memory_utils.hpp" -#include "abstract_graph.hpp" -#include "custom_graph.hpp" -#include "hypercube.hpp" -#include "lattice.hpp" +#include "Graph/abstract_graph.hpp" +#include "Graph/custom_graph.hpp" +#include "Graph/hypercube.hpp" +#include "Graph/lattice.hpp" #endif diff --git a/Sources/Hilbert/hilbert.hpp b/Sources/Hilbert/hilbert.hpp index a94961960c..8a21eb58c6 100644 --- a/Sources/Hilbert/hilbert.hpp +++ b/Sources/Hilbert/hilbert.hpp @@ -15,14 +15,9 @@ #ifndef NETKET_HILBERT_HPP #define NETKET_HILBERT_HPP -#include <memory> -#include <set> -#include "Graph/graph.hpp" -#include "Utils/json_utils.hpp" -#include "Utils/parallel_utils.hpp" -#include "abstract_hilbert.hpp" -#include "bosons.hpp" -#include "custom_hilbert.hpp" -#include "spins.hpp" +#include "Hilbert/abstract_hilbert.hpp" +#include "Hilbert/bosons.hpp" +#include "Hilbert/custom_hilbert.hpp" +#include "Hilbert/spins.hpp" #endif diff --git a/Sources/Machine/rbm_multival.hpp b/Sources/Machine/rbm_multival.hpp index 1ff428b478..e42d6e3667 100644 --- a/Sources/Machine/rbm_multival.hpp +++ b/Sources/Machine/rbm_multival.hpp @@ -12,18 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include <Eigen/Dense> -#include <iostream> +#ifndef NETKET_RBM_MULTIVAL_HPP +#define NETKET_RBM_MULTIVAL_HPP + #include <map> #include <vector> -#include "Utils/all_utils.hpp" -#include "Utils/lookup.hpp" -#include "rbm_spin.hpp" -#ifndef NETKET_RBM_MULTIVAL_HPP -#define NETKET_RBM_MULTIVAL_HPP +#include <Eigen/Dense> -#include "abstract_machine.hpp" +#include "Machine/abstract_machine.hpp" +#include "Machine/rbm_spin.hpp" namespace netket { diff --git a/Sources/Operator/MatrixWrapper/matrix_wrapper.hpp b/Sources/Operator/MatrixWrapper/matrix_wrapper.hpp index 8f511cb754..9247cfa8c7 100644 --- a/Sources/Operator/MatrixWrapper/matrix_wrapper.hpp +++ b/Sources/Operator/MatrixWrapper/matrix_wrapper.hpp @@ -4,7 +4,6 @@ #include <memory> #include "Operator/abstract_operator.hpp" -#include "Utils/json_helper.hpp" #include "abstract_matrix_wrapper.hpp" #include "dense_matrix_wrapper.hpp" diff --git a/Sources/Operator/bosonhubbard.hpp b/Sources/Operator/bosonhubbard.hpp index 37894756d0..20a8a580a2 100644 --- a/Sources/Operator/bosonhubbard.hpp +++ b/Sources/Operator/bosonhubbard.hpp @@ -22,7 +22,6 @@ #include "Graph/graph.hpp" #include "Hilbert/abstract_hilbert.hpp" #include "Utils/exceptions.hpp" -#include "Utils/json_helper.hpp" #include "Utils/messages.hpp" #include "abstract_operator.hpp" diff --git a/Sources/Operator/graph_operator.hpp b/Sources/Operator/graph_operator.hpp index c882feb45a..15beb870b6 100644 --- a/Sources/Operator/graph_operator.hpp +++ b/Sources/Operator/graph_operator.hpp @@ -21,7 +21,6 @@ #include <vector> #include "Graph/graph.hpp" #include "Hilbert/abstract_hilbert.hpp" -#include "Utils/json_helper.hpp" #include "abstract_operator.hpp" #include "local_operator.hpp" diff --git a/Sources/Output/json_output_writer.hpp b/Sources/Output/json_output_writer.hpp index 41f02493b2..369536e3e3 100644 --- a/Sources/Output/json_output_writer.hpp +++ b/Sources/Output/json_output_writer.hpp @@ -4,16 +4,13 @@ #include <cassert> #include <fstream> +#include <mpi.h> #include <nonstd/optional.hpp> #include "Machine/abstract_machine.hpp" #include "Stats/obs_manager.hpp" -#include "Utils/json_dumps.hpp" -#include "Utils/json_helper.hpp" - -#ifndef NDEBUG -#include <mpi.h> -#endif +#include "Utils/json_utils.hpp" +#include "Utils/messages.hpp" namespace netket { diff --git a/Sources/Utils/array_hasher.hpp b/Sources/Utils/array_hasher.hpp index f337e4281d..950fe23a20 100644 --- a/Sources/Utils/array_hasher.hpp +++ b/Sources/Utils/array_hasher.hpp @@ -15,11 +15,13 @@ #ifndef NETKET_ARRAYHASHER_HPP #define NETKET_ARRAYHASHER_HPP +#include <array> + namespace netket { // Special hash functor for the EdgeColors unordered_map // Same as hash_combine from boost struct ArrayHasher { - std::size_t operator()(const std::array<int, 2>& a) const { + std::size_t operator()(const std::array<int, 2>& a) const noexcept { return *reinterpret_cast<std::size_t const*>(a.data()); } }; diff --git a/Sources/Utils/array_search.hpp b/Sources/Utils/array_search.hpp index 7c62cc9195..d8d17d459b 100644 --- a/Sources/Utils/array_search.hpp +++ b/Sources/Utils/array_search.hpp @@ -15,6 +15,8 @@ #ifndef NETKET_ARRAYSEARCH_HPP #define NETKET_ARRAYSEARCH_HPP +#include "Utils/exceptions.hpp" + namespace netket { /** diff --git a/Sources/Utils/array_utils.hpp b/Sources/Utils/array_utils.hpp index e424a9b233..cb791bd8fe 100644 --- a/Sources/Utils/array_utils.hpp +++ b/Sources/Utils/array_utils.hpp @@ -7,12 +7,13 @@ #include <algorithm> #include <complex> +#include <vector> namespace netket { template <typename T> std::vector<std::vector<T>> transpose_vecvec( - const std::vector<std::vector<T>> data) { + const std::vector<std::vector<T>>& data) { typedef typename std::vector<T>::size_type size_type; // this assumes that all inner vectors have the same size and diff --git a/Sources/Utils/json_dumps.hpp b/Sources/Utils/json_dumps.hpp deleted file mode 100644 index 4334a85b75..0000000000 --- a/Sources/Utils/json_dumps.hpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef NETKET_JSON_DUMPS_HPP -#define NETKET_JSON_DUMPS_HPP - -#include <Eigen/Dense> -#include <complex> -#include <iostream> -#include <json.hpp> -#include <vector> - -#include "exceptions.hpp" - -namespace Eigen { - -template <class T> -void to_json(nlohmann::json &js, const Matrix<T, Eigen::Dynamic, 1> &v) { - std::vector<T> temp(v.size()); - for (std::size_t i = 0; i < std::size_t(v.size()); i++) { - temp[i] = v(i); - } - js = nlohmann::json(temp); -} - -template <class T> -void from_json(const nlohmann::json &js, Matrix<T, Eigen::Dynamic, 1> &v) { - std::vector<T> temp = js.get<std::vector<T>>(); - v.resize(temp.size()); - for (std::size_t i = 0; i < temp.size(); i++) { - v(i) = temp[i]; - } -} - -template <class T> -void to_json(nlohmann::json &js, - const Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &v) { - std::vector<std::vector<T>> temp(v.rows()); - for (std::size_t i = 0; i < std::size_t(v.rows()); i++) { - temp[i].resize(v.cols()); - for (std::size_t j = 0; j < std::size_t(v.cols()); j++) { - temp[i][j] = v(i, j); - } - } - js = nlohmann::json(temp); -} - -template <class T> -void from_json(const nlohmann::json &js, - Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &v) { - std::vector<std::vector<T>> temp = js.get<std::vector<std::vector<T>>>(); - - if (temp[0].size() == 0) { - throw netket::InvalidInputError( - "Error while loading Eigen Matrix from Json"); - } - - v.resize(temp.size(), temp[0].size()); - for (std::size_t i = 0; i < temp.size(); i++) { - for (std::size_t j = 0; j < temp[i].size(); j++) { - if (temp[i].size() != temp[0].size()) { - throw netket::InvalidInputError( - "Error while loading Eigen Matrix from Json"); - } - v(i, j) = temp[i][j]; - } - } -} - -} // namespace Eigen - -namespace std { - -inline void to_json(nlohmann::json &js, const std::complex<double> &p) { - js = nlohmann::json{p.real(), p.imag()}; -} - -inline void from_json(const nlohmann::json &js, std::complex<double> &p) { - if (js.is_array()) { - p = std::complex<double>(js[0].get<double>(), js[1].get<double>()); - } else { - p = std::complex<double>(js.get<double>(), 0.); - } -} - -} // namespace std - -#endif diff --git a/Sources/Utils/json_helper.hpp b/Sources/Utils/json_helper.hpp deleted file mode 100644 index 12ec1cec84..0000000000 --- a/Sources/Utils/json_helper.hpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef NETKET_JSONHELPER_HPP -#define NETKET_JSONHELPER_HPP - -#include <fstream> -#include <iostream> -#include <json.hpp> -#include <string> -#include <vector> - -#include "exceptions.hpp" - -namespace netket { - -using json = nlohmann::json; - -inline bool FieldExists(const json& pars, const std::string& field) { - return pars.count(field) > 0; -} - -/** - * Checks whether @param field exists in @param pars and throws an - * InvalidInputError if not. - * @param context is used in the error message to help users locate the location - * of the error. - * - * Example usage: CheckFieldExists(pars["Key"], "SubKey", "Key"); - * If SubKey does not exists, this will throw and error with message - * "Field 'SubKey' (below 'Key') is not defined in the input". - */ - -inline void CheckFieldExists(const json& pars, const std::string& field, - const std::string& context = "") { - if (!FieldExists(pars, field)) { - std::stringstream s; - s << "Field '" << field << "' "; - if (context.size() > 0) { - s << "(below '" << context << "') "; - } - s << "is not defined in the input"; - throw InvalidInputError(s.str()); - } -} - -inline json FieldVal(const json& pars, const std::string& field, - const std::string& context = "") { - CheckFieldExists(pars, field, context); - return pars[field]; -} - -template <class Value> -json FieldVal(const json& pars, const std::string& field, - const std::string& context = "") { - CheckFieldExists(pars, field, context); - return pars[field].get<Value>(); -} - -inline void FieldArray(const json& pars, const std::string& field, - std::vector<int>& arr, const std::string& context = "") { - CheckFieldExists(pars, field, context); - arr.resize(pars[field].size()); - for (std::size_t i = 0; i < pars[field].size(); i++) { - arr[i] = pars[field][i]; - } -} - -template <class Value> -Value FieldOrDefaultVal(const json& pars, std::string field, Value defval) { - if (FieldExists(pars, field)) { - return pars[field]; - } else { - return defval; - } -} - -inline json ReadJsonFromFile(std::string filename) { - json pars; - - std::ifstream filein(filename); - if (filein.is_open()) { - filein >> pars; - } else { - std::stringstream s; - s << "Cannot read Json from file: " << filename; - throw InvalidInputError(s.str()); - } - return pars; -} - -} // namespace netket -#endif diff --git a/Sources/Utils/json_utils.cc b/Sources/Utils/json_utils.cc new file mode 100644 index 0000000000..157762b6c6 --- /dev/null +++ b/Sources/Utils/json_utils.cc @@ -0,0 +1,171 @@ +// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Utils/json_utils.hpp" + +#include <complex> +#include <fstream> +#include <vector> + +#include <Eigen/Core> +#include <json.hpp> + +#include "exceptions.hpp" + +namespace nlohmann { +template <typename T> +void adl_serializer<std::complex<T>>::to_json(json &js, + const std::complex<T> &p) { + js = nlohmann::json{p.real(), p.imag()}; +} + +template <typename T> +void adl_serializer<std::complex<T> /**/>::from_json(const json &js, + std::complex<T> &p) { + if (js.is_array()) { + p = std::complex<T>(js[0].get<T>(), js[1].get<T>()); + } else { + p = std::complex<T>(js.get<T>(), 0.); + } +} + +template struct adl_serializer<std::complex<float>>; +template struct adl_serializer<std::complex<double>>; +template struct adl_serializer<std::complex<long double>>; +} // namespace nlohmann + +namespace Eigen { +template <class T> +void to_json(nlohmann::json &js, const Matrix<T, Eigen::Dynamic, 1> &v) { + std::vector<T> temp(v.size()); + for (std::size_t i = 0; i < std::size_t(v.size()); i++) { + temp[i] = v(i); + } + js = nlohmann::json(temp); +} + +template <class T> +void to_json(nlohmann::json &js, + const Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &v) { + std::vector<std::vector<T>> temp(v.rows()); + for (std::size_t i = 0; i < std::size_t(v.rows()); i++) { + temp[i].resize(v.cols()); + for (std::size_t j = 0; j < std::size_t(v.cols()); j++) { + temp[i][j] = v(i, j); + } + } + js = nlohmann::json(temp); +} + +template <class T> +void from_json(const nlohmann::json &js, Matrix<T, Eigen::Dynamic, 1> &v) { + std::vector<T> temp = js.get<std::vector<T>>(); + v.resize(temp.size()); + for (std::size_t i = 0; i < temp.size(); i++) { + v(i) = temp[i]; + } +} + +template <class T> +void from_json(const nlohmann::json &js, + Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &v) { + std::vector<std::vector<T>> temp = js.get<std::vector<std::vector<T>>>(); + + if (temp[0].size() == 0) { + throw netket::InvalidInputError( + "Error while loading Eigen Matrix from Json"); + } + + v.resize(temp.size(), temp[0].size()); + for (std::size_t i = 0; i < temp.size(); i++) { + for (std::size_t j = 0; j < temp[i].size(); j++) { + if (temp[i].size() != temp[0].size()) { + throw netket::InvalidInputError( + "Error while loading Eigen Matrix from Json"); + } + v(i, j) = temp[i][j]; + } + } +} + +template void to_json(nlohmann::json &, + const Matrix<double, Eigen::Dynamic, 1> &); +template void to_json(nlohmann::json &, + const Matrix<std::complex<double>, Eigen::Dynamic, 1> &); +template void to_json(nlohmann::json &, + const Matrix<double, Eigen::Dynamic, Eigen::Dynamic> &); +template void to_json( + nlohmann::json &, + const Matrix<std::complex<double>, Eigen::Dynamic, Eigen::Dynamic> &); + +template void from_json(const nlohmann::json &, + Matrix<double, Eigen::Dynamic, 1> &); +template void from_json(const nlohmann::json &, + Matrix<std::complex<double>, Eigen::Dynamic, 1> &); +template void from_json(const nlohmann::json &, + Matrix<double, Eigen::Dynamic, Eigen::Dynamic> &); +template void from_json( + const nlohmann::json &, + Matrix<std::complex<double>, Eigen::Dynamic, Eigen::Dynamic> &); +} // namespace Eigen + +namespace netket { + +bool FieldExists(const json &pars, const std::string &field) { + return pars.count(field) > 0; +} + +void CheckFieldExists(const json &pars, const std::string &field, + const std::string &context) { + if (!FieldExists(pars, field)) { + std::stringstream s; + s << "Field '" << field << "' "; + if (context.size() > 0) { + s << "(below '" << context << "') "; + } + s << "is not defined in the input"; + throw InvalidInputError(s.str()); + } +} + +json FieldVal(const json &pars, const std::string &field, + const std::string &context) { + CheckFieldExists(pars, field, context); + return pars[field]; +} + +void FieldArray(const json &pars, const std::string &field, + std::vector<int> &arr, const std::string &context) { + CheckFieldExists(pars, field, context); + arr.resize(pars[field].size()); + for (std::size_t i = 0; i < pars[field].size(); i++) { + arr[i] = pars[field][i]; + } +} + +json ReadJsonFromFile(std::string const &filename) { + json pars; + + std::ifstream filein(filename); + if (filein.is_open()) { + filein >> pars; + } else { + std::stringstream s; + s << "Cannot read Json from file: " << filename; + throw InvalidInputError(s.str()); + } + return pars; +} + +} // namespace netket diff --git a/Sources/Utils/json_utils.hpp b/Sources/Utils/json_utils.hpp index 189d9e8e3b..ab01f0fd9a 100644 --- a/Sources/Utils/json_utils.hpp +++ b/Sources/Utils/json_utils.hpp @@ -15,8 +15,67 @@ #ifndef NETKET_JSONUTILS_HPP #define NETKET_JSONUTILS_HPP -#include <json.hpp> -#include "json_dumps.hpp" -#include "json_helper.hpp" +#include <complex> -#endif +#include <Eigen/Core> +#include <nlohmann/json.hpp> + +namespace nlohmann { +template <typename T> +struct adl_serializer<std::complex<T>> { + static void to_json(json& js, const std::complex<T>& p); + static void from_json(const json& js, std::complex<T>& p); +}; +} // namespace nlohmann + +namespace Eigen { +template <class T> +void to_json(nlohmann::json& js, const Matrix<T, Eigen::Dynamic, 1>& v); +template <class T> +void to_json(nlohmann::json& js, + const Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& v); + +template <class T> +void from_json(const nlohmann::json& js, Matrix<T, Eigen::Dynamic, 1>& v); +template <class T> +void from_json(const nlohmann::json& js, + Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& v); +} // namespace Eigen + +namespace netket { + +using json = nlohmann::json; + +bool FieldExists(const json& pars, const std::string& field); + +void CheckFieldExists(const json& pars, const std::string& field, + const std::string& context = ""); + +json FieldVal(const json& pars, const std::string& field, + const std::string& context = ""); + +void FieldArray(const json& pars, const std::string& field, + std::vector<int>& arr, const std::string& context = ""); + +json ReadJsonFromFile(std::string filename); + +template <class Value, class JSON> +Value FieldVal(const JSON& pars, const std::string& field, + const std::string& context = "") { + CheckFieldExists(pars, field, context); + return pars[field].template get<Value>(); +} + +template <class Value, class JSON> +Value FieldOrDefaultVal(const JSON& pars, const std::string& field, + Value defval) { + if (FieldExists(pars, field)) { + return pars[field]; + } else { + return defval; + } +} + +} // namespace netket + +#endif // NETKET_JSONUTILS_HPP diff --git a/Sources/Utils/messages.hpp b/Sources/Utils/messages.hpp index 5e75d2b56a..513ca7f9f8 100644 --- a/Sources/Utils/messages.hpp +++ b/Sources/Utils/messages.hpp @@ -16,7 +16,7 @@ #define NETKET_MESSAGES_HPP #include <mpi.h> -#include <ostream> +#include <iostream> #include <streambuf> #include <string> diff --git a/Sources/Utils/mpi_interface.cc b/Sources/Utils/mpi_interface.cc index 1e4358f7f3..83ec7a0175 100644 --- a/Sources/Utils/mpi_interface.cc +++ b/Sources/Utils/mpi_interface.cc @@ -46,7 +46,14 @@ std::unique_ptr<void, Unload> TryPreload(void) { return {handle, Unload{}}; } -} // namespace + +struct MPIInitializer { + MPIInitializer(); + ~MPIInitializer(); + + private: + bool have_initialized_; +}; MPIInitializer::MPIInitializer() { int already_initialized; @@ -78,6 +85,9 @@ MPIInitializer::~MPIInitializer() { #endif } } +} // namespace + +static MPIInitializer initiaze_mpi_when_loading_the_module{}; } // namespace detail } // namespace netket diff --git a/Sources/Utils/mpi_interface.hpp b/Sources/Utils/mpi_interface.hpp index ef1801e59c..5c0e66e345 100644 --- a/Sources/Utils/mpi_interface.hpp +++ b/Sources/Utils/mpi_interface.hpp @@ -197,16 +197,6 @@ struct MPIHelpers { } }; -namespace detail { -struct MPIInitializer { - MPIInitializer(); - ~MPIInitializer(); - - private: - bool have_initialized_; -}; -} // namespace detail - } // namespace netket #endif diff --git a/Sources/Utils/py_utils.cc b/Sources/Utils/py_utils.cc new file mode 100644 index 0000000000..aa98a6d84a --- /dev/null +++ b/Sources/Utils/py_utils.cc @@ -0,0 +1,44 @@ +// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "py_utils.hpp" + +#include "Utils/all_utils.hpp" + +namespace py = pybind11; + +namespace netket { + +void AddUtilsModule(py::module m) { + auto subm = m.def_submodule("utils"); + + py::class_<netket::default_random_engine>(subm, "RandomEngine") + .def(py::init<netket::default_random_engine::result_type>(), + py::arg("seed") = netket::default_random_engine::default_seed) + .def("seed", static_cast<void (netket::default_random_engine::*)( + netket::default_random_engine::result_type)>( + &netket::default_random_engine::seed)); + + py::class_<Lookup<double>>(m, "LookupReal").def(py::init<>()); + + py::class_<Lookup<Complex>>(m, "LookupComplex").def(py::init<>()); + + py::class_<MPIHelpers>(m, "MPI") + .def("rank", &MPIHelpers::MPIRank, + R"EOF(int: The MPI rank for the current process. )EOF") + .def("size", &MPIHelpers::MPISize, + R"EOF(int: The total number of MPI ranks currently active. )EOF"); +} + +} // namespace netket diff --git a/Sources/Utils/py_utils.hpp b/Sources/Utils/py_utils.hpp index 28ec1bee18..90e541c808 100644 --- a/Sources/Utils/py_utils.hpp +++ b/Sources/Utils/py_utils.hpp @@ -15,42 +15,12 @@ #ifndef NETKET_PYUTILS_HPP #define NETKET_PYUTILS_HPP -#include <mpi.h> -#include <pybind11/complex.h> -#include <pybind11/eigen.h> #include <pybind11/pybind11.h> -#include <pybind11/stl.h> -#include <pybind11/stl_bind.h> -#include <complex> -#include <vector> -#include "all_utils.hpp" - -namespace py = pybind11; namespace netket { -void AddUtilsModule(py::module &m) { - auto subm = m.def_submodule("utils"); - - py::class_<netket::default_random_engine>(subm, "RandomEngine") - .def(py::init<netket::default_random_engine::result_type>(), - py::arg("seed") = netket::default_random_engine::default_seed) - .def("seed", (void (netket::default_random_engine::*)( - netket::default_random_engine::result_type)) & - netket::default_random_engine::seed); - - py::class_<Lookup<double>>(m, "LookupReal").def(py::init<>()); - - py::class_<Lookup<Complex>>(m, "LookupComplex") - .def(py::init<>()); - - py::class_<MPIHelpers>(m, "MPI") - .def("rank", &MPIHelpers::MPIRank, - R"EOF(int: The MPI rank for the current process. )EOF") - .def("size", &MPIHelpers::MPISize, - R"EOF(int: The total number of MPI ranks currently active. )EOF"); -} +void AddUtilsModule(pybind11::module m); } // namespace netket -#endif +#endif // NETKET_PYUTILS_HPP diff --git a/Sources/pynetket.cc b/Sources/pynetket.cc index 98d2ccd0c4..d3664cb8cf 100644 --- a/Sources/pynetket.cc +++ b/Sources/pynetket.cc @@ -34,10 +34,6 @@ namespace netket { -namespace detail { -static MPIInitializer _do_not_use_me_dummy_{}; -} // namespace detail - PYBIND11_MODULE(_C_netket, m) { AddDynamicsModule(m); AddGraphModule(m); diff --git a/netket/__init__.py b/netket/__init__.py index d5a0a19b20..0673826570 100644 --- a/netket/__init__.py +++ b/netket/__init__.py @@ -31,3 +31,4 @@ utils, variational, ) +from ._C_netket import MPI, LookupReal, LookupComplex
pennersr__django-allauth-106
Make it easier for people to set config variables when running the example application It could be easier to configure the example without needing to touch repo code.
[ { "content": "# Django settings for example project.\nimport os\n\nPROJECT_ROOT = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'example.db', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\nLOCALE_PATHS = ( os.path.join(PROJECT_ROOT, 'locale'), )\n\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 't8_)kj3v!au0!_i56#gre**mkg0&z1df%3bw(#5^#^5e_64!$_'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\nROOT_URLCONF = 'example.urls'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n\n \"allauth.account.context_processors.account\",\n \"allauth.socialaccount.context_processors.socialaccount\",\n)\n\nTEMPLATE_DIRS = (\n # allauth templates: you could copy this directory into your\n # project and tweak it according to your needs\n os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'allauth'),\n # example project specific templates\n os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'example')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n 'uni_form',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # 'allauth.socialaccount.providers.twitter',\n # 'allauth.socialaccount.providers.openid',\n # 'allauth.socialaccount.providers.facebook',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "example/settings.py" } ]
[ { "content": "# Django settings for example project.\nimport os\n\nPROJECT_ROOT = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'example.db', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\nLOCALE_PATHS = ( os.path.join(PROJECT_ROOT, 'locale'), )\n\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 't8_)kj3v!au0!_i56#gre**mkg0&z1df%3bw(#5^#^5e_64!$_'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\nROOT_URLCONF = 'example.urls'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n\n \"allauth.account.context_processors.account\",\n \"allauth.socialaccount.context_processors.socialaccount\",\n)\n\nTEMPLATE_DIRS = (\n # allauth templates: you could copy this directory into your\n # project and tweak it according to your needs\n os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'allauth'),\n # example project specific templates\n os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'example')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n 'uni_form',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # 'allauth.socialaccount.providers.twitter',\n # 'allauth.socialaccount.providers.openid',\n # 'allauth.socialaccount.providers.facebook',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n", "path": "example/settings.py" } ]
diff --git a/.gitignore b/.gitignore index 75c5d55afc..959cf43aae 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ docs/_build build dist django_allauth.egg-info +example/local_settings.py diff --git a/example/local_settings.example b/example/local_settings.example new file mode 100644 index 0000000000..072f9ce0f8 --- /dev/null +++ b/example/local_settings.example @@ -0,0 +1,78 @@ +# django-allauth Configuration variables you might like to change. +# +# DO NOT CHANGE THIS FILE. Instead, copy it to local_settings.py +# and make your changes there. + + +# Specifies the login method to use -- whether the user logs in by entering +# his username, e-mail address, or either one of both. Possible values +# are 'username' | 'email' | 'username_email' +# ACCOUNT_AUTHENTICATION_METHOD + +# The URL to redirect to after a successful e-mail confirmation, in case no +# user is logged in. Default value is settings.LOGIN_URL. +# ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL + +# The URL to redirect to after a successful e-mail confirmation, in case of +# an authenticated user. Default is settings.LOGIN_REDIRECT_URL +# ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL + +# Determines the expiration date of email confirmation mails (# of days). +# ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3 + +# The user is required to hand over an e-mail address when signing up. +# ACCOUNT_EMAIL_REQUIRED = False + +# After signing up, keep the user account inactive until the e-mail address +# is verified. +# ACCOUNT_EMAIL_VERIFICATION = False + +# Subject-line prefix to use for email messages sent. By default, the name +# of the current Site (django.contrib.sites) is used. +# ACCOUNT_EMAIL_SUBJECT_PREFIX = '[Site] ' + +# A string pointing to a custom form class (e.g. 'myapp.forms.SignupForm') +# that is used during signup to ask the user for additional input +# (e.g. newsletter signup, birth date). This class should implement a +# 'save' method, accepting the newly signed up user as its only parameter. +# ACCOUNT_SIGNUP_FORM_CLASS = None + +# When signing up, let the user type in his password twice to avoid typ-o's. +# ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = True + +# Enforce uniqueness of e-mail addresses. +# ACCOUNT_UNIQUE_EMAIL = True + +# A callable (or string of the form 'some.module.callable_name') that takes +# a user as its only argument and returns the display name of the user. The +# default implementation returns user.username. +# ACCOUNT_USER_DISPLAY + +# The user is required to enter a username when signing up. Note that the +# user will be asked to do so even if ACCOUNT_AUTHENTICATION_METHOD is set +# to email. Set to False when you do not wish to prompt the user to enter a +# username. +# ACCOUNT_USERNAME_REQUIRED = True + +# render_value parameter as passed to PasswordInput fields. +# ACCOUNT_PASSWORD_INPUT_RENDER_VALUE = False + +# An integer specifying the minimum password length. +# ACCOUNT_PASSWORD_MIN_LENGTH = 6 + +# Request e-mail address from 3rd party account provider? E.g. using OpenID +# AX, or the Facebook 'email' permission. +# SOCIALACCOUNT_QUERY_EMAIL = ACCOUNT_EMAIL_REQUIRED + +# Attempt to bypass the signup form by using fields (e.g. username, email) +# retrieved from the social account provider. If a conflict arises due to a +# duplicate e-mail address the signup form will still kick in. +# SOCIALACCOUNT_AUTO_SIGNUP = True + +# Enable support for django-avatar. When enabled, the profile image of the +# user is copied locally into django-avatar at signup. Default is +# 'avatar' in settings.INSTALLED_APPS. +# SOCIALACCOUNT_AVATAR_SUPPORT + +# Dictionary containing provider specific settings. +# SOCIALACCOUNT_PROVIDERS diff --git a/example/settings.py b/example/settings.py index 49d34f341d..ee19cc0445 100644 --- a/example/settings.py +++ b/example/settings.py @@ -168,3 +168,8 @@ }, } } + +try: + from local_settings import * +except ImportError: + pass
open-telemetry__opentelemetry-python-1889
Run tests on Windows in CI
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flask_example.py\nimport flask\nimport requests\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.flask import FlaskInstrumentor\nfrom opentelemetry.instrumentation.requests import RequestsInstrumentor\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n BatchSpanProcessor,\n ConsoleSpanExporter,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n BatchSpanProcessor(ConsoleSpanExporter())\n)\n\napp = flask.Flask(__name__)\nFlaskInstrumentor().instrument_app(app)\nRequestsInstrumentor().instrument()\n\ntracer = trace.get_tracer(__name__)\n\n\[email protected](\"/\")\ndef hello():\n with tracer.start_as_current_span(\"example-request\"):\n requests.get(\"http://www.example.com\")\n return \"hello\"\n\n\napp.run(debug=True, port=5000)\n", "path": "docs/getting_started/flask_example.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flask_example.py\nimport flask\nimport requests\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.flask import FlaskInstrumentor\nfrom opentelemetry.instrumentation.requests import RequestsInstrumentor\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n BatchSpanProcessor,\n ConsoleSpanExporter,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n BatchSpanProcessor(ConsoleSpanExporter())\n)\n\napp = flask.Flask(__name__)\nFlaskInstrumentor().instrument_app(app)\nRequestsInstrumentor().instrument()\n\ntracer = trace.get_tracer(__name__)\n\n\[email protected](\"/\")\ndef hello():\n with tracer.start_as_current_span(\"example-request\"):\n requests.get(\"http://www.example.com\")\n return \"hello\"\n\n\napp.run(port=5000)\n", "path": "docs/getting_started/flask_example.py" } ]
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f66ad7dbbb4..de0c43cd8c3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,7 +10,7 @@ env: # Otherwise, set variable to the commit of your branch on # opentelemetry-python-contrib which is compatible with these Core repo # changes. - CONTRIB_REPO_SHA: 2a92e255f7595024242e45c0050c8f3de7140b6b + CONTRIB_REPO_SHA: dde62cebffe519c35875af6d06fae053b3be65ec jobs: build: @@ -20,7 +20,7 @@ jobs: py37: 3.7 py38: 3.8 py39: 3.9 - pypy3: pypy3 + pypy3: pypy-3.7 RUN_MATRIX_COMBINATION: ${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -28,7 +28,7 @@ jobs: matrix: python-version: [ py36, py37, py38, py39, pypy3 ] package: ["instrumentation", "core", "exporter", "propagator"] - os: [ ubuntu-latest ] + os: [ ubuntu-20.04, windows-2019 ] steps: - name: Checkout Core Repo @ SHA - ${{ github.sha }} uses: actions/checkout@v2 @@ -42,14 +42,22 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ env[matrix.python-version] }} + architecture: 'x64' - name: Install tox run: pip install -U tox-factor - name: Cache tox environment # Preserves .tox directory between runs for faster installs uses: actions/cache@v2 with: - path: .tox - key: tox-cache-${{ env.RUN_MATRIX_COMBINATION }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core + path: | + .tox + ~/.cache/pip + key: v2-tox-cache-${{ env.RUN_MATRIX_COMBINATION }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core + # tox fails on windows and Python3.6 when tox dir is reused between builds so we remove it + - name: fix for windows + py3.6 + if: ${{ matrix.os == 'windows-2019' && matrix.python-version == 'py36' }} + shell: pwsh + run: Remove-Item .\.tox\ -Force -Recurse -ErrorAction Ignore - name: run tox run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} -- --benchmark-json=${{ env.RUN_MATRIX_COMBINATION }}-benchmark.json - name: Find and merge benchmarks @@ -81,7 +89,7 @@ jobs: matrix: tox-environment: [ "docker-tests", "lint", "docs", "mypy", "mypyinstalled", "tracecontext" ] name: ${{ matrix.tox-environment }} - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Checkout Core Repo @ SHA - ${{ github.sha }} uses: actions/checkout@v2 @@ -95,14 +103,17 @@ jobs: uses: actions/setup-python@v2 with: python-version: 3.9 + architecture: 'x64' - name: Install tox run: pip install -U tox - name: Cache tox environment # Preserves .tox directory between runs for faster installs uses: actions/cache@v2 with: - path: .tox - key: tox-cache-${{ matrix.tox-environment }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core + path: | + .tox + ~/.cache/pip + key: v2-tox-cache-${{ matrix.tox-environment }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core - name: run tox run: tox -e ${{ matrix.tox-environment }} contrib-build: @@ -119,7 +130,7 @@ jobs: matrix: python-version: [ py36, py37, py38, py39, pypy3 ] package: ["instrumentation", "exporter"] - os: [ ubuntu-latest ] + os: [ ubuntu-20.04] steps: - name: Checkout Contrib Repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} uses: actions/checkout@v2 @@ -135,14 +146,17 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ env[matrix.python-version] }} + architecture: 'x64' - name: Install tox run: pip install -U tox-factor - name: Cache tox environment # Preserves .tox directory between runs for faster installs uses: actions/cache@v2 with: - path: .tox - key: tox-cache-${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-contrib + path: | + .tox + ~/.cache/pip + key: v2-tox-cache-${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-contrib - name: run tox run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} contrib-misc: @@ -151,7 +165,7 @@ jobs: matrix: tox-environment: [ "docker-tests"] name: ${{ matrix.tox-environment }} - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Checkout Contrib Repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} uses: actions/checkout@v2 @@ -167,13 +181,16 @@ jobs: uses: actions/setup-python@v2 with: python-version: 3.9 + architecture: 'x64' - name: Install tox run: pip install -U tox - name: Cache tox environment # Preserves .tox directory between runs for faster installs uses: actions/cache@v2 with: - path: .tox - key: tox-cache-${{ matrix.tox-environment }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-contrib + path: | + .tox + ~/.cache/pip + key: v2-tox-cache-${{ matrix.tox-environment }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-contrib - name: run tox run: tox -e ${{ matrix.tox-environment }} diff --git a/docs/getting_started/flask_example.py b/docs/getting_started/flask_example.py index ddde3aa839f..64ed606c7f6 100644 --- a/docs/getting_started/flask_example.py +++ b/docs/getting_started/flask_example.py @@ -44,4 +44,4 @@ def hello(): return "hello" -app.run(debug=True, port=5000) +app.run(port=5000) diff --git a/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_asyncio.py b/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_asyncio.py index b4619b4ed80..64cfea933c4 100644 --- a/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_asyncio.py +++ b/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_asyncio.py @@ -129,9 +129,9 @@ async def do_task(): spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) - spans = sorted(spans, key=lambda x: x.start_time) parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) - self.assertIsChildOf(spans[1], parent_span) - self.assertIsNotChildOf(spans[2], parent_span) + spans = [span for span in spans if span != parent_span] + self.assertIsChildOf(spans[0], parent_span) + self.assertIsNotChildOf(spans[1], parent_span) diff --git a/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_threads.py b/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_threads.py index 4ab8b2a075e..36c0a8b8414 100644 --- a/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_threads.py +++ b/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_threads.py @@ -115,5 +115,7 @@ def test_bad_solution_to_set_parent(self): parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) - self.assertIsChildOf(spans[1], parent_span) - self.assertIsChildOf(spans[2], parent_span) + spans = [s for s in spans if s != parent_span] + self.assertEqual(len(spans), 2) + for span in spans: + self.assertIsChildOf(span, parent_span) diff --git a/tests/util/src/opentelemetry/test/test_base.py b/tests/util/src/opentelemetry/test/test_base.py index ad124bf445d..4945d1cb32d 100644 --- a/tests/util/src/opentelemetry/test/test_base.py +++ b/tests/util/src/opentelemetry/test/test_base.py @@ -46,6 +46,11 @@ def tearDownClass(cls): def setUp(self): self.memory_exporter.clear() + def get_finished_spans(self): + return FinishedTestSpans( + self, self.memory_exporter.get_finished_spans() + ) + def assertEqualSpanInstrumentationInfo(self, span, module): self.assertEqual(span.instrumentation_info.name, module.__name__) self.assertEqual(span.instrumentation_info.version, module.__version__) @@ -56,6 +61,12 @@ def assertSpanHasAttributes(self, span, attributes): self.assertEqual(val, span.attributes[key]) def sorted_spans(self, spans): # pylint: disable=R0201 + """ + Sorts spans by span creation time. + + Note: This method should not be used to sort spans in a deterministic way as the + order depends on timing precision provided by the platform. + """ return sorted( spans, key=lambda s: s._start_time, # pylint: disable=W0212 @@ -91,3 +102,23 @@ def disable_logging(highest_level=logging.CRITICAL): yield finally: logging.disable(logging.NOTSET) + + +class FinishedTestSpans(list): + def __init__(self, test, spans): + super().__init__(spans) + self.test = test + + def by_name(self, name): + for span in self: + if span.name == name: + return span + self.test.fail("Did not find span with name {}".format(name)) + return None + + def by_attr(self, key, value): + for span in self: + if span.attributes.get(key) == value: + return span + self.test.fail("Did not find span with attrs {}={}".format(key, value)) + return None
MongoEngine__mongoengine-2424
When will be new release? When are you planning a new release? Some features has been added since last one in May. Thanks!
[ { "content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 20, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py" } ]
[ { "content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 21, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py" } ]
diff --git a/docs/changelog.rst b/docs/changelog.rst index 1ef42232a..247bd8ef2 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -6,18 +6,23 @@ Changelog Development =========== - (Fill this out as you fix issues and develop your features). + +Changes in 0.21.0 +================= - Bug fix in DynamicDocument which is not parsing known fields in constructor like Document do #2412 - When using pymongo >= 3.7, make use of Collection.count_documents instead of Collection.count and Cursor.count that got deprecated in pymongo >= 3.7. This should have a negative impact on performance of count see Issue #2219 - Fix a bug that made the queryset drop the read_preference after clone(). - Remove Py3.5 from CI as it reached EOL and add Python 3.9 -- Fix some issues related with db_field conflict in constructor #2414 -- Fix the behavior of Doc.objects.limit(0) which should return all documents (similar to mongodb) #2311 +- Fix some issues related with db_field/field conflict in constructor #2414 +- BREAKING CHANGE: Fix the behavior of Doc.objects.limit(0) which should return all documents (similar to mongodb) #2311 - Bug fix in ListField when updating the first item, it was saving the whole list, instead of - just replacing the first item (as it's usually done) #2392 + just replacing the first item (as usually done when updating 1 item of the list) #2392 - Add EnumField: ``mongoengine.fields.EnumField`` - Refactoring - Remove useless code related to Document.__only_fields and Queryset.only_fields +- Fix query transformation regarding special operators #2365 +- Bug Fix: Document.save() fails when shard_key is not _id #2154 Changes in 0.20.0 ================= diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py index dbd88a683..d0a0f1f4a 100644 --- a/mongoengine/__init__.py +++ b/mongoengine/__init__.py @@ -28,7 +28,7 @@ ) -VERSION = (0, 20, 0) +VERSION = (0, 21, 0) def get_version():
aws-cloudformation__cfn-lint-2249
E3002 Invalid Property Lambda/Properties/EphemeralStorage *cfn-lint version: (`cfn-lint --version`)* `cfn-lint 0.58.4` *Description of issue.* Looks like it does not yet recognize `EphemeralStorage` as a valid property for lambdas ```yml Lambda: Type: AWS::Lambda::Function Properties: Role: !GetAtt Role.Arn Timeout: 600 MemorySize: 2048 EphemeralStorage: Size: 1024 ``` Link to the [**docs**](https://aws.amazon.com/blogs/compute/using-larger-ephemeral-storage-for-aws-lambda/) where it shows the new feature Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.58.4'\n", "path": "src/cfnlint/version.py" } ]
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.59.0'\n", "path": "src/cfnlint/version.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index a48a7a412c..2e2d92c56c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### v0.59.0 +###### Features +- Update `aws-sam-translator` to `1.45.0` (pull #[2245](https://github.com/aws-cloudformation/cfn-lint/pull/2245)) +- Remove dependency on `six` (pull #[2204](https://github.com/aws-cloudformation/cfn-lint/pull/2204)) +- New rule [E3504](https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#E3504) to validate resources with `AWS::Backup::BackupPlan`. The property `DeleteAfterDays` cannot be less than 90 days from `MoveToColdStorageAfterDays` (pull #[2230](https://github.com/aws-cloudformation/cfn-lint/pull/2230)) +###### CloudFormation Specifications +- Update CloudFormation specs to `66.0.0` (pull #[2245](https://github.com/aws-cloudformation/cfn-lint/pull/2245)) + ### v0.58.4 ###### CloudFormation Specifications - Update CloudFormation specs to `61.0.0` (pull #[2232](https://github.com/aws-cloudformation/cfn-lint/pull/2232)) diff --git a/README.md b/README.md index 8789975fb2..eb7f7f2a44 100644 --- a/README.md +++ b/README.md @@ -301,7 +301,7 @@ If you'd like cfn-lint to be run automatically when making changes to files in y ```yaml repos: - repo: https://github.com/aws-cloudformation/cfn-python-lint - rev: v0.58.4 # The version of cfn-lint to use + rev: v0.59.0 # The version of cfn-lint to use hooks: - id: cfn-python-lint files: path/to/cfn/dir/.*\.(json|yml|yaml)$ diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py index dd8696c2bb..309bd5896e 100644 --- a/src/cfnlint/version.py +++ b/src/cfnlint/version.py @@ -3,4 +3,4 @@ SPDX-License-Identifier: MIT-0 """ -__version__ = '0.58.4' +__version__ = '0.59.0'
aws-cloudformation__cfn-lint-1456
AWS::AutoScaling::AutoScalingGroup MaxInstanceLifetime Validation *cfn-lint version: 0.29.2* *Description of issue.* When using the parameter `MaxInstanceLifetime` for `AWS::AutoScaling::AutoScalingGroup` we are hit with the following lint error: ``` $ cfn-lint templates/proj/rgs/rgs_autoscale_stretch_elb.yml E3002 Invalid Property Resources/autoscalegroup/Properties/MaxInstanceLifetime templates/proj/rgs/rgs_autoscale_stretch_elb.yml:194:7 ``` The template which leads to the error: ``` [...] autoscalegroup: Type: AWS::AutoScaling::AutoScalingGroup Properties: AvailabilityZones: !Ref AvailabilityZones Cooldown: '300' HealthCheckGracePeriod: !Ref GracePeriod HealthCheckType: ELB MaxSize: !Ref MaxSize MinSize: !Ref MinSize MaxInstanceLifetime: !Ref MaxInstanceLifetime VPCZoneIdentifier: !Ref EC2SubnetIDs TargetGroupARNs: - !Ref elbtargetgroup LaunchConfigurationName: !Ref launchconfiguration Tags: [...] PropagateAtLaunch: true TerminationPolicies: - Default [..] ``` It seems the parameter is currently not supported by cfn-lint, would be cool to see support for it.
[ { "content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.29.3'\n", "path": "src/cfnlint/version.py" } ]
[ { "content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.29.4'\n", "path": "src/cfnlint/version.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 63c3b1db44..ebb0c692b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### v0.29.4 +###### Features +- Add `--build-graph` parameter to create a graph of dependencies (pull #[1411](https://github.com/aws-cloudformation/cfn-python-lint/pull/1411)) +###### CloudFormation Specifications +- Update CloudFormation specs to 12.1.0 (pull #[1455](https://github.com/aws-cloudformation/cfn-python-lint/pull/1455)) +###### Fixes +- Add `found unknown escape character` to start of err problem to determine when to use json parsing (pull #[1454](https://github.com/aws-cloudformation/cfn-python-lint/pull/1454)) + ### v0.29.3 ###### CloudFormation Specifications - Update CloudFormation specs to 12.0.0 (pull #[1448](https://github.com/aws-cloudformation/cfn-python-lint/pull/1448)) diff --git a/README.md b/README.md index 8cdd2eef48..91d4351ee7 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ If you'd like cfn-lint to be run automatically when making changes to files in y ```yaml repos: - repo: https://github.com/aws-cloudformation/cfn-python-lint - rev: v0.29.3 # The version of cfn-lint to use + rev: v0.29.4 # The version of cfn-lint to use hooks: - id: cfn-python-lint files: path/to/cfn/dir/.*\.(json|yml|yaml)$ diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py index b2b9e6a353..161b9e86bc 100644 --- a/src/cfnlint/version.py +++ b/src/cfnlint/version.py @@ -3,4 +3,4 @@ SPDX-License-Identifier: MIT-0 """ -__version__ = '0.29.3' +__version__ = '0.29.4'
mlflow__mlflow-2797
[SETUP-BUG] ResolvePackageNotFound - python=3.5.2 Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for information on what types of issues we address. Please fill in this template and do not delete it unless you are sure your issue is outside its scope. ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04 - **MLflow installed from (source or binary)**: binary (pip install mlflow) - **MLflow version (run ``mlflow --version``)**: 1.2.0 - **Python version**: 3.5.2 - **Exact command to reproduce**: mlflow models build-docker -m /path/to/model -n "my-model" ### Describe the problem mlflow models build-docker -m /path/to/model -n "my-model" ### Other info / logs Warning: you have pip-installed dependencies in your environment file, but you do not list pip itself as one of your condadependencies. Conda may not use the correct pip to install your packages, and they may end up in the wrong place. Pleaseadd an explicit pip dependency. I'm adding one for you, but still nagging you. Collecting package metadata (repodata.json): ...working... done Solving environment: ...working... failed ResolvePackageNotFound: - python=3.5.2 Traceback (most recent call last): File "<string>", line 1, in <module> File "/miniconda/lib/python3.7/site-packages/mlflow/models/container/__init__.py", line 102, in _install_pyfunc_deps raise Exception("Failed to create model environment.") Exception: Failed to create model environment. creating and activating custom environment The command '/bin/sh -c python -c 'from mlflow.models.container import _install_pyfunc_deps; _install_pyfunc_deps("/opt/ml/model", install_mlflow=False)'' returned a non-zero code: 1
[ { "content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py" } ]
[ { "content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n - conda-forge\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py" } ]
diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py index ea67cf5ae1622..439c728201901 100644 --- a/mlflow/utils/environment.py +++ b/mlflow/utils/environment.py @@ -6,6 +6,7 @@ name: mlflow-env channels: - defaults + - conda-forge """
googleapis__google-api-python-client-1864
Stop using external 'mock' dependency As of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.
[ { "content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py" } ]
[ { "content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py" } ]
diff --git a/noxfile.py b/noxfile.py index 2e2e03abc3a..fda6416526f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -42,7 +42,6 @@ "pytest-cov", "webtest", "coverage", - "mock", ] diff --git a/tests/test__auth.py b/tests/test__auth.py index 6809647b785..8b6b5624032 100644 --- a/tests/test__auth.py +++ b/tests/test__auth.py @@ -13,11 +13,11 @@ # limitations under the License. import unittest +from unittest import mock import google.auth.credentials import google_auth_httplib2 import httplib2 -import mock import oauth2client.client from googleapiclient import _auth diff --git a/tests/test__helpers.py b/tests/test__helpers.py index 51e5c595260..56df6f88079 100644 --- a/tests/test__helpers.py +++ b/tests/test__helpers.py @@ -15,10 +15,9 @@ """Unit tests for googleapiclient._helpers.""" import unittest +from unittest import mock import urllib -import mock - from googleapiclient import _helpers diff --git a/tests/test_discovery.py b/tests/test_discovery.py index 36a0d524f26..8f4bdec1f03 100644 --- a/tests/test_discovery.py +++ b/tests/test_discovery.py @@ -35,6 +35,7 @@ import re import sys import unittest +from unittest import mock import urllib import google.api_core.exceptions @@ -42,7 +43,6 @@ from google.auth.exceptions import MutualTLSChannelError import google_auth_httplib2 import httplib2 -import mock from oauth2client import GOOGLE_TOKEN_URI from oauth2client.client import GoogleCredentials, OAuth2Credentials from parameterized import parameterized diff --git a/tests/test_discovery_cache.py b/tests/test_discovery_cache.py index d9b43470b27..678350505d6 100644 --- a/tests/test_discovery_cache.py +++ b/tests/test_discovery_cache.py @@ -19,8 +19,7 @@ import datetime import unittest - -import mock +from unittest import mock from googleapiclient.discovery_cache import DISCOVERY_DOC_MAX_AGE diff --git a/tests/test_http.py b/tests/test_http.py index 3233f668ce7..e7b57dfaff3 100644 --- a/tests/test_http.py +++ b/tests/test_http.py @@ -34,10 +34,10 @@ import ssl import time import unittest +from unittest import mock import urllib import httplib2 -import mock from oauth2client.client import Credentials from googleapiclient.discovery import build
airctic__icevision-995
Fix installation in documentation • Improve Installation Guide We need to improve the installation guide for IceVision. Too many people are getting stuck installing the library. We need clear instructions for: * Colab * MacOS * Windows (WSL2) * Ubuntu
[ { "content": "from setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup\n\n\nif __name__ == \"__main__\":\n setup()\n", "path": "setup.py" } ]
diff --git a/.github/workflows/build-pkg.yml b/.github/workflows/build-pkg.yml index c7b712657..e0a50562c 100644 --- a/.github/workflows/build-pkg.yml +++ b/.github/workflows/build-pkg.yml @@ -23,6 +23,6 @@ jobs: - name: Install package run: | - pip install numpy +# pip install numpy pip install -e . python -c "from icevision.all import *" diff --git a/_pyproject.toml b/_pyproject.toml new file mode 100644 index 000000000..37b8e208e --- /dev/null +++ b/_pyproject.toml @@ -0,0 +1,70 @@ +[tool.poetry] +name = "icevision" +version = "0.11.0" +description = "Agnostic Computer Vision Framework" +authors = ["Lucas Goulart Vazquez <[email protected]>, Farid Hassainia <[email protected]>"] +license = "Apache-2.0" + +[tool.poetry.dependencies] +python = ">=3.7,<3.9" +pillow = ">8.0.0,<9" +torch = "1.10.0+cu102" +torchvision = "0.11.0+cu102" +fastcore = ">=1.3.0,<1.4" +tqdm = ">=4.49.0,<5" +opencv-python = ">=4.1.1,<5" +albumentations = ">=1.0.3,<1.1" +matplotlib = ">=3.2.2,<4" +pycocotools = ">=2.0.2,<3" +requests = ">=2.23.0,<3" +loguru = ">=0.5.3" +importlib-metadata = ">=1" +fastai = "~2.5.2" +ipykernel = ">=4.10.1,<6" +pytorch-lightning = "~1.4.5" +effdet = "~0.2.1" +omegaconf = "~2" +dataclasses = "0.6" +wandb = "^0.12.9" +sahi = ">0.8.19,<1" +resnest = ">=0.0.6b20201125,<0.0.7" +yolov5-icevision = "~6" + + +[[tool.poetry.source]] +name = "torch_rep" +url = "https://eternalphane.github.io/pytorch-pypi" + +[[tool.poetry.source]] +name = "mmcv_full_rep" +#url = "http://localhost:8000/" +url = "https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html" +default=true + +#poe the poetry artist +#[tool.poe.tasks] +### PyTorch with CUDA 11.1. If PyTorch is imported first, importing Tensorflow will detect CUDA + cuDNN bundled with PyTorch +### Run with the command "poe force-cuda11" +### See https://github.com/python-poetry/poetry/issues/2543 +#force-cuda102 = "pip install torch==1.10.0+cu102 torchvision==0.11.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html" +#force-mmcv = "pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html" + + + +[tool.poetry.dev-dependencies] +black = "==20.8b1" +pytest = ">=6,<7" +keras-autodoc = "==0.6.0" +mkdocs = ">=1.1.2,<2" +mike = ">=1.0" +jupyter = ">=1.0.0,<2" +pymdown-extensions = ">=8.0,<9" +Sphinx = ">=3.1.0,<4" +pytest-cov = ">=2.10.1,<3" +flake8 = ">=3.8.3,<4" +pre-commit = ">=2.8.2,<3" +pytest-mock = ">=3.6.1" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/docs/INSTALL.md b/docs/INSTALL.md index 60ee6f78f..3aa1952dc 100644 --- a/docs/INSTALL.md +++ b/docs/INSTALL.md @@ -1,99 +1,186 @@ -!!! danger "Important" - We currently only support Linux/MacOS installations - -!!! info "Note" - Please do not forget to install the other optional dependencies if you would like to use them: - - - MMCV+MMDetection, and/or - - - YOLOv5 - -## A- Installation using pip - -### **Option 1:** Installing from pypi repository **[Stable Version]** - -To install icevision package together with almost all dependencies: - <div class="termy"> ```console +$ pip install torch==1.10.0+cu102 torchvision==0.11.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html +$ pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html +$ pip install mmdet==2.17.0 $ pip install icevision[all] ``` </div> +!!! danger "Important" + We currently only support Linux/MacOS installations -### **Option 2:** Installing an editable package locally **[For Developers]** +## installation using pip +### **torch** +Depending on what version of cuda driver you'd like to use, you can install different versions of torch builds. If you're not sure which version to choose, we advise to use the current torch default `cuda-10.2` + +=== "cuda-10.2" + ``` + pip install torch==1.10.0+cu102 torchvision==0.11.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html + ``` + +=== "cuda-11.1" + ``` + pip install torch==1.10.0+cu111 torchvision==0.11.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html + ``` +=== "cpu" + ``` + pip install torch==1.10.0+cpu torchvision==0.11.1+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + ``` + +??? note "checking your `torch`-`cuda` version" + To see what version of `torch` and `cuda` is installed in your current environment, run: + ``` + python -c "import torch;print(torch.__version__, torch.version.cuda)" + ``` + output: + ``` + 1.10.1+cu102 10.2 + ``` + Your installed torch version will determine which version of `mmcv-full` you can install. + +### **mmcv-full** *(optional)* + +Installing `mmcv-full` is optional, yet it will let you unleash the full potential of `icevision` and allow you to use the large library of models available in `mmdet`, therefore we strongly recommend doing it. + +=== "cuda-10.2" + ``` + pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html + pip install mmdet==2.17.0 + ``` + +=== "cuda-11.1" + ``` + pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.10.0/index.html + pip install mmdet==2.17.0 + ``` + +=== "cpu" + ``` + pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.10.0/index.html + pip install mmdet==2.17.0 + ``` + + +??? "testing `mmcv` installation" + + Installing `mmcv-full` can be tricky as it depends on both the exact `torch` and `cuda` version. + We highly recommend that you test your installation. You can verify it by executing the following command inside your virtual environment: + ```bash + curl -sSL https://raw.githubusercontent.com/open-mmlab/mmcv/master/.dev_scripts/check_installation.py | python - + ``` + + &nbsp; + If everything went fine, you should see something like the following: + + ``` + Start checking the installation of mmcv-full ... + CPU ops were compiled successfully. + CUDA ops were compiled successfully. + mmcv-full has been installed successfully. + + Environment information: + ----------------------------------------------------------- + sys.platform: linux + Python: 3.8.12 (default, Oct 12 2021, 13:49:34) [GCC 7.5.0] + CUDA available: True + GPU 0: GeForce RTX 2060 + CUDA_HOME: /usr/local/cuda + NVCC: Build cuda_11.1.TC455_06.29069683_0 + GCC: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 + PyTorch: 1.10.0+cu111 + PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.1 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.0.5 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, + + TorchVision: 0.11.1+cu111 + OpenCV: 4.5.4 + MMCV: 1.3.17 + MMCV Compiler: GCC 7.3 + MMCV CUDA Compiler: 11.1 + ----------------------------------------------------------- + ``` + + + &nbsp; -!!! info "Note" - This method is used by developers who are usually either: +### **icevision** +Icevision is distributed in 2 different eggs: - - actively contributing to `icevision` project by adding new features or fixing bugs, or +- `icevision[all]` - **recommended** - complete icevision package with all dependencies +- `icevision[inference]` - minimal dependencies, useful for deployment or simply parsing and viewing your dataset - - creating their own extensions, and making sure that their source code stay in sync with the `icevision` latest version. +we recommend to install the stable release but if you want to use the most recent, bleeding edge version of the library or would like to contribute, here is how to do it: -Then, clone the repo and install the package: -<div class="termy"> -```console -$ git clone --depth=1 https://github.com/airctic/icevision.git -$ cd icevision -$ pip install -e .[all,dev] -$ pre-commit install +- **stable** +```bash +pip install icevision[all] ``` -</div> - -### **Option 3:** Installing a non-editable package from GitHub: - -To install the icevision package from its GitHub repo, run the command here below. This option can be used in Google Colab, -for example, where you might install the icevision latest version (from the `master` branch) - -<div class="termy"> -```console -$ pip install git+https://github.com/airctic/icevision.git#egg=icevision[all] --upgrade +- **bleeding edge** +```bash +pip install git+https://github.com/airctic/icevision.git@master#egg=icevision[all] --upgrade ``` -</div> - - -## B- Installation using conda -Creating a conda environment is considered as a best practice because it avoids polluting the default (base) environment, and reduces dependencies conflicts. Use the following command in order to create a conda environment called **icevision** -<div class="termy"> -```console -$ conda create -n icevision python=3.8 anaconda -$ conda activate icevision -$ pip install icevision[all] +- **editable mode (*for developers*)** +```bash +git clone --depth=1 https://github.com/airctic/icevision.git +cd icevision +pip install -e .[dev] +pre-commit install ``` -</div> +??? "installing using different cuda version" + + Installing icevision with different cuda version is possible, however it is only + recommended for more experienced users. -## Optional dependencies + The main constraint here is `mmcv-full` and `torch` versions compatibility. In short, + torch is build for a specific cuda driver version, mmcv-full on the other hand is + distributed for a specific torch build. -### MMDetection Installation + To see which mmcv-full wheels are available for which versions of torch, check the + table at [mmcv installation guide](https://mmcv.readthedocs.io/en/latest/get_started/installation.html). -We need to provide the appropriate version of the `mmcv-full` package as well as the `cuda` and the `torch` versions. Here are some examples for both the **CUDA** and the **CPU** versions -!!! danger "Torch and CUDA version" - For the torch version use `torch.__version__` and replace the last number with 0. - For the cuda version use: `torch.version.cuda`. +!!! note + running `pip install icevision` will install `icevision[inference]` by default - Example: `TORCH_VERSION = torch1.8.0`; `CUDA_VERSION = cu101` +## installation using conda -#### CUDA-Version Installation Example -<div class="termy"> -```console -$ pip install mmcv-full=="1.3.3" -f https://download.openmmlab.com/mmcv/dist/CUDA_VERSION/TORCH_VERSION/index.html --upgrade -$ pip install mmdet -``` -</div> +The easiest way to install `icevision` with all its dependencies is to use our conda +`environment.yml` file. Creating a conda environment is considered as a best practice +because it avoids polluting the default (base) environment, and reduces dependencies +conflicts. -#### CPU-Version Installation <div class="termy"> ```console -$ pip install mmcv-full=="1.3.3+torch.1.8.0+cpu" -f https://download.openmmlab.com/mmcv/dist/index.html --upgrade -$ pip install mmdet +$ curl -O https://raw.githubusercontent.com/airctic/icevision/master/environment.yml +$ conda env create -f environment.yml ``` + </div> +!!! note + please note that installation may take up to 5 mins. + +!!! warning + using the `environment.yml` works only on cuda-10.2 enabled devices. If your GPU + architecture is Ampere or newer, you have to use the pip installation method. + -## Troubleshooting +&nbsp;&nbsp; +## troubleshooting ### MMCV is not installing with cuda support If you are installing MMCV from the wheel like described above and still are having problems with CUDA you will probably have to compile it locally. Do that by running: diff --git a/docs/css/termynal.css b/docs/css/termynal.css index 0484e65d4..fe86fa76f 100644 --- a/docs/css/termynal.css +++ b/docs/css/termynal.css @@ -7,9 +7,9 @@ */ :root { - --color-bg: #252a33; - --color-text: #eee; - --color-text-subtle: #a2a2a2; + --color-bg: #e5e1db; + --color-text: #404040; + --color-text-subtle: #b7657b; } [data-termynal] { @@ -17,7 +17,7 @@ max-width: 100%; background: var(--color-bg); color: var(--color-text); - font-size: 18px; + font-size: 14px; /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */ font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; border-radius: 4px; diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index c8a7eaef6..4a71262a6 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -18,6 +18,8 @@ markdown_extensions: - codehilite - pymdownx.snippets: base_path: docs + - pymdownx.tabbed: + alternate_style: true - pymdownx.superfences: custom_fences: - name: mermaid @@ -27,8 +29,8 @@ markdown_extensions: emoji_index: !!python/name:materialx.emoji.twemoji emoji_generator: !!python/name:materialx.emoji.to_svg - admonition - - pymdownx.tabbed - attr_list + - pymdownx.details extra: manifest: manifest.webmanifest extra_css: diff --git a/environment.yml b/environment.yml index 5766ca800..2eced792f 100644 --- a/environment.yml +++ b/environment.yml @@ -1,24 +1,19 @@ -name: ice +name: icevision channels: - - fastai - pytorch + - fastai - defaults dependencies: - - jupyter - - pytorch>=1.3.0 - - torchvision>=0.5 - - matplotlib - - pandas - - requests - - pyyaml - - fastprogress>=1.0.0 - - pillow - - python>=3.6 + - pillow >8.0.0,<9 + - python>3.6,<4 + - pytorch==1.10.0 + - torchvision==0.11.1 + - cudatoolkit=10.2 + - fastcore >=1.3.0,<1.4 + - fastai >=2.5.2,<2.6 - pip - - scikit-learn - - scipy - - Cython - - pip: - - fastai - pip: - - git+https://github.com/airctic/icevision.git + - icevision[all]==0.12.0rc1 + - --find-links https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html + - mmcv-full==1.3.17 + - mmdet==2.17.0 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 276fb3c3a..95cf83e3c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ [metadata] name = icevision -version = 0.11.0 +version = 0.12.0rc1 author = Lucas Goulart Vazquez, Farid Hassainia author_email = [email protected], [email protected] description = Agnostic Computer Vision Framework @@ -13,62 +13,59 @@ license = Apache-2.0 classifiers = Development Status :: 4 - Beta Intended Audience :: Developers - Programming Language :: Python :: 3.6 +# Programming Language :: Python :: 3.6 ## numpy >= 1.20.0 dropped support for python 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Topic :: Scientific/Engineering :: Artificial Intelligence Topic :: Scientific/Engineering :: Image Recognition [options] -python_requires = >=3.6,<4 +python_requires = >=3.7,<4 zip_safe = False include_package_data = True packages = find: install_requires = - pillow > 8.0.0,<9 + pillow >8.0.0,<9 torch >=1.9.0,<1.11 torchvision >=0.10.0,<0.12 fastcore >=1.3.0,<1.4 tqdm >=4.49.0,<5 opencv-python >=4.1.1,<5 albumentations >=1.0.3,<1.1 - matplotlib >=3.2.2,<4 - pycocotools>=2.0.2,<3 - requests >=2.23.0,<3 + resnest >=0.0.6b20201125,<0.0.7 + effdet >=0.2.1,<0.3 + sahi >=0.8.19,<1.0 + yolov5-icevision >=6.0.0 + importlib-metadata >=1;python_version<"3.8" + ipykernel >=4.10.1,<6 + dataclasses ==0.6 loguru >=0.5.3 - importlib-metadata>=1;python_version<"3.8" + [options.extras_require] +inference = + icevision + all = fastai >=2.5.2,<2.6 - ipykernel >=4.10.1,<6 pytorch-lightning >=1.4.5 - effdet >=0.2.1,<0.3 - omegaconf >=2,<3 - dataclasses ==0.6 wandb >=0.10.7 - resnest >=0.0.6b20201125,<0.0.7 - sahi >=0.8.19,<1.0 - yolov5-icevision >=6.0.0 -inference = - effdet >=0.2.1,<0.3 - omegaconf >=2,<3 - dataclasses ==0.6 - resnest >=0.0.6b20201125,<0.0.7 + dev = - black ==20.8b1 - pytest >=6,<7 - keras-autodoc ==0.6.0 - mkdocs >=1.1.2,<2 - mkdocs-material >=7.0.6,<8 - mike >=1.0 - jupyter >=1.0.0,<2 - pymdown-extensions >=8.0,<9 - Sphinx >=3.1.0,<4 - pytest-cov >=2.10.1,<3 - flake8 >=3.8.3,<4 - pre-commit >=2.8.2,<3 - pytest-mock >=3.6.1 + icevision[all] + keras-autodoc ==0.6.0 + black ==20.8b1 + pytest >=6,<7 + mkdocs >=1.1.2,<2 + mkdocs-material >=7.3.1,<8 + mike >=1.0 + jupyter >=1.0.0,<2 + Sphinx >=3.1.0,<4 + pytest-cov >=2.10.1,<3 + flake8 >=3.8.3,<4 + pre-commit >=2.8.2,<3 + pytest-mock >=3.6.1 + [flake8] # from ci-all-testing.yaml diff --git a/setup.py b/setup.py index 7f1a1763c..26e08e48e 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,5 @@ from setuptools import setup + if __name__ == "__main__": setup()
StackStorm__st2-5104
Add version string to st2tests to make it installable Prior to this change, this will fail: cd st2tests/st2tests pip install . After this change that command successfully installs the `st2tests` package. This will also work for installing via GitHub as in: pip install -e git+https://github.com/StackStorm/[email protected]#egg=st2tests&subdirectory=st2tests The original request in #2574 is to get st2tests onto PyPI, and I'm not sure if this will accomplish that request, but this is a good first step.
[ { "content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n", "path": "st2tests/st2tests/__init__.py" } ]
[ { "content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n\n__version__ = '3.3dev'\n", "path": "st2tests/st2tests/__init__.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2c28d8551e..1edeb2b090 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -29,6 +29,9 @@ Changed Fixed ~~~~~~~~~ +* Fixed issue were st2tests was not getting installed using pip because no version was specified. + Contributed by @anirudhbagri + * Added monkey patch fix to st2stream to enable it to work with mongodb via SSL. (bug fix) #5078 #5091 * Fix nginx buffering long polling stream to client. Instead of waiting for closed connection wait for final event to be sent to client. (bug fix) #4842 #5042 diff --git a/st2tests/st2tests/__init__.py b/st2tests/st2tests/__init__.py index 646488c6a5..77d3574a20 100644 --- a/st2tests/st2tests/__init__.py +++ b/st2tests/st2tests/__init__.py @@ -29,3 +29,5 @@ 'DbModelTestCase', 'WorkflowTestCase' ] + +__version__ = '3.3dev'
sonic-net__sonic-utilities-2090
sonic-installer install fails in armhf <!-- If you are reporting a new issue, make sure that we do not have any duplicates already open. You can ensure this by searching the issue list for this repository. If there is a duplicate, please close your issue and add a comment to the existing issue instead. If you suspect your issue is a bug, please edit your issue description to include the BUG REPORT INFORMATION shown below. If you fail to provide this information within 7 days, we cannot debug your issue and will close it. We will, however, reopen it if you later provide the information. For more information about reporting issues, see https://github.com/Azure/SONiC/wiki#report-issues --------------------------------------------------- GENERAL SUPPORT INFORMATION --------------------------------------------------- The GitHub issue tracker is for bug reports and feature requests. General support can be found at the following locations: - SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject --------------------------------------------------- BUG REPORT INFORMATION --------------------------------------------------- Use the commands below to provide key information from your environment: You do NOT have to include this information if this is a FEATURE REQUEST --> #### Description <!-- Briefly describe the problem you are having in a few paragraphs. --> Sonic-installer failure log using install operation. File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 555, in invoke return callback(*args, **kwargs) File “/usr/local/lib/python3.9/dist-packages/sonic_installer/main.py”, line 543, in install if not skip_platform_check and not bootloader.verify_image_platform(image_path): File “/usr/local/lib/python3.9/dist-packages/sonic_installer/bootloader/uboot.py”, line 81, in verify_image_platform return os.path.isfile(image_path) NameError: name ‘os’ is not defined ## Steps to reproduce the issue 1. sonic-installer install <image> #### Describe the results you received #### Describe the results you expected sonic-installer to work seamlessly. #### Additional information you deem important (e.g. issue happens only occasionally) #### Output of `show version` ``` # show version SONiC Software Version: SONiC.HEAD.0-dirty-20220302.124544 Distribution: Debian 11.2 Kernel: 5.10.0-8-2-armmp Build commit: 94b778c39 Build date: Wed Mar 2 08:25:34 UTC 2022 Built by: marvell@cpss-build1 Platform: armhf-nokia_ixs7215_52x-r0 HwSKU: Nokia-7215 ASIC: marvell ASIC Count: 1 Serial Number: NK203110011 Model Number: 3HE16794AARA01 Hardware Revision: N/A Uptime: 16:49:12 up 33 min, 1 user, load average: 0.21, 0.69, 0.82 Docker images: REPOSITORY TAG IMAGE ID SIZE docker-dhcp-relay latest 39c289f394ba 484MB docker-teamd HEAD.0-dirty-20220302.124544 897670943f24 483MB docker-teamd latest 897670943f24 483MB docker-syncd-mrvl HEAD.0-dirty-20220302.124544 ded0f3c5116c 607MB docker-syncd-mrvl latest ded0f3c5116c 607MB docker-snmp HEAD.0-dirty-20220302.124544 50b2af07aa43 514MB docker-snmp latest 50b2af07aa43 514MB docker-sflow HEAD.0-dirty-20220302.124544 80d19598c760 484MB docker-sflow latest 80d19598c760 484MB docker-router-advertiser HEAD.0-dirty-20220302.124544 ff951f4fa02e 474MB docker-router-advertiser latest ff951f4fa02e 474MB docker-platform-monitor HEAD.0-dirty-20220302.124544 87406f9f212c 695MB docker-platform-monitor latest 87406f9f212c 695MB docker-orchagent HEAD.0-dirty-20220302.124544 fafd5ae1c574 571MB docker-orchagent latest fafd5ae1c574 571MB docker-nat HEAD.0-dirty-20220302.124544 c6a2c3a9d794 485MB docker-nat latest c6a2c3a9d794 485MB docker-mux HEAD.0-dirty-20220302.124544 e1246be5c510 492MB docker-mux latest e1246be5c510 492MB docker-macsec HEAD.0-dirty-20220302.124544 87fbc786165c 485MB docker-macsec latest 87fbc786165c 485MB docker-lldp HEAD.0-dirty-20220302.124544 498091e0d9f6 478MB docker-lldp latest 498091e0d9f6 478MB docker-fpm-frr HEAD.0-dirty-20220302.124544 95d1d947a343 497MB docker-fpm-frr latest 95d1d947a343 497MB docker-database HEAD.0-dirty-20220302.124544 3e5047261b1c 471MB docker-database latest 3e5047261b1c 471MB ``` <!-- Also attach debug file produced by `sudo generate_dump` -->
[ { "content": "\"\"\"\nBootloader implementation for uboot based platforms\n\"\"\"\n\nimport platform\nimport subprocess\n\nimport click\n\nfrom ..common import (\n HOST_PATH,\n IMAGE_DIR_PREFIX,\n IMAGE_PREFIX,\n run_command,\n)\nfrom .onie import OnieInstallerBootloader\n\nclass UbootBootloader(OnieInstallerBootloader):\n\n NAME = 'uboot'\n\n def get_installed_images(self):\n images = []\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_1\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_2\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n return images\n\n def get_next_image(self):\n images = self.get_installed_images()\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n boot_next\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if \"sonic_image_2\" in image:\n next_image_index = 1\n else:\n next_image_index = 0\n return images[next_image_index]\n\n def set_default_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n return True\n\n def set_next_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_2\"')\n return True\n\n def install_image(self, image_path):\n run_command(\"bash \" + image_path)\n\n def remove_image(self, image):\n click.echo('Updating next boot ...')\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n run_command('/usr/bin/fw_setenv sonic_version_1 \"NONE\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n run_command('/usr/bin/fw_setenv sonic_version_2 \"NONE\"')\n image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)\n click.echo('Removing image root filesystem...')\n subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])\n click.echo('Done')\n\n def verify_image_platform(self, image_path):\n return os.path.isfile(image_path)\n\n @classmethod\n def detect(cls):\n arch = platform.machine()\n return (\"arm\" in arch) or (\"aarch64\" in arch)\n", "path": "sonic_installer/bootloader/uboot.py" } ]
[ { "content": "\"\"\"\nBootloader implementation for uboot based platforms\n\"\"\"\n\nimport platform\nimport subprocess\nimport os\n\nimport click\n\nfrom ..common import (\n HOST_PATH,\n IMAGE_DIR_PREFIX,\n IMAGE_PREFIX,\n run_command,\n)\nfrom .onie import OnieInstallerBootloader\n\nclass UbootBootloader(OnieInstallerBootloader):\n\n NAME = 'uboot'\n\n def get_installed_images(self):\n images = []\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_1\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_2\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n return images\n\n def get_next_image(self):\n images = self.get_installed_images()\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n boot_next\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if \"sonic_image_2\" in image:\n next_image_index = 1\n else:\n next_image_index = 0\n return images[next_image_index]\n\n def set_default_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n return True\n\n def set_next_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_2\"')\n return True\n\n def install_image(self, image_path):\n run_command(\"bash \" + image_path)\n\n def remove_image(self, image):\n click.echo('Updating next boot ...')\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n run_command('/usr/bin/fw_setenv sonic_version_1 \"NONE\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n run_command('/usr/bin/fw_setenv sonic_version_2 \"NONE\"')\n image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)\n click.echo('Removing image root filesystem...')\n subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])\n click.echo('Done')\n\n def verify_image_platform(self, image_path):\n return os.path.isfile(image_path)\n\n @classmethod\n def detect(cls):\n arch = platform.machine()\n return (\"arm\" in arch) or (\"aarch64\" in arch)\n", "path": "sonic_installer/bootloader/uboot.py" } ]
diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index cb6c756091..bc4b98daeb 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -4,6 +4,7 @@ import platform import subprocess +import os import click
nextcloud__appstore-67
After clicking confirm button I got a 404 - click the confirm link in the email - click the button on that page - getting redirected to https://.../accounts/login/ instead of https://.../login/ which is not available cc @BernhardPosselt @adsworth
[ { "content": "\"\"\"\nDjango settings for nextcloudappstore project.\n\nGenerated by 'django-admin startproject' using Django 1.9.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nfrom os.path import dirname, abspath, join, pardir, realpath\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom django.conf.global_settings import LANGUAGES\n\nBASE_DIR = realpath(join(dirname(dirname(abspath(__file__))), pardir))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = [\n 'nextcloudappstore.core.apps.CoreConfig',\n 'parler',\n 'captcha',\n 'rest_framework',\n 'corsheaders',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.bitbucket',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'nextcloudappstore.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'nextcloudappstore.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': join(BASE_DIR, 'test.sqlite3'),\n }\n }\n}\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'app_upload': '100/day'\n }\n}\n\nSITE_ID = 1\n\n# Allauth configuration\n# http://django-allauth.readthedocs.io/en/latest/configuration.html\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_LOGOUT_ON_GET = True\nACCOUNT_LOGOUT_REDIRECT_URL = 'home'\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_SIGNUP_FORM_CLASS = \\\n 'nextcloudappstore.core.user.forms.SignupFormRecaptcha'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nPARLER_LANGUAGES = {\n 1: [{'code': code} for code, trans in LANGUAGES],\n 'default': {\n 'fallbacks': ['en'],\n 'hide_untranslated': False,\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nMEDIA_ROOT = join(BASE_DIR, 'media')\nRELEASE_DOWNLOAD_ROOT = None\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Default security settings\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'if-none-match',\n)\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'x-content-type-options',\n 'content-type',\n)\n\n# use modern no Captcha reCaptcha\nNOCAPTCHA = True\n\nLOGIN_REDIRECT_URL = 'home'\n", "path": "nextcloudappstore/settings/base.py" } ]
[ { "content": "\"\"\"\nDjango settings for nextcloudappstore project.\n\nGenerated by 'django-admin startproject' using Django 1.9.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nfrom os.path import dirname, abspath, join, pardir, realpath\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom django.conf.global_settings import LANGUAGES\n\nBASE_DIR = realpath(join(dirname(dirname(abspath(__file__))), pardir))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = [\n 'nextcloudappstore.core.apps.CoreConfig',\n 'parler',\n 'captcha',\n 'rest_framework',\n 'corsheaders',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.bitbucket',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'nextcloudappstore.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'nextcloudappstore.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': join(BASE_DIR, 'test.sqlite3'),\n }\n }\n}\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'app_upload': '100/day'\n }\n}\n\nSITE_ID = 1\n\n# Allauth configuration\n# http://django-allauth.readthedocs.io/en/latest/configuration.html\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_LOGOUT_ON_GET = True\nACCOUNT_LOGOUT_REDIRECT_URL = 'home'\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_SIGNUP_FORM_CLASS = \\\n 'nextcloudappstore.core.user.forms.SignupFormRecaptcha'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nPARLER_LANGUAGES = {\n 1: [{'code': code} for code, trans in LANGUAGES],\n 'default': {\n 'fallbacks': ['en'],\n 'hide_untranslated': False,\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nMEDIA_ROOT = join(BASE_DIR, 'media')\nRELEASE_DOWNLOAD_ROOT = None\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Default security settings\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'if-none-match',\n)\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'x-content-type-options',\n 'content-type',\n)\n\n# use modern no Captcha reCaptcha\nNOCAPTCHA = True\n\nLOGIN_REDIRECT_URL = 'home'\nLOGIN_URL = 'account_login'\n", "path": "nextcloudappstore/settings/base.py" } ]
diff --git a/nextcloudappstore/settings/base.py b/nextcloudappstore/settings/base.py index e46c163dd2c..07b5c6b412b 100644 --- a/nextcloudappstore/settings/base.py +++ b/nextcloudappstore/settings/base.py @@ -188,3 +188,4 @@ NOCAPTCHA = True LOGIN_REDIRECT_URL = 'home' +LOGIN_URL = 'account_login'
ipython__ipython-5701
Move ssh out of external and into lib This module does not belong in external - it cannot be replaced by an external system module.
[ { "content": "", "path": "IPython/external/ssh/__init__.py" } ]
[ { "content": "\"\"\"This is a copy of zmq.ssh\"\"\"\n\ntry:\n from zmq.ssh import *\nexcept ImportError:\n from . import tunnel\n from .tunnel import *\n", "path": "IPython/external/ssh/__init__.py" } ]
diff --git a/IPython/external/ssh/__init__.py b/IPython/external/ssh/__init__.py index e69de29bb2d..9e5e8b9c070 100644 --- a/IPython/external/ssh/__init__.py +++ b/IPython/external/ssh/__init__.py @@ -0,0 +1,7 @@ +"""This is a copy of zmq.ssh""" + +try: + from zmq.ssh import * +except ImportError: + from . import tunnel + from .tunnel import *
Project-MONAI__MONAI-2568
Missing `return` in `__call__` of transforms: `SaveImage`, `NiftiSaver` and `PNGSaver` **To Reproduce** Steps to reproduce the behavior: ``` from monai.transforms import SaveImage, Compose saver = Compose([SaveImage(output_dir="./output", output_ext=".png", output_postfix="seg")]) img = torch.randn([3, 32, 32]) output = saver(img) print(output) ```
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for IO functions\nhttps://github.com/Project-MONAI/MONAI/wiki/MONAI_Design\n\"\"\"\n\nimport sys\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike\nfrom monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader\nfrom monai.data.nifti_saver import NiftiSaver\nfrom monai.data.png_saver import PNGSaver\nfrom monai.transforms.transform import Transform\nfrom monai.utils import GridSampleMode, GridSamplePadMode\nfrom monai.utils import ImageMetaKey as Key\nfrom monai.utils import InterpolateMode, ensure_tuple, optional_import\n\nnib, _ = optional_import(\"nibabel\")\nImage, _ = optional_import(\"PIL.Image\")\n\n__all__ = [\"LoadImage\", \"SaveImage\"]\n\n\ndef switch_endianness(data, new=\"<\"):\n \"\"\"\n Convert the input `data` endianness to `new`.\n\n Args:\n data: input to be converted.\n new: the target endianness, currently support \"<\" or \">\".\n \"\"\"\n if isinstance(data, np.ndarray):\n # default to system endian\n sys_native = ((sys.byteorder == \"little\") and \"<\") or \">\"\n current_ = sys_native if data.dtype.byteorder not in (\"<\", \">\") else data.dtype.byteorder\n if new not in (\"<\", \">\"):\n raise NotImplementedError(f\"Not implemented option new={new}.\")\n if current_ != new:\n data = data.byteswap().newbyteorder(new)\n elif isinstance(data, tuple):\n data = tuple(switch_endianness(x, new) for x in data)\n elif isinstance(data, list):\n data = [switch_endianness(x, new) for x in data]\n elif isinstance(data, dict):\n data = {k: switch_endianness(v, new) for k, v in data.items()}\n elif isinstance(data, (bool, str, float, int, type(None))):\n pass\n else:\n raise AssertionError(f\"Unknown type: {type(data).__name__}\")\n return data\n\n\nclass LoadImage(Transform):\n \"\"\"\n Load image file or files from provided path based on reader.\n Automatically choose readers based on the supported suffixes and in below order:\n - User specified reader at runtime when call this loader.\n - Registered readers from the latest to the first in list.\n - Default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),\n (npz, npy -> NumpyReader), (others -> ITKReader).\n\n \"\"\"\n\n def __init__(\n self,\n reader: Optional[Union[ImageReader, str]] = None,\n image_only: bool = False,\n dtype: DtypeLike = np.float32,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n reader: register reader to load image file and meta data, if None, still can register readers\n at runtime or use the default readers. If a string of reader name provided, will construct\n a reader object with the `*args` and `**kwargs` parameters, supported reader name: \"NibabelReader\",\n \"PILReader\", \"ITKReader\", \"NumpyReader\".\n image_only: if True return only the image volume, otherwise return image data array and header dict.\n dtype: if not None convert the loaded image to this data type.\n args: additional parameters for reader if providing a reader name.\n kwargs: additional parameters for reader if providing a reader name.\n\n Note:\n The transform returns image data array if `image_only` is True,\n or a tuple of two elements containing the data array, and the meta data in a dict format otherwise.\n\n \"\"\"\n # set predefined readers as default\n self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader()]\n if reader is not None:\n if isinstance(reader, str):\n supported_readers = {\n \"nibabelreader\": NibabelReader,\n \"pilreader\": PILReader,\n \"itkreader\": ITKReader,\n \"numpyreader\": NumpyReader,\n }\n reader = reader.lower()\n if reader not in supported_readers:\n raise ValueError(f\"unsupported reader type: {reader}, available options: {supported_readers}.\")\n self.register(supported_readers[reader](*args, **kwargs))\n else:\n self.register(reader)\n\n self.image_only = image_only\n self.dtype = dtype\n\n def register(self, reader: ImageReader) -> List[ImageReader]:\n \"\"\"\n Register image reader to load image file and meta data, latest registered reader has higher priority.\n Return all the registered image readers.\n\n Args:\n reader: registered reader to load image file and meta data based on suffix,\n if all registered readers can't match suffix at runtime, use the default readers.\n\n \"\"\"\n if not isinstance(reader, ImageReader):\n raise ValueError(f\"reader must be ImageReader object, but got {type(reader)}.\")\n self.readers.append(reader)\n return self.readers\n\n def __call__(\n self,\n filename: Union[Sequence[str], str],\n reader: Optional[ImageReader] = None,\n ):\n \"\"\"\n Args:\n filename: path file or file-like object or a list of files.\n will save the filename to meta_data with key `filename_or_obj`.\n if provided a list of files, use the filename of first file.\n reader: runtime reader to load image file and meta data.\n\n \"\"\"\n if reader is None or not reader.verify_suffix(filename):\n for r in reversed(self.readers):\n if r.verify_suffix(filename):\n reader = r\n break\n\n if reader is None:\n raise RuntimeError(\n f\"can not find suitable reader for this file: {filename}. \\\n Please install dependency libraries: (nii, nii.gz) -> Nibabel, (png, jpg, bmp) -> PIL, \\\n (npz, npy) -> Numpy, others -> ITK. Refer to the installation instruction: \\\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies.\"\n )\n\n img = reader.read(filename)\n img_array, meta_data = reader.get_data(img)\n img_array = img_array.astype(self.dtype)\n\n if self.image_only:\n return img_array\n meta_data[Key.FILENAME_OR_OBJ] = ensure_tuple(filename)[0]\n # make sure all elements in metadata are little endian\n meta_data = switch_endianness(meta_data, \"<\")\n\n return img_array, meta_data\n\n\nclass SaveImage(Transform):\n \"\"\"\n Save transformed data into files, support NIfTI and PNG formats.\n It can work for both numpy array and PyTorch Tensor in both preprocessing transform\n chain and postprocessing transform chain.\n The name of saved file will be `{input_image_name}_{output_postfix}{output_ext}`,\n where the input image name is extracted from the provided meta data dictionary.\n If no meta data provided, use index from 0 as the filename prefix.\n It can also save a list of PyTorch Tensor or numpy array without `batch dim`.\n\n Note: image should be channel-first shape: [C,H,W,[D]].\n\n Args:\n output_dir: output image directory.\n output_postfix: a string appended to all output file names, default to `trans`.\n output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.\n resample: whether to resample before saving the data array.\n if saving PNG format image, based on the `spatial_shape` from metadata.\n if saving NIfTI format image, based on the `original_affine` from metadata.\n mode: This option is used when ``resample = True``. Defaults to ``\"nearest\"``.\n\n - NIfTI files {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n - PNG files {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n\n padding_mode: This option is used when ``resample = True``. Defaults to ``\"border\"``.\n\n - NIfTI files {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n - PNG files\n This option is ignored.\n\n scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling\n [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.\n it's used for PNG format only.\n dtype: data type during resampling computation. Defaults to ``np.float64`` for best precision.\n if None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n it's used for NIfTI format only.\n output_dtype: data type for saving data. Defaults to ``np.float32``.\n it's used for NIfTI format only.\n squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel\n has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and\n then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,\n image will always be saved as (H,W,D,C).\n it's used for NIfTI format only.\n data_root_dir: if not empty, it specifies the beginning parts of the input file's\n absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from\n `data_root_dir` to preserve folder structure when saving in case there are files in different\n folders with the same file names. for example:\n input_file_name: /foo/bar/test1/image.nii,\n output_postfix: seg\n output_ext: nii.gz\n output_dir: /output,\n data_root_dir: /foo/bar,\n output will be: /output/test1/image/image_seg.nii.gz\n print_log: whether to print log about the saved file path, etc. default to `True`.\n\n \"\"\"\n\n def __init__(\n self,\n output_dir: str = \"./\",\n output_postfix: str = \"trans\",\n output_ext: str = \".nii.gz\",\n resample: bool = True,\n mode: Union[GridSampleMode, InterpolateMode, str] = \"nearest\",\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n scale: Optional[int] = None,\n dtype: DtypeLike = np.float64,\n output_dtype: DtypeLike = np.float32,\n squeeze_end_dims: bool = True,\n data_root_dir: str = \"\",\n print_log: bool = True,\n ) -> None:\n self.saver: Union[NiftiSaver, PNGSaver]\n if output_ext in (\".nii.gz\", \".nii\"):\n self.saver = NiftiSaver(\n output_dir=output_dir,\n output_postfix=output_postfix,\n output_ext=output_ext,\n resample=resample,\n mode=GridSampleMode(mode),\n padding_mode=padding_mode,\n dtype=dtype,\n output_dtype=output_dtype,\n squeeze_end_dims=squeeze_end_dims,\n data_root_dir=data_root_dir,\n print_log=print_log,\n )\n elif output_ext == \".png\":\n self.saver = PNGSaver(\n output_dir=output_dir,\n output_postfix=output_postfix,\n output_ext=output_ext,\n resample=resample,\n mode=InterpolateMode(mode),\n scale=scale,\n data_root_dir=data_root_dir,\n print_log=print_log,\n )\n else:\n raise ValueError(f\"unsupported output extension: {output_ext}.\")\n\n def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None):\n \"\"\"\n Args:\n img: target data content that save into file.\n meta_data: key-value pairs of meta_data corresponding to the data.\n\n \"\"\"\n self.saver.save(img, meta_data)\n", "path": "monai/transforms/io/array.py" } ]
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for IO functions\nhttps://github.com/Project-MONAI/MONAI/wiki/MONAI_Design\n\"\"\"\n\nimport sys\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike\nfrom monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader\nfrom monai.data.nifti_saver import NiftiSaver\nfrom monai.data.png_saver import PNGSaver\nfrom monai.transforms.transform import Transform\nfrom monai.utils import GridSampleMode, GridSamplePadMode\nfrom monai.utils import ImageMetaKey as Key\nfrom monai.utils import InterpolateMode, ensure_tuple, optional_import\n\nnib, _ = optional_import(\"nibabel\")\nImage, _ = optional_import(\"PIL.Image\")\n\n__all__ = [\"LoadImage\", \"SaveImage\"]\n\n\ndef switch_endianness(data, new=\"<\"):\n \"\"\"\n Convert the input `data` endianness to `new`.\n\n Args:\n data: input to be converted.\n new: the target endianness, currently support \"<\" or \">\".\n \"\"\"\n if isinstance(data, np.ndarray):\n # default to system endian\n sys_native = ((sys.byteorder == \"little\") and \"<\") or \">\"\n current_ = sys_native if data.dtype.byteorder not in (\"<\", \">\") else data.dtype.byteorder\n if new not in (\"<\", \">\"):\n raise NotImplementedError(f\"Not implemented option new={new}.\")\n if current_ != new:\n data = data.byteswap().newbyteorder(new)\n elif isinstance(data, tuple):\n data = tuple(switch_endianness(x, new) for x in data)\n elif isinstance(data, list):\n data = [switch_endianness(x, new) for x in data]\n elif isinstance(data, dict):\n data = {k: switch_endianness(v, new) for k, v in data.items()}\n elif isinstance(data, (bool, str, float, int, type(None))):\n pass\n else:\n raise AssertionError(f\"Unknown type: {type(data).__name__}\")\n return data\n\n\nclass LoadImage(Transform):\n \"\"\"\n Load image file or files from provided path based on reader.\n Automatically choose readers based on the supported suffixes and in below order:\n - User specified reader at runtime when call this loader.\n - Registered readers from the latest to the first in list.\n - Default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),\n (npz, npy -> NumpyReader), (others -> ITKReader).\n\n \"\"\"\n\n def __init__(\n self,\n reader: Optional[Union[ImageReader, str]] = None,\n image_only: bool = False,\n dtype: DtypeLike = np.float32,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n reader: register reader to load image file and meta data, if None, still can register readers\n at runtime or use the default readers. If a string of reader name provided, will construct\n a reader object with the `*args` and `**kwargs` parameters, supported reader name: \"NibabelReader\",\n \"PILReader\", \"ITKReader\", \"NumpyReader\".\n image_only: if True return only the image volume, otherwise return image data array and header dict.\n dtype: if not None convert the loaded image to this data type.\n args: additional parameters for reader if providing a reader name.\n kwargs: additional parameters for reader if providing a reader name.\n\n Note:\n The transform returns image data array if `image_only` is True,\n or a tuple of two elements containing the data array, and the meta data in a dict format otherwise.\n\n \"\"\"\n # set predefined readers as default\n self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader()]\n if reader is not None:\n if isinstance(reader, str):\n supported_readers = {\n \"nibabelreader\": NibabelReader,\n \"pilreader\": PILReader,\n \"itkreader\": ITKReader,\n \"numpyreader\": NumpyReader,\n }\n reader = reader.lower()\n if reader not in supported_readers:\n raise ValueError(f\"unsupported reader type: {reader}, available options: {supported_readers}.\")\n self.register(supported_readers[reader](*args, **kwargs))\n else:\n self.register(reader)\n\n self.image_only = image_only\n self.dtype = dtype\n\n def register(self, reader: ImageReader) -> List[ImageReader]:\n \"\"\"\n Register image reader to load image file and meta data, latest registered reader has higher priority.\n Return all the registered image readers.\n\n Args:\n reader: registered reader to load image file and meta data based on suffix,\n if all registered readers can't match suffix at runtime, use the default readers.\n\n \"\"\"\n if not isinstance(reader, ImageReader):\n raise ValueError(f\"reader must be ImageReader object, but got {type(reader)}.\")\n self.readers.append(reader)\n return self.readers\n\n def __call__(\n self,\n filename: Union[Sequence[str], str],\n reader: Optional[ImageReader] = None,\n ):\n \"\"\"\n Args:\n filename: path file or file-like object or a list of files.\n will save the filename to meta_data with key `filename_or_obj`.\n if provided a list of files, use the filename of first file.\n reader: runtime reader to load image file and meta data.\n\n \"\"\"\n if reader is None or not reader.verify_suffix(filename):\n for r in reversed(self.readers):\n if r.verify_suffix(filename):\n reader = r\n break\n\n if reader is None:\n raise RuntimeError(\n f\"can not find suitable reader for this file: {filename}. \\\n Please install dependency libraries: (nii, nii.gz) -> Nibabel, (png, jpg, bmp) -> PIL, \\\n (npz, npy) -> Numpy, others -> ITK. Refer to the installation instruction: \\\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies.\"\n )\n\n img = reader.read(filename)\n img_array, meta_data = reader.get_data(img)\n img_array = img_array.astype(self.dtype)\n\n if self.image_only:\n return img_array\n meta_data[Key.FILENAME_OR_OBJ] = ensure_tuple(filename)[0]\n # make sure all elements in metadata are little endian\n meta_data = switch_endianness(meta_data, \"<\")\n\n return img_array, meta_data\n\n\nclass SaveImage(Transform):\n \"\"\"\n Save transformed data into files, support NIfTI and PNG formats.\n It can work for both numpy array and PyTorch Tensor in both preprocessing transform\n chain and postprocessing transform chain.\n The name of saved file will be `{input_image_name}_{output_postfix}{output_ext}`,\n where the input image name is extracted from the provided meta data dictionary.\n If no meta data provided, use index from 0 as the filename prefix.\n It can also save a list of PyTorch Tensor or numpy array without `batch dim`.\n\n Note: image should be channel-first shape: [C,H,W,[D]].\n\n Args:\n output_dir: output image directory.\n output_postfix: a string appended to all output file names, default to `trans`.\n output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.\n resample: whether to resample before saving the data array.\n if saving PNG format image, based on the `spatial_shape` from metadata.\n if saving NIfTI format image, based on the `original_affine` from metadata.\n mode: This option is used when ``resample = True``. Defaults to ``\"nearest\"``.\n\n - NIfTI files {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n - PNG files {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n\n padding_mode: This option is used when ``resample = True``. Defaults to ``\"border\"``.\n\n - NIfTI files {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n - PNG files\n This option is ignored.\n\n scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling\n [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.\n it's used for PNG format only.\n dtype: data type during resampling computation. Defaults to ``np.float64`` for best precision.\n if None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n it's used for NIfTI format only.\n output_dtype: data type for saving data. Defaults to ``np.float32``.\n it's used for NIfTI format only.\n squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel\n has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and\n then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,\n image will always be saved as (H,W,D,C).\n it's used for NIfTI format only.\n data_root_dir: if not empty, it specifies the beginning parts of the input file's\n absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from\n `data_root_dir` to preserve folder structure when saving in case there are files in different\n folders with the same file names. for example:\n input_file_name: /foo/bar/test1/image.nii,\n output_postfix: seg\n output_ext: nii.gz\n output_dir: /output,\n data_root_dir: /foo/bar,\n output will be: /output/test1/image/image_seg.nii.gz\n print_log: whether to print log about the saved file path, etc. default to `True`.\n\n \"\"\"\n\n def __init__(\n self,\n output_dir: str = \"./\",\n output_postfix: str = \"trans\",\n output_ext: str = \".nii.gz\",\n resample: bool = True,\n mode: Union[GridSampleMode, InterpolateMode, str] = \"nearest\",\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n scale: Optional[int] = None,\n dtype: DtypeLike = np.float64,\n output_dtype: DtypeLike = np.float32,\n squeeze_end_dims: bool = True,\n data_root_dir: str = \"\",\n print_log: bool = True,\n ) -> None:\n self.saver: Union[NiftiSaver, PNGSaver]\n if output_ext in (\".nii.gz\", \".nii\"):\n self.saver = NiftiSaver(\n output_dir=output_dir,\n output_postfix=output_postfix,\n output_ext=output_ext,\n resample=resample,\n mode=GridSampleMode(mode),\n padding_mode=padding_mode,\n dtype=dtype,\n output_dtype=output_dtype,\n squeeze_end_dims=squeeze_end_dims,\n data_root_dir=data_root_dir,\n print_log=print_log,\n )\n elif output_ext == \".png\":\n self.saver = PNGSaver(\n output_dir=output_dir,\n output_postfix=output_postfix,\n output_ext=output_ext,\n resample=resample,\n mode=InterpolateMode(mode),\n scale=scale,\n data_root_dir=data_root_dir,\n print_log=print_log,\n )\n else:\n raise ValueError(f\"unsupported output extension: {output_ext}.\")\n\n def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None):\n \"\"\"\n Args:\n img: target data content that save into file.\n meta_data: key-value pairs of meta_data corresponding to the data.\n\n \"\"\"\n self.saver.save(img, meta_data)\n\n return img\n", "path": "monai/transforms/io/array.py" } ]
diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index d902300ecb..2b87b76a48 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -289,3 +289,5 @@ def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dic """ self.saver.save(img, meta_data) + + return img
kartoza__prj.app-1156
Sign up link for certification is broken when not logged in IF a user visits https://changelog.qgis.org/en/qgis/create-certifyingorganisation/ and they are not logged in, they get redirected to the front page. They should instead get shown a page asking them to log / create an account first and then get redirected back to the create page. They should also be shown the help link so they can find out how the certification system works.
[ { "content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += [\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n 'certification',\n 'lesson',\n]\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('id', _('Indonesian')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE += [\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n]\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE['JAVASCRIPT']['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js',\n 'js/entry.js',\n 'js/category.js',\n 'js/form.js',\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE['STYLESHEETS']['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css',\n 'css/base.css',\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen,projection',\n },\n}\n\nVALID_DOMAIN = [\n 'localhost',\n 'changelog.kartoza.com',\n]\n\nEMAIL_HOST_USER = '[email protected]'\n", "path": "django_project/core/settings/project.py" } ]
[ { "content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += [\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n 'certification',\n 'lesson',\n]\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('id', _('Indonesian')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE += [\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n]\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE['JAVASCRIPT']['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js',\n 'js/entry.js',\n 'js/category.js',\n 'js/form.js',\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE['STYLESHEETS']['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css',\n 'css/base.css',\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen,projection',\n },\n}\n\nVALID_DOMAIN = [\n 'localhost',\n 'changelog.kartoza.com',\n]\n\nEMAIL_HOST_USER = '[email protected]'\nLOGIN_URL = '/en/accounts/login/'\n", "path": "django_project/core/settings/project.py" } ]
diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py index 64fc1cd4e..696ea42b8 100644 --- a/django_project/core/settings/project.py +++ b/django_project/core/settings/project.py @@ -84,3 +84,4 @@ ] EMAIL_HOST_USER = '[email protected]' +LOGIN_URL = '/en/accounts/login/'
streamlit__streamlit-3038
Dark theme does not properly adjust markdown tables ### Summary When I load the latest streamlit in darkmode I cannot see anything in my markdown tables because the text color is changed but not the background color. ### Steps to reproduce Code snippet: ``` md = """ | Label | Info | | -------- | --------- | | Row | Data | """ st.markdown(md) ``` **Expected behavior:** I would expect if the text color get changed to white in the table, the background color should get changed to something dark **Actual behavior:** Both the text color and background are white so nothing can be seen. ### Is this a regression? no, consequence of new theme ### Debug info - Streamlit version: 0.79.0 - Python version: 3.7.9 - pip - OS version: MacOS Catalina 10.15.7 - Browser version: Chrome 89.0.4389.90 ### Additional information I'm not sure why markdown tables have different background style but they seem to; perhaps other ui elements would be affected as well.
[ { "content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.markdown(\"This **markdown** is awesome! :sunglasses:\")\n\nst.markdown(\"This <b>HTML tag</b> is escaped!\")\n\nst.markdown(\"This <b>HTML tag</b> is not escaped!\", unsafe_allow_html=True)\n\nst.markdown(\"[text]\")\n\nst.markdown(\"[link](href)\")\n\nst.markdown(\"[][]\")\n\nst.markdown(\"Inline math with $\\KaTeX$\")\n\nst.markdown(\n \"\"\"\n$$\nax^2 + bx + c = 0\n$$\n\"\"\"\n)\n", "path": "e2e/scripts/st_markdown.py" } ]
[ { "content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.markdown(\"This **markdown** is awesome! :sunglasses:\")\n\nst.markdown(\"This <b>HTML tag</b> is escaped!\")\n\nst.markdown(\"This <b>HTML tag</b> is not escaped!\", unsafe_allow_html=True)\n\nst.markdown(\"[text]\")\n\nst.markdown(\"[link](href)\")\n\nst.markdown(\"[][]\")\n\nst.markdown(\"Inline math with $\\KaTeX$\")\n\nst.markdown(\n \"\"\"\n$$\nax^2 + bx + c = 0\n$$\n\"\"\"\n)\n\nst.markdown(\n \"\"\"\n| Col1 | Col2 |\n| --------- | ----------- |\n| Some | Data |\n\"\"\"\n)\n", "path": "e2e/scripts/st_markdown.py" } ]
diff --git a/e2e/scripts/st_markdown.py b/e2e/scripts/st_markdown.py index 9e938f15efb5..e17c08001458 100644 --- a/e2e/scripts/st_markdown.py +++ b/e2e/scripts/st_markdown.py @@ -35,3 +35,11 @@ $$ """ ) + +st.markdown( + """ +| Col1 | Col2 | +| --------- | ----------- | +| Some | Data | +""" +) diff --git a/e2e/specs/st_markdown.spec.js b/e2e/specs/st_markdown.spec.js index a9d4d586270d..ee505ceffe6a 100644 --- a/e2e/specs/st_markdown.spec.js +++ b/e2e/specs/st_markdown.spec.js @@ -21,7 +21,7 @@ describe("st.markdown", () => { }); it("displays markdown", () => { - cy.get(".element-container .stMarkdown").should("have.length", 8); + cy.get(".element-container .stMarkdown").should("have.length", 9); cy.get(".element-container .stMarkdown").then(els => { expect(els[0].textContent).to.eq("This markdown is awesome! 😎"); expect(els[1].textContent).to.eq("This <b>HTML tag</b> is escaped!"); @@ -33,6 +33,7 @@ describe("st.markdown", () => { expect(els[7].textContent).to.eq( "ax2+bx+c=0ax^2 + bx + c = 0ax2+bx+c=0" ); + expect(els[8].textContent).to.eq("Col1Col2SomeData"); cy.wrap(els[3]) .find("a") @@ -47,10 +48,20 @@ describe("st.markdown", () => { it("has consistent st.markdown visuals", () => { cy.get(".element-container .stMarkdown").each((el, i) => { // The 6th st.markdown element is an empty one, so cypress gets confused - // when attempting to take a snapshot of it. - if (i !== 5) { + // when attempting to take a snapshot of it. We also have to handle the + // markdown table differently; see the comment below. + if (i !== 5 && i !== 8) { return cy.wrap(el).matchThemedSnapshots(`markdown-visuals-${i}`); } }); }); + + // Tables in html are weird and hard to take snapshots of since they may + // overflow their parent elements while still rendering correctly, so we deal + // with taking these snapshots separately from the ones above. + it("has consistent st.markdown table visuals", () => { + const els = cy.get(".element-container .stMarkdown table"); + els.should("have.length", 1); + els.first().matchThemedSnapshots("markdown-table-visuals"); + }); }); diff --git a/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals-dark.snap.png b/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals-dark.snap.png new file mode 100644 index 000000000000..cbab548be1d2 Binary files /dev/null and b/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals-dark.snap.png differ diff --git a/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals.snap.png b/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals.snap.png new file mode 100644 index 000000000000..b70ea80f97c8 Binary files /dev/null and b/frontend/cypress/snapshots/linux/2x/st_markdown.spec.js/markdown-table-visuals.snap.png differ diff --git a/frontend/src/components/shared/StreamlitMarkdown/styled-components.ts b/frontend/src/components/shared/StreamlitMarkdown/styled-components.ts index 90115049814d..283f5c716bc1 100644 --- a/frontend/src/components/shared/StreamlitMarkdown/styled-components.ts +++ b/frontend/src/components/shared/StreamlitMarkdown/styled-components.ts @@ -32,7 +32,6 @@ export const StyledStreamlitMarkdown = styled.div(({ theme }) => ({ tr: { borderTop: `1px solid ${theme.colors.fadedText10}`, - background: theme.colors.white, }, "th, td": {
wright-group__WrightTools-221
make setup.py an executable script Change file permissions and reformat the shebang line
[ { "content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py" } ]
[ { "content": "#! /usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index 99df28160..94dceb8dd --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# !/usr/bin/env python +#! /usr/bin/env python import os from setuptools import setup, find_packages
python-discord__site-268
Ugly prefix on all ID links. Currently, all the headers that are created by the wiki will have id's that are prefixed with `wiki-toc`. As such, when you want to link a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment. It would be better if this simply said `#development-environment`, so let's change that.
[ { "content": "", "path": "pydis_site/__init__.py" } ]
[ { "content": "from wiki.plugins.macros.mdx import toc\n\n# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\ntoc.HEADER_ID_PREFIX = ''\n", "path": "pydis_site/__init__.py" } ]
diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py index e69de29bb..c6146450c 100644 --- a/pydis_site/__init__.py +++ b/pydis_site/__init__.py @@ -0,0 +1,4 @@ +from wiki.plugins.macros.mdx import toc + +# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it. +toc.HEADER_ID_PREFIX = ''
zestedesavoir__zds-site-6179
Retirer les dernier restes de Travis **Description du bug** J'ai l'impression qu'il reste quelques miettes de Travis : * https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py * https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66 **Comportement attendu** A priori, on ne se sert plus de Travis, donc tout ça devrait disparaître.
[ { "content": "from .ci_test import *\n\nLOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n}\n", "path": "zds/settings/travis_fixture.py" } ]
[ { "content": null, "path": "zds/settings/travis_fixture.py" } ]
diff --git a/scripts/define_function.sh b/scripts/define_function.sh index 9a1a02ae68..4de1ad6f0f 100755 --- a/scripts/define_function.sh +++ b/scripts/define_function.sh @@ -10,125 +10,6 @@ function _in { } -## travis code -# https://github.com/travis-ci/travis-build/blob/master/lib/travis/build/bash/travis_fold.bash -zds_travis_fold() { - local action="${1}" - local name="${2}" - echo -en "travis_fold:${action}:${name}\\r${ANSI_CLEAR}" -} - - -# https://github.com/travis-ci/travis-build/blob/master/lib/travis/build/bash/travis_nanoseconds.bash -zds_travis_nanoseconds() { - local cmd='date' - local format='+%s%N' - - if hash gdate >/dev/null 2>&1; then - cmd='gdate' - elif [[ "${TRAVIS_OS_NAME}" == osx ]]; then - format='+%s000000000' - fi - - "${cmd}" -u "${format}" -} - - -# https://github.com/travis-ci/travis-build/blob/master/lib/travis/build/bash/travis_time_start.bash -# change : prefixed global variable with ZDS_ -zds_travis_time_start() { - ZDS_TRAVIS_TIMER_ID="ZDS_$(printf %08x $((RANDOM * RANDOM)))" - ZDS_TRAVIS_TIMER_START_TIME="$(zds_travis_nanoseconds)" - export ZDS_TRAVIS_TIMER_ID ZDS_TRAVIS_TIMER_START_TIME - echo -en "travis_time:start:${ZDS_TRAVIS_TIMER_ID}\\r${ANSI_CLEAR}" -} - - -# https://github.com/travis-ci/travis-build/blob/master/lib/travis/build/bash/travis_time_finish.bash -# change : prefixed global variable with ZDS_ -zds_travis_time_finish() { - local result="${?}" - local travis_timer_end_time - travis_timer_end_time="$(zds_travis_nanoseconds)" - local duration - duration="$((travis_timer_end_time - ZDS_TRAVIS_TIMER_START_TIME))" - echo -en "travis_time:end:${ZDS_TRAVIS_TIMER_ID}:start=${ZDS_TRAVIS_TIMER_START_TIME},finish=${travis_timer_end_time},duration=${duration}\\r${ANSI_CLEAR}" - return "${result}" -} -## - - -## start fold for travis -ZDS_SHOW_TRAVIS_FOLD=0 -if $(_in "--travis-output" $@); then - ZDS_SHOW_TRAVIS_FOLD=1 -fi - - -zds_fold_current_cat="default" -function zds_fold_category { - zds_fold_current_cat="$1" -} - - -zds_fold_current="" -function zds_fold_start { - if [[ $ZDS_SHOW_TRAVIS_FOLD == 1 ]]; then - if [[ $zds_fold_current == $1 ]]; then # for virtualenv fold - return - fi - - zds_fold_current="$1" - zds_travis_fold "start" "${zds_fold_current_cat}_${zds_fold_current}" - - zds_travis_time_start - fi - - print_info "$2" --bold -} - - -function zds_fold_end { - if [[ $ZDS_SHOW_TRAVIS_FOLD == 1 ]] && [[ $zds_fold_current =~ "" ]]; then - zds_travis_time_finish - - zds_travis_fold "end" "${zds_fold_current_cat}_${zds_fold_current}" - zds_fold_current="" - fi -} -## end - - -## start zmd start & stop function -function zds_start_zmd { - npm run server --prefix zmd/node_modules/zmarkdown -- --silent; exVal=$? - - if [[ $exVal != 0 ]]; then - zds_fold_end - gateway "!! Cannot start zmd" $exVal - exit 1 - fi -} - - -function zds_stop_zmd { - node ./zmd/node_modules/pm2/bin/pm2 kill; exVal=$? - - if [[ $exVal != 0 ]]; then - print_error "Warning: Cannot stop zmd" - fi -} -## end - - -function gateway { - if [[ $2 != 0 ]]; then - print_error "$1" - exit $2 - fi -} - - ## start print function function print_info { if [[ "$2" == "--bold" ]]; then @@ -146,4 +27,4 @@ function print_error { echo "$1" echo -en "\033[00m" } -## end \ No newline at end of file +## end diff --git a/scripts/install_zds.sh b/scripts/install_zds.sh index 9239061449..897217ddfd 100755 --- a/scripts/install_zds.sh +++ b/scripts/install_zds.sh @@ -10,34 +10,6 @@ function load_nvm { } -## start quiet mode -function progressfilt { - local flag=false c count cr=$'\r' nl=$'\n' - while IFS='' read -d '' -rn 1 c - do - if $flag; then - printf '%s' "$c" - else - if [[ $c != $cr && $c != $nl ]]; then - count=0 - else - ((count++)) - if ((count > 1)); then - flag=true - fi - fi - fi - done -} - - -# Hack for "-q --show-progress" (at least v1.16) and travis uses (travis uses wget 1.15) -function wget_nv { - wget "$@" --progress=bar:force 2>&1 | progressfilt -} -## end - - # zds-site root folder ZDSSITE_DIR=$(pwd) @@ -47,12 +19,10 @@ LOCAL_DIR="$(cd "$(dirname "$0")" && pwd)" source $LOCAL_DIR/define_variable.sh source $LOCAL_DIR/define_function.sh -zds_fold_category "install" - # Install packages if ! $(_in "-packages" $@) && ( $(_in "+packages" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "packages" "* [+packages] installing packages (this subcommand will be run as super-user)" + print_info "* [+packages] installing packages (this subcommand will be run as super-user)" --bold if $(_in "--detect-os-version" $@); then version=$(cat /proc/version) @@ -147,15 +117,13 @@ if ! $(_in "-packages" $@) && ( $(_in "+packages" $@) || $(_in "+base" $@) || $ fi echo "" done - - zds_fold_end fi # virtualenv if ! $(_in "-virtualenv" $@) && ( $(_in "+virtualenv" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "virtualenv" "* Create virtualenv" + print_info "* Create virtualenv" --bold if [ ! -f $ZDS_VENV/bin/activate ]; then if [ -d $ZDS_VENV ]; then @@ -202,7 +170,7 @@ fi # nvm node & yarn if ! $(_in "-node" $@) && ( $(_in "+node" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "node" "* [+node] installing nvm (v$ZDS_NVM_VERSION) & node (v$ZDS_NODE_VERSION) & yarn" + print_info "* [+node] installing nvm (v$ZDS_NVM_VERSION) & node (v$ZDS_NODE_VERSION) & yarn" --bold wget -qO- https://raw.githubusercontent.com/creationix/nvm/v${ZDS_NVM_VERSION}/install.sh | bash if [[ $? == 0 ]]; then @@ -225,13 +193,11 @@ if ! $(_in "-node" $@) && ( $(_in "+node" $@) || $(_in "+base" $@) || $(_in "+f print_error "!! Cannot obtain nvm v${ZDS_NVM_VERSION}" exit 1 fi - - zds_fold_end fi # virtualenv activation if ! $(_in "--force-skip-activating" $@) && [[ ( $VIRTUAL_ENV == "" || $(realpath $VIRTUAL_ENV) != $(realpath $ZDS_VENV) ) ]]; then - zds_fold_start "virtualenv" "* Load virtualenv" + print_info "* Load virtualenv" --bold print_info "* activating venv \`$ZDS_VENV\`" @@ -256,16 +222,12 @@ if ! $(_in "--force-skip-activating" $@) && [[ ( $VIRTUAL_ENV == "" || $(realpat echo " - If you don't have other choice, use \`--force-skip-activating\`." exit 1 fi - - zds_fold_end else print_info "!! Add \`$(realpath $ZDS_VENV)\` in your PATH." if [ ! -d $ZDS_VENV ]; then mkdir $ZDS_VENV fi - - zds_fold_end fi export ZDS_ENV=$(realpath $ZDS_VENV) @@ -273,7 +235,7 @@ export ZDS_ENV=$(realpath $ZDS_VENV) # local jdk if ! $(_in "-jdk-local" $@) && ( $(_in "+jdk-local" $@) || $(_in "+full" $@) ); then - zds_fold_start "jdk" "* [+jdk-local] installing a local version of JDK (v$ZDS_JDK_VERSION)" + print_info "* [+jdk-local] installing a local version of JDK (v$ZDS_JDK_VERSION)" --bold mkdir -p $ZDS_VENV/lib/ cd $ZDS_VENV/lib/ @@ -289,7 +251,7 @@ if ! $(_in "-jdk-local" $@) && ( $(_in "+jdk-local" $@) || $(_in "+full" $@) ); folderPATH="${foldername}/OpenJDK11U-jdk_x64_linux_hotspot_${ZDS_JDK_VERSION}_${ZDS_JDK_REV}.tar.gz" echo "GET ${baseURL}${folderPATH}" - wget_nv -O ${foldername}.tar.gz ${baseURL}${folderPATH} + wget -O ${foldername}.tar.gz ${baseURL}${folderPATH} -q --show-progress tar xf ${foldername}.tar.gz if [[ $? == 0 ]]; then @@ -314,14 +276,12 @@ if ! $(_in "-jdk-local" $@) && ( $(_in "+jdk-local" $@) || $(_in "+full" $@) ); exit 1 fi cd $ZDSSITE_DIR - - zds_fold_end fi # local elasticsearch if ! $(_in "-elastic-local" $@) && ( $(_in "+elastic-local" $@) || $(_in "+full" $@) ); then - zds_fold_start "elasticsearch" "* [+elastic-local] installing a local version of elasticsearch (v$ZDS_ELASTIC_VERSION)" + print_info "* [+elastic-local] installing a local version of elasticsearch (v$ZDS_ELASTIC_VERSION)" --bold mkdir -p .local cd .local @@ -332,7 +292,7 @@ if ! $(_in "-elastic-local" $@) && ( $(_in "+elastic-local" $@) || $(_in "+full rm -r "$es_path" fi - wget_nv https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ZDS_ELASTIC_VERSION}.zip + wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ZDS_ELASTIC_VERSION}.zip -q --show-progress if [[ $? == 0 ]]; then unzip -q elasticsearch-${ZDS_ELASTIC_VERSION}.zip rm elasticsearch-${ZDS_ELASTIC_VERSION}.zip @@ -350,14 +310,12 @@ if ! $(_in "-elastic-local" $@) && ( $(_in "+elastic-local" $@) || $(_in "+full exit 1 fi cd $ZDSSITE_DIR - - zds_fold_end fi # local texlive if ! $(_in "-tex-local" $@) && ( $(_in "+tex-local" $@) || $(_in "+full" $@) ); then - zds_fold_start "texlive" "* [+tex-local] install texlive" + print_info "* [+tex-local] install texlive" --bold mkdir -p .local cd .local @@ -387,7 +345,7 @@ if ! $(_in "-tex-local" $@) && ( $(_in "+tex-local" $@) || $(_in "+full" $@) ); sed -i '[email protected]@texlive@' texlive.profile # change directory sed -i "s@\$HOME@$LOCAL@" texlive.profile # change destination - wget_nv -O install-tl.tar.gz http://mirror.ctan.org/systems/texlive/tlnet/install-tl-unx.tar.gz + wget -O install-tl.tar.gz http://mirror.ctan.org/systems/texlive/tlnet/install-tl-unx.tar.gz -q --show-progress if [[ $? == 0 ]]; then if [[ ! -f ./bin/x86_64-linux/tlmgr ]]; then # install texlive tar xzf install-tl.tar.gz @@ -417,14 +375,12 @@ if ! $(_in "-tex-local" $@) && ( $(_in "+tex-local" $@) || $(_in "+full" $@) ); fi cd $ZDSSITE_DIR - - zds_fold_end fi # latex-template in TEXMFHOME. if ! $(_in "-latex-template" $@) && ( $(_in "+latex-template" $@) || $(_in "+full" $@) ); then - zds_fold_start "latex-template" "* [+latex-template] install latex-template (from $ZDS_LATEX_REPO)" + print_info "* [+latex-template] install latex-template (from $ZDS_LATEX_REPO)" --bold if [[ $(which kpsewhich) == "" ]]; then # no texlive ? print_error "!! Cannot find kpsewhich, do you have texlive?" @@ -449,14 +405,12 @@ if ! $(_in "-latex-template" $@) && ( $(_in "+latex-template" $@) || $(_in "+fu fi cd $ZDSSITE_DIR - - zds_fold_end fi # install back if ! $(_in "-back" $@) && ( $(_in "+back" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "back" "* [+back] install back dependencies & migration" + print_info "* [+back] install back dependencies & migration" --bold if $(_in "+prod" $@); then make install-back-with-prod; exVal=$? @@ -477,14 +431,12 @@ if ! $(_in "-back" $@) && ( $(_in "+back" $@) || $(_in "+base" $@) || $(_in "+f exit 1 fi fi - - zds_fold_end fi # install front if ! $(_in "-front" $@) && ( $(_in "+front" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "front" "* [+front] install front dependencies & build front" + print_info "* [+front] install front dependencies & build front" --bold if [ -d node_modules ]; then # delete previous modules rm -r node_modules @@ -503,14 +455,12 @@ if ! $(_in "-front" $@) && ( $(_in "+front" $@) || $(_in "+base" $@) || $(_in " print_error "!! Cannot build-front (use \`-front\` to skip)" exit 1 fi - - zds_fold_end fi # zmd if ! $(_in "-zmd" $@) && ( $(_in "+zmd" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "zmd" "* [+zmd] install zmarkdown dependencies" + print_info "* [+zmd] install zmarkdown dependencies" --bold make zmd-install; exVal=$? @@ -518,14 +468,12 @@ if ! $(_in "-zmd" $@) && ( $(_in "+zmd" $@) || $(_in "+base" $@) || $(_in "+ful print_error "!! Cannot install zmd (use \`-zmd\` to skip)" exit 1 fi - - zds_fold_end fi # fixtures if ! $(_in "-data" $@) && ( $(_in "+data" $@) || $(_in "+base" $@) || $(_in "+full" $@) ); then - zds_fold_start "fixtures" "* [+data] fixtures" + print_info "* [+data] fixtures" --bold npm run server --prefix zmd/node_modules/zmarkdown -- --silent; exVal=$? @@ -570,8 +518,6 @@ if ! $(_in "-data" $@) && ( $(_in "+data" $@) || $(_in "+base" $@) || $(_in "+f if $futureExit; then exit 1 fi - - zds_fold_end fi if ! $(_in "--force-skip-activating" $@); then diff --git a/scripts/travis_overview/grep_ignore_msg.txt b/scripts/travis_overview/grep_ignore_msg.txt deleted file mode 100644 index 74406c02c0..0000000000 --- a/scripts/travis_overview/grep_ignore_msg.txt +++ /dev/null @@ -1,4 +0,0 @@ -ERROR\( \|:\)zds.utils.templatetags.emarkdown\( \|:\)Markdown errors -[ a-z]*auth.0007_alter_validators_add_error_messages... OK -test_error_last_user_with_write_leave_gallery -test_no_error_on_multiple_subscription diff --git a/scripts/travis_overview/sed_parsing_rules.txt b/scripts/travis_overview/sed_parsing_rules.txt deleted file mode 100644 index a9dbcb49a8..0000000000 --- a/scripts/travis_overview/sed_parsing_rules.txt +++ /dev/null @@ -1,7 +0,0 @@ -1~1s/^travis_.+((\[0K)?travis_.+)+\[0K/\n/g -1~1s/^travis_.+(\[0K\[33;1m.+\[0m)$/\1/g -/^travis_.+$/d -/^\[00mtravis_.+$/d -1~1s/^\[0K()/\1/g -1~1s/^\[0K.*//g -s/\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]//g diff --git a/scripts/travis_overview/travis_overview.sh b/scripts/travis_overview/travis_overview.sh deleted file mode 100755 index 131d8a9150..0000000000 --- a/scripts/travis_overview/travis_overview.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -LOCALDIR=$(dirname "$0") -cd $LOCALDIR -source ../define_function.sh - -# FOR LOCAL USAGE: -# TRAVIS_JOB_WEB_URL="https://travis-ci.com/github/zestedesavoir/zds-site/jobs/123456789" -# TRAVIS_JOB_ID=$(echo "$TRAVIS_JOB_WEB_URL" 2>&1 | sed -E "s/^.+\/([0-9]+)$/\1/") - -if [ -z $TRAVIS_JOB_ID ]; then - echo "Error: TRAVIS_JOB_ID missing." - exit 1 -fi - -echo "https://api.travis-ci.com/v3/job/$TRAVIS_JOB_ID/log.txt" - -# 1) get log -curl --silent "https://api.travis-ci.com/v3/job/$TRAVIS_JOB_ID/log.txt" --output log4.txt - -# 2) sed will parse log.txt to get the good line number (like in travis-ci.com UI) -sed --in-place log4.txt --regexp-extended --file=sed_parsing_rules.txt - -# 3) get the line number -cat log4.txt --number > log3.txt - -# 4) Ignore line -grep --invert-match --file="grep_ignore_msg.txt" log3.txt > log2.txt - -if [ ! -s log3.txt ]; then - echo "Error: Should be not empty!" - exit 1 -fi - -# 5) Match line with "error" or "Traceback" (and get +5/-5 lines) -grep "\(error\|Traceback\)" log2.txt --color=always --after-context=5 --before-context=5 > log.txt - -# 6) Display : -if [ -s log.txt ]; then - echo "#######################################################" - echo " WE FOUND LINES WITH 'error' OR 'traceback' " - echo "Scans previous output & prints lines containing 'error'" - echo "or 'traceback'. Be careful with false positives. " - echo "#######################################################" - zds_fold_start "line-overview" "Lines found" - cat log.txt | cut --bytes=1-312 - zds_fold_end - echo "#######################################################" - echo " END " - echo "#######################################################" -fi - -rm log2.txt log3.txt log4.txt diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py deleted file mode 100644 index a9748a9c06..0000000000 --- a/zds/settings/travis_fixture.py +++ /dev/null @@ -1,6 +0,0 @@ -from .ci_test import * - -LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = { - "level": "INFO", - "handlers": ["console"], -}
conda-forge__conda-smithy-1140
Not compatible with ruamel.yaml 0.16 Fails with, ``` Traceback (most recent call last): File "/home/travis/miniconda/bin/conda-smithy", line 10, in <module> sys.exit(main()) File "/home/travis/miniconda/lib/python3.7/site-packages/conda_smithy/cli.py", line 470, in main args.subcommand_func(args) File "/home/travis/miniconda/lib/python3.7/site-packages/conda_smithy/cli.py", line 217, in __call__ args.feedstock_directory, owner, repo File "/home/travis/miniconda/lib/python3.7/site-packages/conda_smithy/ci_register.py", line 351, in travis_token_update_conda_forge_config ] = travis_encrypt_binstar_token(slug, item) File "/home/travis/miniconda/lib/python3.7/contextlib.py", line 119, in __exit__ next(self.gen) File "/home/travis/miniconda/lib/python3.7/site-packages/conda_smithy/utils.py", line 92, in update_conda_forge_config fh.write(yaml.dump(code)) File "/home/travis/miniconda/lib/python3.7/site-packages/ruamel/yaml/main.py", line 448, in dump raise TypeError('Need a stream argument when not dumping from context manager') TypeError: Need a stream argument when not dumping from context manager ``` cc @ocefpaf, @scopatz
[ { "content": "import shutil\nimport tempfile\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\n# define global yaml API\n# roundrip-loader and allowing duplicate keys\n# for handling # [filter] / # [not filter]\nyaml = ruamel.yaml.YAML(typ=\"rt\")\nyaml.allow_duplicate_keys = True\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=lambda *args, **kwargs: \"subpackage_stub\",\n pin_compatible=lambda *args, **kwargs: \"compatible_pin_stub\",\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(feedstock_directory):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n forge_yaml = os.path.join(feedstock_directory, \"conda-forge.yml\")\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = yaml.load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n with open(forge_yaml, \"w\") as fh:\n fh.write(yaml.dump(code))\n", "path": "conda_smithy/utils.py" } ]
[ { "content": "import shutil\nimport tempfile\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\n# define global yaml API\n# roundrip-loader and allowing duplicate keys\n# for handling # [filter] / # [not filter]\nyaml = ruamel.yaml.YAML(typ=\"rt\")\nyaml.allow_duplicate_keys = True\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=lambda *args, **kwargs: \"subpackage_stub\",\n pin_compatible=lambda *args, **kwargs: \"compatible_pin_stub\",\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(feedstock_directory):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n forge_yaml = os.path.join(feedstock_directory, \"conda-forge.yml\")\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = yaml.load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n yaml.dump(code, forge_yaml)\n", "path": "conda_smithy/utils.py" } ]
diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py index 0d115daac..5036798db 100644 --- a/conda_smithy/utils.py +++ b/conda_smithy/utils.py @@ -88,5 +88,4 @@ def update_conda_forge_config(feedstock_directory): yield code - with open(forge_yaml, "w") as fh: - fh.write(yaml.dump(code)) + yaml.dump(code, forge_yaml) diff --git a/news/ruamelfix.rst b/news/ruamelfix.rst new file mode 100644 index 000000000..21fc29b1f --- /dev/null +++ b/news/ruamelfix.rst @@ -0,0 +1,24 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* Updated conda-smithy to work with ruamel.yaml v0.16+. + +**Security:** + +* <news item> +
archlinux__archinstall-1300
Archinstall discover shop non-functional. Hello, I have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the "Discover" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know. Archinstall discover shop non-functional. Hello, I have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the "Discover" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know.
[ { "content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py" } ]
[ { "content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n\t\"packagekit-qt5\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py" } ]
diff --git a/profiles/kde.py b/profiles/kde.py index 9edbe32561..3de2dbad77 100644 --- a/profiles/kde.py +++ b/profiles/kde.py @@ -13,6 +13,7 @@ "sddm", "plasma-wayland-session", "egl-wayland", + "packagekit-qt5", ]
bokeh__bokeh-8537
Docs tweak to add note about BOKEH_DEV and apps Make clearer need to change BOKEH_RESOURCES too.
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' The resources module provides the Resources class for easily configuring\nhow BokehJS code and CSS resources should be located, loaded, and embedded in\nBokeh documents.\n\nAlso provides some pre-configured Resources objects.\n\nAttributes:\n CDN : load minified BokehJS from CDN\n INLINE : provide minified BokehJS from library static directory\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport re\nimport json\nfrom os.path import basename, join, relpath\n\n# External imports\nfrom six import string_types\n\n# Bokeh imports\nfrom . import __version__\nfrom .core.templates import JS_RESOURCES, CSS_RESOURCES\nfrom .model import Model\nfrom .settings import settings\n\nfrom .util.paths import bokehjsdir\nfrom .util.session_id import generate_session_id\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\nDEFAULT_SERVER_HOST = \"localhost\"\nDEFAULT_SERVER_PORT = 5006\nDEFAULT_SERVER_HTTP_URL = \"http://%s:%d/\" % (DEFAULT_SERVER_HOST, DEFAULT_SERVER_PORT)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n# __all__ defined at the bottom on the class module\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass BaseResources(object):\n _default_root_dir = \".\"\n _default_root_url = DEFAULT_SERVER_HTTP_URL\n\n def __init__(self, mode='inline', version=None, root_dir=None,\n minified=True, log_level=\"info\", root_url=None,\n path_versioner=None, components=None):\n\n self._components = components\n\n if hasattr(self, '_js_components'):\n self.js_components = self._js_components\n if hasattr(self, '_css_components'):\n self.css_components = self._css_components\n\n self.mode = settings.resources(mode); del mode\n self.root_dir = settings.rootdir(root_dir); del root_dir\n self.version = settings.version(version); del version\n self.minified = settings.minified(minified); del minified\n self.log_level = settings.log_level(log_level); del log_level\n self.path_versioner = path_versioner; del path_versioner\n\n if root_url and not root_url.endswith(\"/\"):\n log.warning(\"root_url should end with a /, adding one\")\n root_url = root_url + \"/\"\n self._root_url = root_url\n if self.mode not in ['inline', 'cdn', 'server', 'server-dev', 'relative', 'relative-dev', 'absolute', 'absolute-dev']:\n raise ValueError(\"wrong value for 'mode' parameter, expected \"\n \"'inline', 'cdn', 'server(-dev)', 'relative(-dev)' or 'absolute(-dev)', got %r\" % self.mode)\n\n if self.root_dir and not self.mode.startswith(\"relative\"):\n raise ValueError(\"setting 'root_dir' makes sense only when 'mode' is set to 'relative'\")\n\n if self.version and not self.mode.startswith('cdn'):\n raise ValueError(\"setting 'version' makes sense only when 'mode' is set to 'cdn'\")\n\n if root_url and not self.mode.startswith('server'):\n raise ValueError(\"setting 'root_url' makes sense only when 'mode' is set to 'server'\")\n\n self.dev = self.mode.endswith('-dev')\n if self.dev:\n self.mode = self.mode[:-4]\n\n self.messages = []\n\n if self.mode == \"cdn\":\n cdn = self._cdn_urls()\n self.messages.extend(cdn['messages'])\n elif self.mode == \"server\":\n server = self._server_urls()\n self.messages.extend(server['messages'])\n\n # Properties --------------------------------------------------------------\n\n @property\n def log_level(self):\n return self._log_level\n\n @log_level.setter\n def log_level(self, level):\n valid_levels = [\n \"trace\", \"debug\", \"info\", \"warn\", \"error\", \"fatal\"\n ]\n if not (level is None or level in valid_levels):\n raise ValueError(\"Unknown log level '{}', valid levels are: {}\".format(level, str(valid_levels)))\n self._log_level = level\n\n @property\n def root_url(self):\n if self._root_url is not None:\n return self._root_url\n else:\n return self._default_root_url\n\n # Public methods ----------------------------------------------------------\n\n def components(self, kind):\n components = self.js_components if kind == 'js' else self.css_components\n if self._components is not None:\n components = [ c for c in components if c in self._components ]\n return components\n\n def _file_paths(self, kind):\n bokehjs_dir = bokehjsdir(self.dev)\n minified = \".min\" if not self.dev and self.minified else \"\"\n files = [ \"%s%s.%s\" % (component, minified, kind) for component in self.components(kind) ]\n paths = [ join(bokehjs_dir, kind, file) for file in files ]\n return paths\n\n def _collect_external_resources(self, resource_attr):\n \"\"\" Collect external resources set on resource_attr attribute of all models.\"\"\"\n\n external_resources = []\n\n for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):\n external = getattr(cls, resource_attr, None)\n\n if isinstance(external, string_types):\n if external not in external_resources:\n external_resources.append(external)\n elif isinstance(external, list):\n for e in external:\n if e not in external_resources:\n external_resources.append(e)\n\n return external_resources\n\n def _cdn_urls(self):\n return _get_cdn_urls(self.version, self.minified)\n\n def _server_urls(self):\n return _get_server_urls(self.root_url, False if self.dev else self.minified, self.path_versioner)\n\n def _resolve(self, kind):\n paths = self._file_paths(kind)\n files, raw = [], []\n\n if self.mode == \"inline\":\n raw = [ self._inline(path) for path in paths ]\n elif self.mode == \"relative\":\n root_dir = self.root_dir or self._default_root_dir\n files = [ relpath(path, root_dir) for path in paths ]\n elif self.mode == \"absolute\":\n files = list(paths)\n elif self.mode == \"cdn\":\n cdn = self._cdn_urls()\n files = list(cdn['urls'](self.components(kind), kind))\n elif self.mode == \"server\":\n server = self._server_urls()\n files = list(server['urls'](self.components(kind), kind))\n\n return (files, raw)\n\n def _inline(self, path):\n begin = \"/* BEGIN %s */\" % basename(path)\n with open(path, 'rb') as f:\n middle = f.read().decode(\"utf-8\")\n end = \"/* END %s */\" % basename(path)\n return \"%s\\n%s\\n%s\" % (begin, middle, end)\n\nclass JSResources(BaseResources):\n ''' The Resources class encapsulates information relating to loading or embedding Bokeh Javascript.\n\n Args:\n mode (str) : How should Bokeh JS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh JS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading Bokeh JS assets\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether JavaScript should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from (default: None)\n\n If ``None``, absoute URLs based on the default server configuration will\n be generated.\n\n ``root_url`` can also be the empty string, in which case relative URLs,\n e.g., \"static/css/bokeh.min.js\", are generated.\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire Bokeh JS and CSS inline\n * ``'cdn'`` configure to load Bokeh JS and CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified assets\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified assets\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified assets\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding\n Bokeh plots.\n\n '''\n\n _js_components = [\"bokeh\", \"bokeh-widgets\", \"bokeh-tables\", \"bokeh-gl\"]\n\n # Properties --------------------------------------------------------------\n\n @property\n def js_files(self):\n files, _ = self._resolve('js')\n external_resources = self._collect_external_resources('__javascript__')\n return external_resources + files\n\n @property\n def js_raw(self):\n _, raw = self._resolve('js')\n\n if self.log_level is not None:\n raw.append('Bokeh.set_log_level(\"%s\");' % self.log_level)\n\n if self.dev:\n raw.append('Bokeh.settings.dev = true')\n\n return raw\n\n # Public methods ----------------------------------------------------------\n\n def render_js(self):\n return JS_RESOURCES.render(js_raw=self.js_raw, js_files=self.js_files)\n\nclass CSSResources(BaseResources):\n ''' The CSSResources class encapsulates information relating to loading or embedding Bokeh client-side CSS.\n\n Args:\n mode (str) : how should Bokeh CSS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh CSS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading BokehJS resources\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether CSS should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire BokehJS code and CSS inline\n * ``'cdn'`` configure to load Bokeh CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified CSS\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified CSS\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified CSS\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding Bokeh plots.\n\n '''\n\n _css_components = [\"bokeh\", \"bokeh-widgets\", \"bokeh-tables\"]\n\n # Properties --------------------------------------------------------------\n\n @property\n def css_files(self):\n files, _ = self._resolve('css')\n external_resources = self._collect_external_resources(\"__css__\")\n return external_resources + files\n\n @property\n def css_raw(self):\n _, raw = self._resolve('css')\n return raw\n\n @property\n def css_raw_str(self):\n return [ json.dumps(css) for css in self.css_raw ]\n\n # Public methods ----------------------------------------------------------\n\n def render_css(self):\n return CSS_RESOURCES.render(css_raw=self.css_raw, css_files=self.css_files)\n\nclass Resources(JSResources, CSSResources):\n ''' The Resources class encapsulates information relating to loading or\n embedding Bokeh Javascript and CSS.\n\n Args:\n mode (str) : how should Bokeh JS and CSS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh JS and CSS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading Bokeh JS and CSS assets\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether JavaScript and CSS should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire Bokeh JS and CSS inline\n * ``'cdn'`` configure to load Bokeh JS and CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified assets\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified assets\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified assets\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n js_raw : any raw JS that needs to be placed inside ``<script>`` tags\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n js_files : URLs of any JS files that need to be loaded by ``<script>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding\n Bokeh plots.\n\n '''\n\n # Public methods ----------------------------------------------------------\n\n def render(self):\n return \"%s\\n%s\" % (self.render_css(), self.render_js())\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\nclass _SessionCoordinates(object):\n \"\"\" Internal class used to parse kwargs for server URL, app_path, and session_id.\"\"\"\n def __init__(self, **kwargs):\n self._url = kwargs.get('url', DEFAULT_SERVER_HTTP_URL)\n\n if self._url is None:\n raise ValueError(\"url cannot be None\")\n\n if self._url == 'default':\n self._url = DEFAULT_SERVER_HTTP_URL\n\n if self._url.startswith(\"ws\"):\n raise ValueError(\"url should be the http or https URL for the server, not the websocket URL\")\n\n self._url = self._url.rstrip(\"/\")\n\n # we lazy-generate the session_id so we can generate it server-side when appropriate\n self._session_id = kwargs.get('session_id')\n\n # Properties --------------------------------------------------------------\n\n @property\n def url(self):\n return self._url\n\n @property\n def session_id(self):\n \"\"\" Session ID derived from the kwargs provided.\"\"\"\n if self._session_id is None:\n self._session_id = generate_session_id()\n return self._session_id\n\n @property\n def session_id_allowing_none(self):\n \"\"\" Session ID provided in kwargs, keeping it None if it hasn't been generated yet.\n\n The purpose of this is to preserve ``None`` as long as possible... in some cases\n we may never generate the session ID because we generate it on the server.\n \"\"\"\n return self._session_id\n\n_DEV_PAT = re.compile(r\"^(\\d)+\\.(\\d)+\\.(\\d)+(dev|rc)\")\n\ndef _cdn_base_url():\n return \"https://cdn.pydata.org\"\n\n\ndef _get_cdn_urls(version=None, minified=True):\n if version is None:\n if settings.docs_cdn():\n version = settings.docs_cdn()\n else:\n version = __version__.split('-')[0]\n\n # check if we want minified js and css\n _min = \".min\" if minified else \"\"\n\n base_url = _cdn_base_url()\n dev_container = 'bokeh/dev'\n rel_container = 'bokeh/release'\n\n # check the 'dev' fingerprint\n container = dev_container if _DEV_PAT.match(version) else rel_container\n\n if version.endswith(('dev', 'rc')):\n log.debug(\"Getting CDN URL for local dev version will not produce usable URL\")\n\n def mk_url(comp, kind):\n return '%s/%s/%s-%s%s.%s' % (base_url, container, comp, version, _min, kind)\n\n result = {\n 'urls' : lambda components, kind: [ mk_url(component, kind) for component in components ],\n 'messages' : [],\n }\n\n if len(__version__.split('-')) > 1:\n result['messages'].append({\n \"type\" : \"warn\",\n \"text\" : (\"Requesting CDN BokehJS version '%s' from Bokeh development version '%s'. \"\n \"This configuration is unsupported and may not work!\" % (version, __version__))\n })\n\n return result\n\n\ndef _get_server_urls(root_url, minified=True, path_versioner=None):\n _min = \".min\" if minified else \"\"\n\n def mk_url(comp, kind):\n path = \"%s/%s%s.%s\" % (kind, comp, _min, kind)\n if path_versioner is not None:\n path = path_versioner(path)\n return '%sstatic/%s' % (root_url, path)\n\n return {\n 'urls' : lambda components, kind: [ mk_url(component, kind) for component in components ],\n 'messages' : [],\n }\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nCDN = Resources(mode=\"cdn\")\n\nINLINE = Resources(mode=\"inline\")\n\n__all__ = (\n 'CDN',\n 'INLINE'\n)\n", "path": "bokeh/resources.py" } ]
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' The resources module provides the Resources class for easily configuring\nhow BokehJS code and CSS resources should be located, loaded, and embedded in\nBokeh documents.\n\nAlso provides some pre-configured Resources objects.\n\nAttributes:\n CDN : load minified BokehJS from CDN\n INLINE : provide minified BokehJS from library static directory\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport re\nimport json\nfrom os.path import basename, join, relpath\n\n# External imports\nfrom six import string_types\n\n# Bokeh imports\nfrom . import __version__\nfrom .core.templates import JS_RESOURCES, CSS_RESOURCES\nfrom .model import Model\nfrom .settings import settings\n\nfrom .util.paths import bokehjsdir\nfrom .util.session_id import generate_session_id\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\nDEFAULT_SERVER_HOST = \"localhost\"\nDEFAULT_SERVER_PORT = 5006\nDEFAULT_SERVER_HTTP_URL = \"http://%s:%d/\" % (DEFAULT_SERVER_HOST, DEFAULT_SERVER_PORT)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n# __all__ defined at the bottom on the class module\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass BaseResources(object):\n _default_root_dir = \".\"\n _default_root_url = DEFAULT_SERVER_HTTP_URL\n\n def __init__(self, mode='inline', version=None, root_dir=None,\n minified=True, log_level=\"info\", root_url=None,\n path_versioner=None, components=None):\n\n self._components = components\n\n if hasattr(self, '_js_components'):\n self.js_components = self._js_components\n if hasattr(self, '_css_components'):\n self.css_components = self._css_components\n\n self.mode = settings.resources(mode); del mode\n self.root_dir = settings.rootdir(root_dir); del root_dir\n self.version = settings.version(version); del version\n self.minified = settings.minified(minified); del minified\n self.log_level = settings.log_level(log_level); del log_level\n self.path_versioner = path_versioner; del path_versioner\n\n if root_url and not root_url.endswith(\"/\"):\n log.warning(\"root_url should end with a /, adding one\")\n root_url = root_url + \"/\"\n self._root_url = root_url\n if self.mode not in ['inline', 'cdn', 'server', 'server-dev', 'relative', 'relative-dev', 'absolute', 'absolute-dev']:\n raise ValueError(\"wrong value for 'mode' parameter, expected \"\n \"'inline', 'cdn', 'server(-dev)', 'relative(-dev)' or 'absolute(-dev)', got %r\" % self.mode)\n\n if self.root_dir and not self.mode.startswith(\"relative\"):\n raise ValueError(\"setting 'root_dir' makes sense only when 'mode' is set to 'relative'\")\n\n if self.version and not self.mode.startswith('cdn'):\n raise ValueError(\"setting 'version' makes sense only when 'mode' is set to 'cdn'\")\n\n if root_url and not self.mode.startswith('server'):\n raise ValueError(\"setting 'root_url' makes sense only when 'mode' is set to 'server'\")\n\n self.dev = self.mode.endswith('-dev')\n if self.dev:\n self.mode = self.mode[:-4]\n\n self.messages = []\n\n if self.mode == \"cdn\":\n cdn = self._cdn_urls()\n self.messages.extend(cdn['messages'])\n elif self.mode == \"server\":\n server = self._server_urls()\n self.messages.extend(server['messages'])\n\n # Properties --------------------------------------------------------------\n\n @property\n def log_level(self):\n return self._log_level\n\n @log_level.setter\n def log_level(self, level):\n valid_levels = [\n \"trace\", \"debug\", \"info\", \"warn\", \"error\", \"fatal\"\n ]\n if not (level is None or level in valid_levels):\n raise ValueError(\"Unknown log level '{}', valid levels are: {}\".format(level, str(valid_levels)))\n self._log_level = level\n\n @property\n def root_url(self):\n if self._root_url is not None:\n return self._root_url\n else:\n return self._default_root_url\n\n # Public methods ----------------------------------------------------------\n\n def components(self, kind):\n components = self.js_components if kind == 'js' else self.css_components\n if self._components is not None:\n components = [ c for c in components if c in self._components ]\n return components\n\n def _file_paths(self, kind):\n bokehjs_dir = bokehjsdir(self.dev)\n minified = \".min\" if not self.dev and self.minified else \"\"\n files = [ \"%s%s.%s\" % (component, minified, kind) for component in self.components(kind) ]\n paths = [ join(bokehjs_dir, kind, file) for file in files ]\n return paths\n\n def _collect_external_resources(self, resource_attr):\n \"\"\" Collect external resources set on resource_attr attribute of all models.\"\"\"\n\n external_resources = []\n\n for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):\n external = getattr(cls, resource_attr, None)\n\n if isinstance(external, string_types):\n if external not in external_resources:\n external_resources.append(external)\n elif isinstance(external, list):\n for e in external:\n if e not in external_resources:\n external_resources.append(e)\n\n return external_resources\n\n def _cdn_urls(self):\n return _get_cdn_urls(self.version, self.minified)\n\n def _server_urls(self):\n return _get_server_urls(self.root_url, False if self.dev else self.minified, self.path_versioner)\n\n def _resolve(self, kind):\n paths = self._file_paths(kind)\n files, raw = [], []\n\n if self.mode == \"inline\":\n raw = [ self._inline(path) for path in paths ]\n elif self.mode == \"relative\":\n root_dir = self.root_dir or self._default_root_dir\n files = [ relpath(path, root_dir) for path in paths ]\n elif self.mode == \"absolute\":\n files = list(paths)\n elif self.mode == \"cdn\":\n cdn = self._cdn_urls()\n files = list(cdn['urls'](self.components(kind), kind))\n elif self.mode == \"server\":\n server = self._server_urls()\n files = list(server['urls'](self.components(kind), kind))\n\n return (files, raw)\n\n def _inline(self, path):\n begin = \"/* BEGIN %s */\" % basename(path)\n with open(path, 'rb') as f:\n middle = f.read().decode(\"utf-8\")\n end = \"/* END %s */\" % basename(path)\n return \"%s\\n%s\\n%s\" % (begin, middle, end)\n\nclass JSResources(BaseResources):\n ''' The Resources class encapsulates information relating to loading or embedding Bokeh Javascript.\n\n Args:\n mode (str) : How should Bokeh JS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh JS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading Bokeh JS assets\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether JavaScript should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from (default: None)\n\n If ``None``, absoute URLs based on the default server configuration will\n be generated.\n\n ``root_url`` can also be the empty string, in which case relative URLs,\n e.g., \"static/css/bokeh.min.js\", are generated.\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire Bokeh JS and CSS inline\n * ``'cdn'`` configure to load Bokeh JS and CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified assets\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified assets\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified assets\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding\n Bokeh plots.\n\n '''\n\n _js_components = [\"bokeh\", \"bokeh-widgets\", \"bokeh-tables\", \"bokeh-gl\"]\n\n # Properties --------------------------------------------------------------\n\n @property\n def js_files(self):\n files, _ = self._resolve('js')\n external_resources = self._collect_external_resources('__javascript__')\n return external_resources + files\n\n @property\n def js_raw(self):\n _, raw = self._resolve('js')\n\n if self.log_level is not None:\n raw.append('Bokeh.set_log_level(\"%s\");' % self.log_level)\n\n if self.dev:\n raw.append('Bokeh.settings.dev = true')\n\n return raw\n\n # Public methods ----------------------------------------------------------\n\n def render_js(self):\n return JS_RESOURCES.render(js_raw=self.js_raw, js_files=self.js_files)\n\nclass CSSResources(BaseResources):\n ''' The CSSResources class encapsulates information relating to loading or embedding Bokeh client-side CSS.\n\n Args:\n mode (str) : how should Bokeh CSS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh CSS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading BokehJS resources\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether CSS should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire BokehJS code and CSS inline\n * ``'cdn'`` configure to load Bokeh CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified CSS\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified CSS\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified CSS\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding Bokeh plots.\n\n '''\n\n _css_components = [\"bokeh\", \"bokeh-widgets\", \"bokeh-tables\"]\n\n # Properties --------------------------------------------------------------\n\n @property\n def css_files(self):\n files, _ = self._resolve('css')\n external_resources = self._collect_external_resources(\"__css__\")\n return external_resources + files\n\n @property\n def css_raw(self):\n _, raw = self._resolve('css')\n return raw\n\n @property\n def css_raw_str(self):\n return [ json.dumps(css) for css in self.css_raw ]\n\n # Public methods ----------------------------------------------------------\n\n def render_css(self):\n return CSS_RESOURCES.render(css_raw=self.css_raw, css_files=self.css_files)\n\nclass Resources(JSResources, CSSResources):\n ''' The Resources class encapsulates information relating to loading or\n embedding Bokeh Javascript and CSS.\n\n Args:\n mode (str) : how should Bokeh JS and CSS be included in output\n\n See below for descriptions of available modes\n\n version (str, optional) : what version of Bokeh JS and CSS to load\n\n Only valid with the ``'cdn'`` mode\n\n root_dir (str, optional) : root directory for loading Bokeh JS and CSS assets\n\n Only valid with ``'relative'`` and ``'relative-dev'`` modes\n\n minified (bool, optional) : whether JavaScript and CSS should be minified or not (default: True)\n\n root_url (str, optional) : URL and port of Bokeh Server to load resources from\n\n Only valid with ``'server'`` and ``'server-dev'`` modes\n\n The following **mode** values are available for configuring a Resource object:\n\n * ``'inline'`` configure to provide entire Bokeh JS and CSS inline\n * ``'cdn'`` configure to load Bokeh JS and CSS from ``http://cdn.pydata.org``\n * ``'server'`` configure to load from a Bokeh Server\n * ``'server-dev'`` same as ``server`` but supports non-minified assets\n * ``'relative'`` configure to load relative to the given directory\n * ``'relative-dev'`` same as ``relative`` but supports non-minified assets\n * ``'absolute'`` configure to load from the installed Bokeh library static directory\n * ``'absolute-dev'`` same as ``absolute`` but supports non-minified assets\n\n Once configured, a Resource object exposes the following public attributes:\n\n Attributes:\n js_raw : any raw JS that needs to be placed inside ``<script>`` tags\n css_raw : any raw CSS that needs to be places inside ``<style>`` tags\n js_files : URLs of any JS files that need to be loaded by ``<script>`` tags\n css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags\n messages : any informational messages concerning this configuration\n\n These attributes are often useful as template parameters when embedding\n Bokeh plots.\n\n '''\n\n # Public methods ----------------------------------------------------------\n\n def render(self):\n return \"%s\\n%s\" % (self.render_css(), self.render_js())\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\nclass _SessionCoordinates(object):\n \"\"\" Internal class used to parse kwargs for server URL, app_path, and session_id.\"\"\"\n def __init__(self, **kwargs):\n self._url = kwargs.get('url', DEFAULT_SERVER_HTTP_URL)\n\n if self._url is None:\n raise ValueError(\"url cannot be None\")\n\n if self._url == 'default':\n self._url = DEFAULT_SERVER_HTTP_URL\n\n if self._url.startswith(\"ws\"):\n raise ValueError(\"url should be the http or https URL for the server, not the websocket URL\")\n\n self._url = self._url.rstrip(\"/\")\n\n # we lazy-generate the session_id so we can generate it server-side when appropriate\n self._session_id = kwargs.get('session_id')\n\n # Properties --------------------------------------------------------------\n\n @property\n def url(self):\n return self._url\n\n @property\n def session_id(self):\n \"\"\" Session ID derived from the kwargs provided.\"\"\"\n if self._session_id is None:\n self._session_id = generate_session_id()\n return self._session_id\n\n @property\n def session_id_allowing_none(self):\n \"\"\" Session ID provided in kwargs, keeping it None if it hasn't been generated yet.\n\n The purpose of this is to preserve ``None`` as long as possible... in some cases\n we may never generate the session ID because we generate it on the server.\n \"\"\"\n return self._session_id\n\n_DEV_PAT = re.compile(r\"^(\\d)+\\.(\\d)+\\.(\\d)+(dev|rc)\")\n\ndef _cdn_base_url():\n return \"https://cdn.pydata.org\"\n\n\ndef _get_cdn_urls(version=None, minified=True):\n if version is None:\n if settings.docs_cdn():\n version = settings.docs_cdn()\n else:\n version = __version__.split('-')[0]\n\n # check if we want minified js and css\n _min = \".min\" if minified else \"\"\n\n base_url = _cdn_base_url()\n dev_container = 'bokeh/dev'\n rel_container = 'bokeh/release'\n\n # check the 'dev' fingerprint\n container = dev_container if _DEV_PAT.match(version) else rel_container\n\n if version.endswith(('dev', 'rc')):\n log.debug(\"Getting CDN URL for local dev version will not produce usable URL\")\n\n def mk_url(comp, kind):\n return '%s/%s/%s-%s%s.%s' % (base_url, container, comp, version, _min, kind)\n\n result = {\n 'urls' : lambda components, kind: [ mk_url(component, kind) for component in components ],\n 'messages' : [],\n }\n\n if len(__version__.split('-')) > 1:\n result['messages'].append({\n \"type\" : \"warn\",\n \"text\" : (\"Requesting CDN BokehJS version '%s' from Bokeh development version '%s'. \"\n \"This configuration is unsupported and may not work!\" % (version, __version__))\n })\n\n return result\n\n\ndef _get_server_urls(root_url, minified=True, path_versioner=None):\n _min = \".min\" if minified else \"\"\n\n def mk_url(comp, kind):\n path = \"%s/%s%s.%s\" % (kind, comp, _min, kind)\n if path_versioner is not None:\n path = path_versioner(path)\n return '%sstatic/%s' % (root_url, path)\n\n return {\n 'urls' : lambda components, kind: [ mk_url(component, kind) for component in components ],\n 'messages' : [],\n }\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nCDN = Resources(mode=\"cdn\")\n\nINLINE = Resources(mode=\"inline\")\n\n__all__ = (\n 'CDN',\n 'INLINE',\n 'Resources',\n 'JSResources',\n 'CSSResources',\n)\n", "path": "bokeh/resources.py" } ]
diff --git a/bokeh/resources.py b/bokeh/resources.py index 0058906919b..ae55cbc80ac 100644 --- a/bokeh/resources.py +++ b/bokeh/resources.py @@ -516,5 +516,8 @@ def mk_url(comp, kind): __all__ = ( 'CDN', - 'INLINE' + 'INLINE', + 'Resources', + 'JSResources', + 'CSSResources', ) diff --git a/sphinx/source/docs/dev_guide/env_vars.rst b/sphinx/source/docs/dev_guide/env_vars.rst index 0a11d70f794..de09138e4a0 100644 --- a/sphinx/source/docs/dev_guide/env_vars.rst +++ b/sphinx/source/docs/dev_guide/env_vars.rst @@ -34,10 +34,14 @@ This is a meta variable equivalent to the following environment variables: - ``BOKEH_PRETTY=true`` - ``BOKEH_PY_LOG_LEVEL=debug`` - ``BOKEH_RESOURCES=absolute-dev`` -- ``BOKEH_SIMPLE_IDS=true`` Accepted values are ``yes``/``no``, ``true``/``false`` or ``0``/``1``. +.. note:: + When running server examples, the ``BOKEH_RESOURCES`` setting that + ``BOKEH_DEV`` sets will cause the page to stop rendering. So you + will need to manually also set ``BOKEH_RESOURCES=server``. + ``BOKEH_DOCS_CDN`` -------------------- What version of BokehJS to use when building sphinx docs.
feast-dev__feast-3755
Redis version in setup.py contains fixable vulnerabilities ## Expected Behaviour ## Current Behaviour: trivvy scanning of our feature server container is failing due to the fixed version of redis in setup.py (4.2.2) - it looks this version hasn't been updated in a year. ![Screenshot 2023-08-31 at 10 41 11](https://github.com/feast-dev/feast/assets/14976256/c3e2f538-e959-41bb-b975-277ab7c14bab) ## Steps to reproduce ### Specifications - Version: feast[redis]==0.31.1 - Platform: - Subsystem: ## Possible Solution Bump to latest redis or lowest fixed version
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_ext import build_ext as _build_ext\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<3\",\n \"pandas>=1.4.3,<2\",\n # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"pandavro~=1.5.0\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf<4.23.4,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4,<12\",\n \"pydantic>=1,<2\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard==2.13.3\",\n \"fastapi>=0.68.0,<0.100\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<4\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n]\n\nREDIS_REQUIRED = [\n \"redis==4.2.2\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"mysqlclient\", \"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.15.41,<0.16.0\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<42\",\n \"flake8>=6.0.0,<6.1.0\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-testing>=1.56.2,<2\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy>=0.981,<0.990\",\n \"avro==1.10.0\",\n \"gcsfs>=0.4.0,<=2022.01.0\",\n \"urllib3>=1.25.4,<2\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"adlfs==0.5.9\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\"\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_ext import build_ext as _build_ext\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<3\",\n \"pandas>=1.4.3,<2\",\n # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"pandavro~=1.5.0\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf<4.23.4,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4,<12\",\n \"pydantic>=1,<2\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard==2.13.3\",\n \"fastapi>=0.68.0,<0.100\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<4\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n]\n\nREDIS_REQUIRED = [\n \"redis>=4.2.2,<5\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"mysqlclient\", \"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.15.41,<0.16.0\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<42\",\n \"flake8>=6.0.0,<6.1.0\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-testing>=1.56.2,<2\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy>=0.981,<0.990\",\n \"avro==1.10.0\",\n \"gcsfs>=0.4.0,<=2022.01.0\",\n \"urllib3>=1.25.4,<2\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"adlfs==0.5.9\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\"\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "setup.py" } ]
diff --git a/sdk/python/requirements/py3.10-ci-requirements.txt b/sdk/python/requirements/py3.10-ci-requirements.txt index cb72fdaa350..a59553b4ac8 100644 --- a/sdk/python/requirements/py3.10-ci-requirements.txt +++ b/sdk/python/requirements/py3.10-ci-requirements.txt @@ -180,8 +180,6 @@ decorator==5.1.1 # ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via redis deprecation==2.1.0 # via testcontainers dill==0.3.7 @@ -577,7 +575,6 @@ packaging==23.1 # marshmallow # nbconvert # pytest - # redis # snowflake-connector-python # sphinx pandas==1.5.3 @@ -784,7 +781,7 @@ pyzmq==25.1.1 # ipykernel # jupyter-client # jupyter-server -redis==4.2.2 +redis==4.6.0 # via feast (setup.py) referencing==0.30.2 # via @@ -1065,9 +1062,7 @@ wheel==0.41.2 widgetsnbextension==4.0.8 # via ipywidgets wrapt==1.15.0 - # via - # deprecated - # testcontainers + # via testcontainers xmltodict==0.13.0 # via moto yarl==1.9.2 diff --git a/sdk/python/requirements/py3.8-ci-requirements.txt b/sdk/python/requirements/py3.8-ci-requirements.txt index 9dfefc21081..b24172e890f 100644 --- a/sdk/python/requirements/py3.8-ci-requirements.txt +++ b/sdk/python/requirements/py3.8-ci-requirements.txt @@ -184,8 +184,6 @@ decorator==5.1.1 # ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via redis deprecation==2.1.0 # via testcontainers dill==0.3.7 @@ -592,7 +590,6 @@ packaging==23.1 # marshmallow # nbconvert # pytest - # redis # snowflake-connector-python # sphinx pandas==1.5.3 @@ -802,7 +799,7 @@ pyzmq==25.1.1 # ipykernel # jupyter-client # jupyter-server -redis==4.2.2 +redis==4.6.0 # via feast (setup.py) referencing==0.30.2 # via @@ -1082,9 +1079,7 @@ wheel==0.41.2 widgetsnbextension==4.0.8 # via ipywidgets wrapt==1.15.0 - # via - # deprecated - # testcontainers + # via testcontainers xmltodict==0.13.0 # via moto yarl==1.9.2 diff --git a/sdk/python/requirements/py3.9-ci-requirements.txt b/sdk/python/requirements/py3.9-ci-requirements.txt index 3992303d00e..ad19f9e8bde 100644 --- a/sdk/python/requirements/py3.9-ci-requirements.txt +++ b/sdk/python/requirements/py3.9-ci-requirements.txt @@ -180,8 +180,6 @@ decorator==5.1.1 # ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via redis deprecation==2.1.0 # via testcontainers dill==0.3.7 @@ -527,7 +525,7 @@ mypy-extensions==1.0.0 # via # black # mypy -mypy-protobuf==3.1 +mypy-protobuf==3.1.0 # via feast (setup.py) mysqlclient==2.2.0 # via feast (setup.py) @@ -584,7 +582,6 @@ packaging==23.1 # marshmallow # nbconvert # pytest - # redis # snowflake-connector-python # sphinx pandas==1.5.3 @@ -791,7 +788,7 @@ pyzmq==25.1.1 # ipykernel # jupyter-client # jupyter-server -redis==4.2.2 +redis==4.6.0 # via feast (setup.py) referencing==0.30.2 # via @@ -1077,9 +1074,7 @@ wheel==0.41.2 widgetsnbextension==4.0.8 # via ipywidgets wrapt==1.15.0 - # via - # deprecated - # testcontainers + # via testcontainers xmltodict==0.13.0 # via moto yarl==1.9.2 diff --git a/setup.py b/setup.py index 699d394940e..573ab54d512 100644 --- a/setup.py +++ b/setup.py @@ -92,7 +92,7 @@ ] REDIS_REQUIRED = [ - "redis==4.2.2", + "redis>=4.2.2,<5", "hiredis>=2.0.0,<3", ]
cookiecutter__cookiecutter-1712
CI/CD: Verify .pre-commit-config.yaml use latest hooks versions
[ { "content": "#!/usr/bin/env python\n\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7be7e3bb8..96d89ad9a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ --- repos: - repo: https://github.com/PyCQA/doc8 - rev: 0.8.1 + rev: 0.11.2 hooks: - id: doc8 name: doc8 @@ -17,17 +17,26 @@ repos: language_version: python3 exclude: ^(tests\/hooks-abort-render\/hooks|docs\/HelloCookieCutter1) - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.4.0 + rev: v4.2.0 hooks: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] - id: mixed-line-ending - - id: check-byte-order-marker + - id: fix-byte-order-marker - id: check-executables-have-shebangs + - id: check-shebang-scripts-are-executable - id: check-merge-conflict - id: check-symlinks + - id: check-case-conflict + - id: check-docstring-first + - id: check-json + exclude: "invalid-syntax.json|tests/fake-repo-bad-json/cookiecutter.json|tests/fake-repo/cookiecutter.json" + - id: check-toml + - id: check-xml + - id: check-yaml + exclude: "not_rendered.yml|invalid-config.yaml" - repo: https://gitlab.com/pycqa/flake8 - rev: 3.7.9 + rev: 4.0.1 hooks: - id: flake8 additional_dependencies: @@ -35,7 +44,7 @@ repos: - flake8-black - flake8-docstrings - repo: https://github.com/PyCQA/bandit - rev: 1.6.0 + rev: 1.7.4 hooks: - id: bandit args: [--ini, .bandit] diff --git a/setup.py b/setup.py index 7c6a677b2..d61fbd6ba 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """cookiecutter distutils configuration.""" from setuptools import setup diff --git a/tests/hooks-abort-render/hooks/post_gen_project.py b/tests/hooks-abort-render/hooks/post_gen_project.py index 706cc440d..d95ca59fa 100644 --- a/tests/hooks-abort-render/hooks/post_gen_project.py +++ b/tests/hooks-abort-render/hooks/post_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # flake8: noqa """Simple post-gen hook for testing the handling of different exit codes.""" diff --git a/tests/hooks-abort-render/hooks/pre_gen_project.py b/tests/hooks-abort-render/hooks/pre_gen_project.py index a132af807..3bd59868c 100644 --- a/tests/hooks-abort-render/hooks/pre_gen_project.py +++ b/tests/hooks-abort-render/hooks/pre_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # flake8: noqa """Simple pre-gen hook for testing the handling of different exit codes.""" diff --git a/tests/test-pyhooks/hooks/post_gen_project.py b/tests/test-pyhooks/hooks/post_gen_project.py index c8b7c194f..98a5a353b 100644 --- a/tests/test-pyhooks/hooks/post_gen_project.py +++ b/tests/test-pyhooks/hooks/post_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Simple post-gen hook for testing project folder and custom file creation.""" print('pre generation hook') diff --git a/tests/test-pyhooks/hooks/pre_gen_project.py b/tests/test-pyhooks/hooks/pre_gen_project.py index 4d84bd3ec..6f1887bd4 100644 --- a/tests/test-pyhooks/hooks/pre_gen_project.py +++ b/tests/test-pyhooks/hooks/pre_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Simple pre-gen hook for testing project folder and custom file creation.""" print('pre generation hook') diff --git a/tests/test-pyshellhooks/hooks/post_gen_project.py b/tests/test-pyshellhooks/hooks/post_gen_project.py index c8b7c194f..98a5a353b 100644 --- a/tests/test-pyshellhooks/hooks/post_gen_project.py +++ b/tests/test-pyshellhooks/hooks/post_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Simple post-gen hook for testing project folder and custom file creation.""" print('pre generation hook') diff --git a/tests/test-pyshellhooks/hooks/pre_gen_project.py b/tests/test-pyshellhooks/hooks/pre_gen_project.py index db8bfc6a7..daeb59acb 100644 --- a/tests/test-pyshellhooks/hooks/pre_gen_project.py +++ b/tests/test-pyshellhooks/hooks/pre_gen_project.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Simple pre-gen hook for testing project folder and custom file creation."""
scrapy__scrapy-4563
Extend hoverxref_roles @humitos [suggested](https://github.com/scrapy/scrapy/issues/4475#issuecomment-613350667) extending the `hoverxref_roles` setting of the corresponding Sphinx extension so that the display-on-hover behavior of the documentations works for things like signal or setting references.
[ { "content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = ['epub.unknown_project_files']\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py" } ]
[ { "content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\nhoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']\n", "path": "docs/conf.py" } ]
diff --git a/docs/README.rst b/docs/README.rst index 0a343cd1966..0b7afa5486b 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -57,3 +57,12 @@ There is a way to recreate the doc automatically when you make changes, you need to install watchdog (``pip install watchdog``) and then use:: make watch + +Alternative method using tox +---------------------------- + +To compile the documentation to HTML run the following command:: + + tox -e docs + +Documentation will be generated (in HTML format) inside the ``.tox/docs/tmp/html`` dir. diff --git a/docs/conf.py b/docs/conf.py index 8ab38a090c3..3ae709a04df 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -300,3 +300,4 @@ "mod": "tooltip", "ref": "tooltip", } +hoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']
magenta__magenta-1254
Pip installation fails due to librosa dependency Hi, I'm trying to install the magenta-gpu but when I did a pip install magenta-gpu: **librosa 0.6.2 has requirement joblib>=0.12, but you'll have joblib 0.11 which is incompatible.**
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.10'\n", "path": "magenta/version.py" } ]
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.11'\n", "path": "magenta/version.py" } ]
diff --git a/magenta/version.py b/magenta/version.py index f8a8ff848a..ad2e7a2c4c 100644 --- a/magenta/version.py +++ b/magenta/version.py @@ -17,4 +17,4 @@ pulling in all the dependencies in __init__.py. """ -__version__ = '0.3.10' +__version__ = '0.3.11'
typeddjango__django-stubs-1391
Bump mypy from 1.0.1 to 1.1.1 Bumps [mypy](https://github.com/python/mypy) from 1.0.1 to 1.1.1. <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/python/mypy/commit/9b777a36315b1ba24ab840f9f905cfb6c82e35a9"><code>9b777a3</code></a> bump version to 1.1.1 for wheels build</li> <li><a href="https://github.com/python/mypy/commit/6d355f57df1a664e9853891ca77af68944242d52"><code>6d355f5</code></a> [Release 1.1] Cherry-pick some mypyc build fixes (<a href="https://github-redirect.dependabot.com/python/mypy/issues/14820">#14820</a>)</li> <li><a href="https://github.com/python/mypy/commit/a27dec535e3eb1ed8dab1625e592bce5ab9a7972"><code>a27dec5</code></a> Fix <code>--strict-equality</code> crash for instances of a class generic over a `ParamS...</li> <li><a href="https://github.com/python/mypy/commit/c2016586d45767246d73bc38fd5b01e0d5c8f787"><code>c201658</code></a> Remove +dev from version number before release</li> <li><a href="https://github.com/python/mypy/commit/17fba49939d4d8408f77e539290b24dd9b7f07ae"><code>17fba49</code></a> [1.1 backport] [dataclass_transform] include <strong>dataclass_fields</strong> in transfor...</li> <li><a href="https://github.com/python/mypy/commit/f2cac4a1bf08874f3862cdb48cad7f908577c400"><code>f2cac4a</code></a> [1.1 backport] [dataclass_transform] detect transform spec changes in increme...</li> <li><a href="https://github.com/python/mypy/commit/c03e979ca06c3bf082a4cd07458a1bc3205dc5e5"><code>c03e979</code></a> Stubtest: Link directly to line (<a href="https://github-redirect.dependabot.com/python/mypy/issues/14437">#14437</a>)</li> <li><a href="https://github.com/python/mypy/commit/8a487ff248783fdc2fc0c1852a15f9fd6fbc12e8"><code>8a487ff</code></a> Sync typeshed (<a href="https://github-redirect.dependabot.com/python/mypy/issues/14733">#14733</a>)</li> <li><a href="https://github.com/python/mypy/commit/c99133f405f286ed3429c809e9ae2cb3faaa2ceb"><code>c99133f</code></a> Fix for bug with <code>in</code> operation on optionals in <code>no-strict-optional</code> mode (<a href="https://github-redirect.dependabot.com/python/mypy/issues/1">#1</a>...</li> <li><a href="https://github.com/python/mypy/commit/ef3187a64d10d1aacbf1d28171b4af00dcd1cb64"><code>ef3187a</code></a> Update commit hashes in sync typeshed script (<a href="https://github-redirect.dependabot.com/python/mypy/issues/14720">#14720</a>)</li> <li>Additional commits viewable in <a href="https://github.com/python/mypy/compare/v1.0.1...v1.1.1">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=mypy&package-manager=pip&previous-version=1.0.1&new-version=1.1.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
[ { "content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=1.0,<1.1\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.15.0\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=1.1.1,<1.2\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.15.0\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n", "path": "setup.py" } ]
diff --git a/django-stubs/utils/datastructures.pyi b/django-stubs/utils/datastructures.pyi index 4c7fd669e..c518c7268 100644 --- a/django-stubs/utils/datastructures.pyi +++ b/django-stubs/utils/datastructures.pyi @@ -65,7 +65,10 @@ class MultiValueDict(dict[_K, _V]): def get(self, key: _K, default: _Z = ...) -> _V | _Z: ... def getlist(self, key: _K, default: _Z = ...) -> list[_V] | _Z: ... def setlist(self, key: _K, list_: list[_V]) -> None: ... - def setdefault(self, key: _K, default: _V = ...) -> _V: ... + @overload + def setdefault(self: MultiValueDict[_K, _V | None], key: _K, default: None = ...) -> _V | None: ... + @overload + def setdefault(self, key: _K, default: _V) -> _V: ... def setlistdefault(self, key: _K, default_list: list[_V] | None = ...) -> list[_V]: ... def appendlist(self, key: _K, value: _V) -> None: ... def items(self) -> Iterator[tuple[_K, _V | list[object]]]: ... # type: ignore diff --git a/requirements.txt b/requirements.txt index 5071ee665..3285c8e19 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,4 +9,4 @@ psycopg2-binary -e .[compatible-mypy] # Overrides: -mypy==1.0.1 +mypy==1.1.1 diff --git a/setup.py b/setup.py index 019e1ca1b..f6b8dc99d 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ def find_stub_files(name: str) -> List[str]: ] extras_require = { - "compatible-mypy": ["mypy>=1.0,<1.1"], + "compatible-mypy": ["mypy>=1.1.1,<1.2"], } setup( diff --git a/tests/typecheck/contrib/sitemaps/test_generic_sitemap.yml b/tests/typecheck/contrib/sitemaps/test_generic_sitemap.yml index 7048e6e27..da70f6870 100644 --- a/tests/typecheck/contrib/sitemaps/test_generic_sitemap.yml +++ b/tests/typecheck/contrib/sitemaps/test_generic_sitemap.yml @@ -45,10 +45,10 @@ ] out: | main:24: error: Return type "str" of "items" incompatible with return type "Iterable[Offer]" in supertype "Sitemap" + main:26: error: Return type "int" of "location" incompatible with return type "str" in supertype "Sitemap" main:26: error: Argument 1 of "location" is incompatible with supertype "Sitemap"; supertype defines the argument type as "Offer" main:26: note: This violates the Liskov substitution principle main:26: note: See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides - main:26: error: Return type "int" of "location" incompatible with return type "str" in supertype "Sitemap" main:40: error: Argument 1 to "GenericSitemap" has incompatible type "Dict[str, List[int]]"; expected "Mapping[str, Union[datetime, _QuerySet[Offer, Offer], str]]" installed_apps:
Anselmoo__spectrafit-655
[Docs]: Using builtin release drafter ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Missing Information in the Docs https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"0.16.4\"\n", "path": "spectrafit/__init__.py" } ]
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a0\"\n", "path": "spectrafit/__init__.py" } ]
diff --git a/.github/labeler.yml b/.github/labeler.yml index 27adb937f..2edf7aae6 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -37,9 +37,10 @@ maintenance: - LICENSE - CHANGELOG.md - CODE_OF_CONDUCT.md - - CONTRIBUTING.md - "SECURITY.md" - .pre-commit-config.yaml - .prettierignore - .sonarcloud.properties - .sourcery.yaml +changelog: + - CONTRIBUTING.md diff --git a/.github/release.yml b/.github/release.yml index 87760aed2..1ca70fd76 100644 --- a/.github/release.yml +++ b/.github/release.yml @@ -1,30 +1,42 @@ changelog: categories: - title: "🏆 Milestone" - labels: "milestone" + labels: + - "milestone" - title: "🚀 New" - labels: "enhancement" + labels: + - "enhancement" - title: "💻 New" - labels: "codespaces" + labels: + - "codespaces" - title: "🐛 Bug Fixes" - labels: "bug" + labels: + - "bug" - title: "🧰 Maintenance" - labels: "maintenance" + labels: + - "maintenance" - title: ":octocat: Github Actions" - labels: "github-actions" + labels: + - "github-actions" - title: "🗂 Documentation" - labels: "documentation" + labels: + - "documentation" - title: "🔗 Dependency Updates" - labels: "dependencies" + labels: + - "dependencies" - title: "🔬 Testing & Coverage" - labels: "testing" + labels: + - "testing" - title: "👋 Welcome" - labels: "good first issue" + labels: + - "good first issue" - title: "🔒 Security" labels: - "security" - "dependabot" - title: "🚨 Breaking Changes" - labels: "breaking" + labels: + - "breaking" - title: "📝 Changelog" - labels: "changelog" + labels: + - "changelog" diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a15f272f..e69de29bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,363 +0,0 @@ -# CHANGELOG - ---- - -## v0.16.0 - ---- - -- Add [Cumulative Distribution Function][38] to the `SpectraFit` package -- Refactor the `model.py` of `SpectraFit` package - -## v0.15.1 - ---- - -- Maintenance of the `SpectraFit` package - -## v0.15.0 - ---- - -- Add `plugins` to the `SpectraFit` package for working with [RIXS][36] data -- `pikle`-file converter and visualizer for [RIXS][36] data -- Simplify the `SpectraFit` continous deployment by using [build][37] - -## v0.14.0 - ---- - -- Add `SpectraFit` to [Conda-Forge][2] as [spectrafit][3] package. -- Extend `SpectraFit` to print current peak values as `dataframe` - in Jupyter-Notebook. -- Add converters for _input-_, _output-_, and _data-files_. -- Add extended _output-print_ for `SpectraFit` in Jupyter-Notebook. - -## v0.13.1 - ---- - -- Fix crashed regression analysis due to _negative_ values in the `y`-data. - -## v0.13.0 - ---- - -- Update `devcontainer` to use `VScode`. -- Removed [`fish-shell`][34] from `devcontainer`. -- Applied code refactoring performed by [Copilot Labs][35] - -## v0.12.5 - ---- - -- Updating `spectrafit`-installer in `Dockerfile`. -- Adding images to `Jupyter-Notebook-Examples`. - -## v0.12.4 - ---- - -- Include metric plots into the [jupyter-notebook][25] interface. -- Removed `dash` dependency from `pyproject.toml`. -- Removed `spectrafit`-dependency from `Dockerfile`. - -## v0.12.3 - ---- - -- Update `Dockerimage` to the previous version of the [Conda-Forge-Recipe][33]. -- Reformat license in the docs. - -## v0.12.2. - ---- - -- Update `Dockerimage` to use `SpectraFit` in the Jupyter Notebook. - -## v0.12.1 - ---- - -- New release for triggering `Conda-Forge` build - -## v0.12.0 - ---- - -- Adding metrics for regression analysis as part of the post analysis; see also - [sklearn-metrics-regression][23] -- Add [art][24] for generating ASCII Decor in the terminal / output. -- Using transposed dataframes for the tabulated output to the terminal. -- Change `global` to `global_` to avoid keyword clash. -- Add plugin for [jupyter-notebook][25] integration in VSCode; see also - [jupyter-notebook-VSCode][26] -- Change `Dockerimage` to use [jupyter/scipy][27] as base image, see also - [SpectraFit-Dockerfile][31] -- Adding devcontainer for VSCode; see also [devcontainer][30] -- Change from `to_dict(orient="list")` to `to_dict(orient="split")` for the - `json` output for including the index. -- Add link to the [GitHub Advisory Database][28] for security issues in the - `Security nodes`. -- Add CI-Test for `devcontainer` in VSCode; see also [devcontainer-ci][29]. -- Add [`pyupgrade`][32] to pre-commit hooks. - -## v0.11.0 - ---- - -- Focus on maintenance fixed for the `spectrafit` package: - - [Synk][21] security vulnerabilities fixed - - [SonarCloud][22] code quality fixed - -## v0.10.4 - ---- - -- Update docs with topics: ``Changelog`, `README`, `Security`, `Licencse` -- Add docs for `conda` installation - -## v0.10.1 - v.10.3 - ---- - -- Downgrading `numdifftools` and `openpyxl` for compatibility with the - [conda-forge-formula][20] - -## v0.10.0 - ---- - -- Refactor the `pyproject.toml` file for getting it working with `conda`. - -## v0.9.0 - ---- - -- Adding Python 3.10 support -- Adding [Athena file][19] support -- Increasing code quality by using [`pylint`][18] -- Adding plugin support for `SpectraFit` - - Starting with input file converter - -## v0.8.6 - ---- - -- Updating the way of poetry caching -- Update docker actions -- Fixed typo in README.md - -## v0.8.3 - v0.8.5 - ---- - -- Dependency and GitHub Action Updates - -## v0.8.2 - ---- - -- Refactor buffer of the _covariance matrix_ - -## v0.8.1 - ---- - -- Updating all `raise` statements -- Add [prettier][17] to CI/CD workflow - -## v0.8.0 - ---- - -- Introduced smaller enhancement: - - Printout of the fit parameters in the output file: True/False &#8594; [0, 1, - 2] - - Keyword check for `SpectraFit` -- Fix smaller bugs: - - `Pseudo-Voigt` power factor from 0.25 &#8594; 0.2 - - Correct type-definitions for `SpectraFit` - -## v0.7.1 - ---- - -- Maintenance of the `SpectraFit` package - -## v0.7.0 - ---- - -- Introducing automatic peak detection for spectra fitting; see also SciPy's - [`find_peaks`][16] - -## v0.6.1 - ---- - -- Reformat the [README.md][14] for [PyPi - SpectraFit][15] - -## v0.6.0 - ---- - -- Introduce the **Global-Fitting** option, which allows to fit the several - spectra with a single model. -- Changed the input for **Pseudo-Voigt**: - - _`fwhm_g`_ &#8594; **`fwhmg`** - - _`fwhm_l`_ &#8594; **`fwhml`** -- Changed the input for **Gaussian-FWHM** and **Lorentzian-FWHM**: - - _`fwhm`_ &#8594; **`fwhmg`** - - _`fwhm`_ &#8594; **`fwhml`** -- Changed the input for **Voigt-FWHM**: - - _`fwhm`_ &#8594; **`fwhmv`** -- Adding error-handling for not determatination of _Confiden Interval_. - -## v0.5.6 - ---- - -- CI/CD pipeline is now token-protected. - -## v0.5.5 - ---- - -- Removed the `setuptools==57.5.0` limitation due to formally `Python2.7`. - -## v0.5.4 - ---- - -- Adding a [stale boot][13] for keeping the issue and PRs up-to-date. - -## v0.5.3 - ---- - -- Extending unit tests to the `SpectraFit` package. - -## v0.5.2 - ---- - -- Adding maintainer to the `pyproject.yml` file. - -## v0.5.1 - ---- - -- Minor fix of broken links in docs. - -## v0.5.0 - ---- - -- Rewrite `SpectraFit` main to become a more object-oriented approach. -- Increase the coverage quality of the tests. - -## v0.4.2 - ---- - -- Removed the [`GIT LFS`][12] integration for avoiding trouble with broken - images. -- Adding [`YAML`-Forms][11] as pull request template. - -## v0.4.1 - ---- - -- Change from `MarkDown` based issue templates to [`YAML`-Forms][11] by GitHub - as issue and feature request templates. - -## v0.4.0 - ---- - -- Create [SECURITY policy][8] for the `spectrafit` application. -- Adding [dependabot][9] for updating `poetry.lock`, `pyproject.toml` and GitHub - Action workflow. -- Adding a [codeql-analysis][10] -- Increasing the coverage level - -## v0.3.2 - ---- - -- Replaced poetry hosted `pre-commit` hook with [pre-commit action][6]. -- Extend `pre-commit` hook [MyPy][7]. -- Fixed a bug for the energy range separation. -- Removed the `--display` option. - -## v0.3.1 - ---- - -- Introducing `pytest` and `coverage` for increasing code quality. -- Adding [`codecov.io`][5] into the GitHub actions workflow. -- Updating the [contribution guideline][4] with inside milestones. - -## v0.2.4 - ---- - -- Adding a Docker Workflow via [https://ghcr.io/anselmoo/spectrafit:latest][2]. -- Poetry for PyPi release via [https://pypi.org/project/spectrafit/][3]. - -## v0.2.0 - ---- - -- Changed from text file based input to object based input. -- Extended `matplotlib` with `seaborn` for the plotting. -- Start outsourcing code into submodules. - -## v0.1.0 - ---- - -- The orginal program `fastfit` is now running as `spectrafit` with an own - installer besed on [POETRY](https://python-poetry.org). - -> See also: [https://github.com/Anselmoo/spectrafit/releases][1] - -[1]: https://github.com/Anselmoo/spectrafit/releases -[2]: https://ghcr.io/anselmoo/spectrafit:latest -[3]: https://pypi.org/project/spectrafit/ -[4]: https://github.com/Anselmoo/spectrafit/blob/main/CONTRIBUTING.md -[5]: https://codecov.io/gh/Anselmoo/spectrafit -[6]: https://github.com/marketplace/actions/pre-commit -[7]: https://mypy.readthedocs.io/en/stable/ -[8]: https://github.com/Anselmoo/spectrafit/security -[9]: https://dependabot.com -[10]: https://securitylab.github.com/tools/codeql/ -[11]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository -[12]: https://git-lfs.github.com -[13]: https://github.com/apps/stale -[14]: https://github.com/Anselmoo/spectrafit/blob/main/README.md -[15]: https://pypi.org/project/spectrafit/ -[16]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html -[17]: https://prettier.io -[18]: https://github.com/PyCQA/pylint -[19]: http://bruceravel.github.io/demeter/documents/Athena/index.html -[20]: https://anaconda.org/conda-forge/spectrafit -[21]: https://docs.snyk.io/products/snyk-open-source/language-and-package-manager-support/snyk-for-python -[22]: https://sonarcloud.io -[23]: https://scikit-learn.org/stable/modules/model_evaluation.html -[24]: https://www.4r7.ir -[25]: https://jupyter.org -[26]: https://code.visualstudio.com/docs/datascience/jupyter-notebooks -[27]: https://github.com/jupyter/docker-stacks/blob/main/scipy-notebook/Dockerfile -[28]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Apip -[29]: https://github.com/marketplace/actions/devcontainers-ci -[30]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit-devcontainer -[31]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit -[32]: https://github.com/Anselmoo/spectrafit/blob/6ca69132a199d3bf458927cf3d4ce6f8fdef0eae/.pre-commit-config.yaml -[33]: https://github.com/conda-forge/spectrafit-feedstock -[34]: https://fishshell.com -[35]: https://githubnext.com/projects/copilot-labs/ -[36]: https://en.wikipedia.org/wiki/Resonant_inelastic_X-ray_scattering -[37]: https://github.com/pypa/build -[38]: https://en.wikipedia.org/wiki/Cumulative_distribution_function diff --git a/docs/changelog.md b/docs/changelog.md index 786b75d5a..4a0b9ee05 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1 +1,365 @@ --8<-- "CHANGELOG.md" + +## Old Changes + +--- + +## v0.16.0 + +--- + +- Add [Cumulative Distribution Function][38] to the `SpectraFit` package +- Refactor the `model.py` of `SpectraFit` package + +## v0.15.1 + +--- + +- Maintenance of the `SpectraFit` package + +## v0.15.0 + +--- + +- Add `plugins` to the `SpectraFit` package for working with [RIXS][36] data +- `pikle`-file converter and visualizer for [RIXS][36] data +- Simplify the `SpectraFit` continous deployment by using [build][37] + +## v0.14.0 + +--- + +- Add `SpectraFit` to [Conda-Forge][2] as [spectrafit][3] package. +- Extend `SpectraFit` to print current peak values as `dataframe` + in Jupyter-Notebook. +- Add converters for _input-_, _output-_, and _data-files_. +- Add extended _output-print_ for `SpectraFit` in Jupyter-Notebook. + +## v0.13.1 + +--- + +- Fix crashed regression analysis due to _negative_ values in the `y`-data. + +## v0.13.0 + +--- + +- Update `devcontainer` to use `VScode`. +- Removed [`fish-shell`][34] from `devcontainer`. +- Applied code refactoring performed by [Copilot Labs][35] + +## v0.12.5 + +--- + +- Updating `spectrafit`-installer in `Dockerfile`. +- Adding images to `Jupyter-Notebook-Examples`. + +## v0.12.4 + +--- + +- Include metric plots into the [jupyter-notebook][25] interface. +- Removed `dash` dependency from `pyproject.toml`. +- Removed `spectrafit`-dependency from `Dockerfile`. + +## v0.12.3 + +--- + +- Update `Dockerimage` to the previous version of the [Conda-Forge-Recipe][33]. +- Reformat license in the docs. + +## v0.12.2. + +--- + +- Update `Dockerimage` to use `SpectraFit` in the Jupyter Notebook. + +## v0.12.1 + +--- + +- New release for triggering `Conda-Forge` build + +## v0.12.0 + +--- + +- Adding metrics for regression analysis as part of the post analysis; see also + [sklearn-metrics-regression][23] +- Add [art][24] for generating ASCII Decor in the terminal / output. +- Using transposed dataframes for the tabulated output to the terminal. +- Change `global` to `global_` to avoid keyword clash. +- Add plugin for [jupyter-notebook][25] integration in VSCode; see also + [jupyter-notebook-VSCode][26] +- Change `Dockerimage` to use [jupyter/scipy][27] as base image, see also + [SpectraFit-Dockerfile][31] +- Adding devcontainer for VSCode; see also [devcontainer][30] +- Change from `to_dict(orient="list")` to `to_dict(orient="split")` for the + `json` output for including the index. +- Add link to the [GitHub Advisory Database][28] for security issues in the + `Security nodes`. +- Add CI-Test for `devcontainer` in VSCode; see also [devcontainer-ci][29]. +- Add [`pyupgrade`][32] to pre-commit hooks. + +## v0.11.0 + +--- + +- Focus on maintenance fixed for the `spectrafit` package: + - [Synk][21] security vulnerabilities fixed + - [SonarCloud][22] code quality fixed + +## v0.10.4 + +--- + +- Update docs with topics: ``Changelog`, `README`, `Security`, `Licencse` +- Add docs for `conda` installation + +## v0.10.1 - v.10.3 + +--- + +- Downgrading `numdifftools` and `openpyxl` for compatibility with the + [conda-forge-formula][20] + +## v0.10.0 + +--- + +- Refactor the `pyproject.toml` file for getting it working with `conda`. + +## v0.9.0 + +--- + +- Adding Python 3.10 support +- Adding [Athena file][19] support +- Increasing code quality by using [`pylint`][18] +- Adding plugin support for `SpectraFit` + - Starting with input file converter + +## v0.8.6 + +--- + +- Updating the way of poetry caching +- Update docker actions +- Fixed typo in README.md + +## v0.8.3 - v0.8.5 + +--- + +- Dependency and GitHub Action Updates + +## v0.8.2 + +--- + +- Refactor buffer of the _covariance matrix_ + +## v0.8.1 + +--- + +- Updating all `raise` statements +- Add [prettier][17] to CI/CD workflow + +## v0.8.0 + +--- + +- Introduced smaller enhancement: + - Printout of the fit parameters in the output file: True/False &#8594; [0, 1, + 2] + - Keyword check for `SpectraFit` +- Fix smaller bugs: + - `Pseudo-Voigt` power factor from 0.25 &#8594; 0.2 + - Correct type-definitions for `SpectraFit` + +## v0.7.1 + +--- + +- Maintenance of the `SpectraFit` package + +## v0.7.0 + +--- + +- Introducing automatic peak detection for spectra fitting; see also SciPy's + [`find_peaks`][16] + +## v0.6.1 + +--- + +- Reformat the [README.md][14] for [PyPi - SpectraFit][15] + +## v0.6.0 + +--- + +- Introduce the **Global-Fitting** option, which allows to fit the several + spectra with a single model. +- Changed the input for **Pseudo-Voigt**: + - _`fwhm_g`_ &#8594; **`fwhmg`** + - _`fwhm_l`_ &#8594; **`fwhml`** +- Changed the input for **Gaussian-FWHM** and **Lorentzian-FWHM**: + - _`fwhm`_ &#8594; **`fwhmg`** + - _`fwhm`_ &#8594; **`fwhml`** +- Changed the input for **Voigt-FWHM**: + - _`fwhm`_ &#8594; **`fwhmv`** +- Adding error-handling for not determatination of _Confiden Interval_. + +## v0.5.6 + +--- + +- CI/CD pipeline is now token-protected. + +## v0.5.5 + +--- + +- Removed the `setuptools==57.5.0` limitation due to formally `Python2.7`. + +## v0.5.4 + +--- + +- Adding a [stale boot][13] for keeping the issue and PRs up-to-date. + +## v0.5.3 + +--- + +- Extending unit tests to the `SpectraFit` package. + +## v0.5.2 + +--- + +- Adding maintainer to the `pyproject.yml` file. + +## v0.5.1 + +--- + +- Minor fix of broken links in docs. + +## v0.5.0 + +--- + +- Rewrite `SpectraFit` main to become a more object-oriented approach. +- Increase the coverage quality of the tests. + +## v0.4.2 + +--- + +- Removed the [`GIT LFS`][12] integration for avoiding trouble with broken + images. +- Adding [`YAML`-Forms][11] as pull request template. + +## v0.4.1 + +--- + +- Change from `MarkDown` based issue templates to [`YAML`-Forms][11] by GitHub + as issue and feature request templates. + +## v0.4.0 + +--- + +- Create [SECURITY policy][8] for the `spectrafit` application. +- Adding [dependabot][9] for updating `poetry.lock`, `pyproject.toml` and GitHub + Action workflow. +- Adding a [codeql-analysis][10] +- Increasing the coverage level + +## v0.3.2 + +--- + +- Replaced poetry hosted `pre-commit` hook with [pre-commit action][6]. +- Extend `pre-commit` hook [MyPy][7]. +- Fixed a bug for the energy range separation. +- Removed the `--display` option. + +## v0.3.1 + +--- + +- Introducing `pytest` and `coverage` for increasing code quality. +- Adding [`codecov.io`][5] into the GitHub actions workflow. +- Updating the [contribution guideline][4] with inside milestones. + +## v0.2.4 + +--- + +- Adding a Docker Workflow via [https://ghcr.io/anselmoo/spectrafit:latest][2]. +- Poetry for PyPi release via [https://pypi.org/project/spectrafit/][3]. + +## v0.2.0 + +--- + +- Changed from text file based input to object based input. +- Extended `matplotlib` with `seaborn` for the plotting. +- Start outsourcing code into submodules. + +## v0.1.0 + +--- + +- The orginal program `fastfit` is now running as `spectrafit` with an own + installer besed on [POETRY](https://python-poetry.org). + +> See also: [https://github.com/Anselmoo/spectrafit/releases][1] + +[1]: https://github.com/Anselmoo/spectrafit/releases +[2]: https://ghcr.io/anselmoo/spectrafit:latest +[3]: https://pypi.org/project/spectrafit/ +[4]: https://github.com/Anselmoo/spectrafit/blob/main/CONTRIBUTING.md +[5]: https://codecov.io/gh/Anselmoo/spectrafit +[6]: https://github.com/marketplace/actions/pre-commit +[7]: https://mypy.readthedocs.io/en/stable/ +[8]: https://github.com/Anselmoo/spectrafit/security +[9]: https://dependabot.com +[10]: https://securitylab.github.com/tools/codeql/ +[11]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository +[12]: https://git-lfs.github.com +[13]: https://github.com/apps/stale +[14]: https://github.com/Anselmoo/spectrafit/blob/main/README.md +[15]: https://pypi.org/project/spectrafit/ +[16]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html +[17]: https://prettier.io +[18]: https://github.com/PyCQA/pylint +[19]: http://bruceravel.github.io/demeter/documents/Athena/index.html +[20]: https://anaconda.org/conda-forge/spectrafit +[21]: https://docs.snyk.io/products/snyk-open-source/language-and-package-manager-support/snyk-for-python +[22]: https://sonarcloud.io +[23]: https://scikit-learn.org/stable/modules/model_evaluation.html +[24]: https://www.4r7.ir +[25]: https://jupyter.org +[26]: https://code.visualstudio.com/docs/datascience/jupyter-notebooks +[27]: https://github.com/jupyter/docker-stacks/blob/main/scipy-notebook/Dockerfile +[28]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Apip +[29]: https://github.com/marketplace/actions/devcontainers-ci +[30]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit-devcontainer +[31]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit +[32]: https://github.com/Anselmoo/spectrafit/blob/6ca69132a199d3bf458927cf3d4ce6f8fdef0eae/.pre-commit-config.yaml +[33]: https://github.com/conda-forge/spectrafit-feedstock +[34]: https://fishshell.com +[35]: https://githubnext.com/projects/copilot-labs/ +[36]: https://en.wikipedia.org/wiki/Resonant_inelastic_X-ray_scattering +[37]: https://github.com/pypa/build +[38]: https://en.wikipedia.org/wiki/Cumulative_distribution_function diff --git a/pyproject.toml b/pyproject.toml index d1fd9abaa..d7d3560e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "SpectraFit" -version = "0.16.4" +version = "1.0.0a0" description = "Fast fitting of 2D- and 3D-Spectra with established routines" readme = "README.md" authors = ["Anselm Hahn <[email protected]>"] @@ -18,7 +18,7 @@ keywords = [ "spectrum", ] classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Environment :: Console", diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py index 4db653754..d5e3f80bb 100644 --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "0.16.4" +__version__ = "1.0.0a0"
ManimCommunity__manim-1597
Dont import `hilite_me` and `insert_line_numbers_in_html` with `from manim import *` https://github.com/ManimCommunity/manim/blob/199424d713c77fca6f59de58ba0f8eb3955bfab1/manim/mobject/svg/code_mobject.py#L6 The functions `hilite_me` and `insert_line_numbers_in_html` are currently imported with `from manim import *` , and I think that is not necessary. Further the question: Why are they defined outside the Code class?
[ { "content": "\"\"\"Mobject representing highlighted source code listings.\"\"\"\n\n__all__ = [\n \"Code\",\n \"hilite_me\",\n \"insert_line_numbers_in_html\",\n]\n\nimport html\nimport os\nimport re\n\nimport numpy as np\nfrom pygments import highlight\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name, guess_lexer_for_filename\nfrom pygments.styles import get_all_styles\n\nfrom ...constants import *\nfrom ...mobject.geometry import Dot, RoundedRectangle\nfrom ...mobject.shape_matchers import SurroundingRectangle\nfrom ...mobject.svg.text_mobject import Paragraph\nfrom ...mobject.types.vectorized_mobject import VGroup\nfrom ...utils.color import WHITE\n\n\nclass Code(VGroup):\n \"\"\"A highlighted source code listing.\n\n An object ``listing`` of :class:`.Code` is a :class:`.VGroup` consisting\n of three objects:\n\n - The background, ``listing.background_mobject``. This is either\n a :class:`.Rectangle` (if the listing has been initialized with\n ``background=\"rectangle\"``, the default option) or a :class:`.VGroup`\n resembling a window (if ``background=\"window\"`` has been passed).\n\n - The line numbers, ``listing.line_numbers`` (a :class:`.Paragraph`\n object).\n\n - The highlighted code itself, ``listing.code`` (a :class:`.Paragraph`\n object).\n\n .. WARNING::\n\n Using a :class:`.Transform` on text with leading whitespace (and in\n this particular case: code) can look\n `weird <https://github.com/3b1b/manim/issues/1067>`_. Consider using\n :meth:`remove_invisible_chars` to resolve this issue.\n\n Parameters\n ----------\n file_name : :class:`str`\n Name of the code file to display.\n code : :class:`str`\n If ``file_name`` is not specified, a code string can be\n passed directly.\n tab_width : :class:`int`, optional\n Number of space characters corresponding to a tab character. Defaults to 3.\n line_spacing : :class:`float`, optional\n Amount of space between lines in relation to font size. Defaults to 0.3, which means 30% of font size.\n scale_factor : class:`float`, optional\n A number which scales displayed code. Defaults to 0.5.\n font : :class:`str`, optional\n The name of the text font to be used. Defaults to ``\"Monospac821 BT\"``.\n stroke_width : class:`float`, optional\n Stroke width for text. 0 is recommended, and the default.\n margin: class :`float`, optional\n Inner margin of text from the background. Defaults to 0.3.\n indentation_chars : :class:`str`, optional\n \"Indentation chars\" refers to the spaces/tabs at the beginning of a given code line. Defaults to ``\" \"`` (spaces).\n background : :class:`str`, optional\n Defines the background's type. Currently supports only ``\"rectangle\"`` (default) and ``\"window\"``.\n background_stroke_width : class:`float`, optional\n Defines the stroke width of the background. Defaults to 1.\n background_stroke_color : class:`str`, optional\n Defines the stroke color for the background. Defaults to ``WHITE``.\n corner_radius : :class:`float`, optional\n Defines the corner radius for the background. Defaults to 0.2.\n insert_line_no : :class:`bool`, optional\n Defines whether line numbers should be inserted in displayed code. Defaults to ``True``.\n line_no_from : :class:`int`, optional\n Defines the first line's number in the line count. Defaults to 1.\n line_no_buff : :class:`float`, optional\n Defines the spacing between line numbers and displayed code. Defaults to 0.4.\n style : :class:`str`, optional\n Defines the style type of displayed code. You can see possible names of styles in with :attr:`styles_list`. Defaults to ``\"vim\"``.\n language : Optional[:class:`str`], optional\n Specifies the programming language the given code was written in. If ``None``\n (the default), the language will be automatically detected. For the list of\n possible options, visit https://pygments.org/docs/lexers/ and look for\n 'aliases or short names'.\n generate_html_file : :class:`bool`, optional\n Defines whether to generate highlighted html code to the folder `assets/codes/generated_html_files`. Defaults to `False`.\n\n Attributes\n ----------\n background_mobject : :class:`~.VGroup`\n The background of the code listing.\n line_numbers : :class:`~.Paragraph`\n The line numbers for the code listing. Empty, if\n ``insert_line_no=False`` has been specified.\n code : :class:`~.Paragraph`\n The highlighted code.\n\n Examples\n --------\n Normal usage::\n\n listing = Code(\n \"helloworldcpp.cpp\",\n tab_width=4,\n background_stroke_width=1,\n background_stroke_color=WHITE,\n insert_line_no=True,\n style=Code.styles_list[15],\n background=\"window\",\n language=\"cpp\",\n )\n\n We can also render code passed as a string (but note that\n the language has to be specified in this case):\n\n .. manim:: CodeFromString\n :save_last_frame:\n\n class CodeFromString(Scene):\n def construct(self):\n code = '''from manim import Scene, Square\n\n class FadeInSquare(Scene):\n def construct(self):\n s = Square()\n self.play(FadeIn(s))\n self.play(s.animate.scale(2))\n self.wait()\n '''\n rendered_code = Code(code=code, tab_width=4, background=\"window\",\n language=\"Python\", font=\"Monospace\")\n self.add(rendered_code)\n\n \"\"\"\n\n # tuples in the form (name, aliases, filetypes, mimetypes)\n # 'language' is aliases or short names\n # For more information about pygments.lexers visit https://pygments.org/docs/lexers/\n # from pygments.lexers import get_all_lexers\n # all_lexers = get_all_lexers()\n styles_list = list(get_all_styles())\n # For more information about pygments.styles visit https://pygments.org/docs/styles/\n\n def __init__(\n self,\n file_name=None,\n code=None,\n tab_width=3,\n line_spacing=0.3,\n scale_factor=0.5,\n font=\"Monospac821 BT\",\n stroke_width=0,\n margin=0.3,\n indentation_chars=\" \",\n background=\"rectangle\", # or window\n background_stroke_width=1,\n background_stroke_color=WHITE,\n corner_radius=0.2,\n insert_line_no=True,\n line_no_from=1,\n line_no_buff=0.4,\n style=\"vim\",\n language=None,\n generate_html_file=False,\n **kwargs,\n ):\n VGroup.__init__(\n self,\n stroke_width=stroke_width,\n background_stroke_color=background_stroke_color,\n background_stroke_width=background_stroke_width,\n **kwargs,\n )\n self.tab_width = tab_width\n self.line_spacing = line_spacing\n self.scale_factor = scale_factor\n self.font = font\n self.margin = margin\n self.indentation_chars = indentation_chars\n self.background = background\n self.corner_radius = corner_radius\n self.insert_line_no = insert_line_no\n self.line_no_from = line_no_from\n self.line_no_buff = line_no_buff\n self.style = style\n self.language = language\n self.generate_html_file = generate_html_file\n\n self.file_path = None\n self.file_name = file_name\n if self.file_name:\n self.ensure_valid_file()\n with open(self.file_path, \"r\") as f:\n self.code_string = f.read()\n elif code:\n self.code_string = code\n else:\n raise ValueError(\n \"Neither a code file nor a code string have been specified.\"\n )\n if isinstance(self.style, str):\n self.style = self.style.lower()\n self.gen_html_string()\n strati = self.html_string.find(\"background:\")\n self.background_color = self.html_string[strati + 12 : strati + 19]\n self.gen_code_json()\n\n self.code = self.gen_colored_lines()\n if self.insert_line_no:\n self.line_numbers = self.gen_line_numbers()\n self.line_numbers.next_to(self.code, direction=LEFT, buff=self.line_no_buff)\n if self.background == \"rectangle\":\n if self.insert_line_no:\n foreground = VGroup(self.code, self.line_numbers)\n else:\n foreground = self.code\n rect = SurroundingRectangle(\n foreground,\n buff=self.margin,\n color=self.background_color,\n fill_color=self.background_color,\n stroke_width=self.background_stroke_width,\n stroke_color=self.background_stroke_color,\n fill_opacity=1,\n )\n rect.round_corners(self.corner_radius)\n self.background_mobject = VGroup(rect)\n else:\n if self.insert_line_no:\n foreground = VGroup(self.code, self.line_numbers)\n else:\n foreground = self.code\n height = foreground.height + 0.1 * 3 + 2 * self.margin\n width = foreground.width + 0.1 * 3 + 2 * self.margin\n\n rect = RoundedRectangle(\n corner_radius=self.corner_radius,\n height=height,\n width=width,\n stroke_width=self.background_stroke_width,\n stroke_color=self.background_stroke_color,\n color=self.background_color,\n fill_opacity=1,\n )\n red_button = Dot(radius=0.1, stroke_width=0, color=\"#ff5f56\")\n red_button.shift(LEFT * 0.1 * 3)\n yellow_button = Dot(radius=0.1, stroke_width=0, color=\"#ffbd2e\")\n green_button = Dot(radius=0.1, stroke_width=0, color=\"#27c93f\")\n green_button.shift(RIGHT * 0.1 * 3)\n buttons = VGroup(red_button, yellow_button, green_button)\n buttons.shift(\n UP * (height / 2 - 0.1 * 2 - 0.05)\n + LEFT * (width / 2 - 0.1 * 5 - self.corner_radius / 2 - 0.05)\n )\n\n self.background_mobject = VGroup(rect, buttons)\n x = (height - foreground.height) / 2 - 0.1 * 3\n self.background_mobject.shift(foreground.get_center())\n self.background_mobject.shift(UP * x)\n if self.insert_line_no:\n VGroup.__init__(\n self, self.background_mobject, self.line_numbers, self.code, **kwargs\n )\n else:\n VGroup.__init__(\n self,\n self.background_mobject,\n Dot(fill_opacity=0, stroke_opacity=0),\n self.code,\n **kwargs,\n )\n self.move_to(np.array([0, 0, 0]))\n\n def ensure_valid_file(self):\n \"\"\"Function to validate file.\"\"\"\n if self.file_name is None:\n raise Exception(\"Must specify file for Code\")\n possible_paths = [\n os.path.join(os.path.join(\"assets\", \"codes\"), self.file_name),\n self.file_name,\n ]\n for path in possible_paths:\n if os.path.exists(path):\n self.file_path = path\n return\n error = (\n f\"From: {os.getcwd()}, could not find {self.file_name} at either \"\n + f\"of these locations: {possible_paths}\"\n )\n raise IOError(error)\n\n def gen_line_numbers(self):\n \"\"\"Function to generate line_numbers.\n\n Returns\n -------\n :class:`~.Paragraph`\n The generated line_numbers according to parameters.\n \"\"\"\n line_numbers_array = []\n for line_no in range(0, self.code_json.__len__()):\n number = str(self.line_no_from + line_no)\n line_numbers_array.append(number)\n line_numbers = Paragraph(\n *list(line_numbers_array),\n line_spacing=self.line_spacing,\n alignment=\"right\",\n font=self.font,\n disable_ligatures=True,\n stroke_width=self.stroke_width,\n ).scale(self.scale_factor)\n for i in line_numbers:\n i.set_color(self.default_color)\n return line_numbers\n\n def gen_colored_lines(self):\n \"\"\"Function to generate code.\n\n Returns\n -------\n :class:`~.Paragraph`\n The generated code according to parameters.\n \"\"\"\n lines_text = []\n for line_no in range(0, self.code_json.__len__()):\n line_str = \"\"\n for word_index in range(self.code_json[line_no].__len__()):\n line_str = line_str + self.code_json[line_no][word_index][0]\n lines_text.append(self.tab_spaces[line_no] * \"\\t\" + line_str)\n code = Paragraph(\n *list(lines_text),\n line_spacing=self.line_spacing,\n tab_width=self.tab_width,\n font=self.font,\n disable_ligatures=True,\n stroke_width=self.stroke_width,\n ).scale(self.scale_factor)\n for line_no in range(code.__len__()):\n line = code.chars[line_no]\n line_char_index = self.tab_spaces[line_no]\n for word_index in range(self.code_json[line_no].__len__()):\n line[\n line_char_index : line_char_index\n + self.code_json[line_no][word_index][0].__len__()\n ].set_color(self.code_json[line_no][word_index][1])\n line_char_index += self.code_json[line_no][word_index][0].__len__()\n return code\n\n def gen_html_string(self):\n \"\"\"Function to generate html string with code highlighted and stores in variable html_string.\"\"\"\n self.html_string = hilite_me(\n self.code_string,\n self.language,\n self.style,\n self.insert_line_no,\n \"border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;\",\n self.file_path,\n self.line_no_from,\n )\n\n if self.generate_html_file:\n os.makedirs(\n os.path.join(\"assets\", \"codes\", \"generated_html_files\"), exist_ok=True\n )\n file = open(\n os.path.join(\n \"assets\", \"codes\", \"generated_html_files\", self.file_name + \".html\"\n ),\n \"w\",\n )\n file.write(self.html_string)\n file.close()\n\n def gen_code_json(self):\n \"\"\"Function to background_color, generate code_json and tab_spaces from html_string.\n background_color is just background color of displayed code.\n code_json is 2d array with rows as line numbers\n and columns as a array with length 2 having text and text's color value.\n tab_spaces is 2d array with rows as line numbers\n and columns as corresponding number of indentation_chars in front of that line in code.\n \"\"\"\n if (\n self.background_color == \"#111111\"\n or self.background_color == \"#272822\"\n or self.background_color == \"#202020\"\n or self.background_color == \"#000000\"\n ):\n self.default_color = \"#ffffff\"\n else:\n self.default_color = \"#000000\"\n # print(self.default_color,self.background_color)\n for i in range(3, -1, -1):\n self.html_string = self.html_string.replace(\"</\" + \" \" * i, \"</\")\n for i in range(10, -1, -1):\n self.html_string = self.html_string.replace(\n \"</span>\" + \" \" * i, \" \" * i + \"</span>\"\n )\n self.html_string = self.html_string.replace(\"background-color:\", \"background:\")\n\n if self.insert_line_no:\n start_point = self.html_string.find(\"</td><td><pre\")\n start_point = start_point + 9\n else:\n start_point = self.html_string.find(\"<pre\")\n self.html_string = self.html_string[start_point:]\n # print(self.html_string)\n lines = self.html_string.split(\"\\n\")\n lines = lines[0 : lines.__len__() - 2]\n start_point = lines[0].find(\">\")\n lines[0] = lines[0][start_point + 1 :]\n # print(lines)\n self.code_json = []\n self.tab_spaces = []\n code_json_line_index = -1\n for line_index in range(0, lines.__len__()):\n # print(lines[line_index])\n self.code_json.append([])\n code_json_line_index = code_json_line_index + 1\n if lines[line_index].startswith(self.indentation_chars):\n start_point = lines[line_index].find(\"<\")\n starting_string = lines[line_index][:start_point]\n indentation_chars_count = lines[line_index][:start_point].count(\n self.indentation_chars\n )\n if (\n starting_string.__len__()\n != indentation_chars_count * self.indentation_chars.__len__()\n ):\n lines[line_index] = (\n \"\\t\" * indentation_chars_count\n + starting_string[\n starting_string.rfind(self.indentation_chars)\n + self.indentation_chars.__len__() :\n ]\n + lines[line_index][start_point:]\n )\n else:\n lines[line_index] = (\n \"\\t\" * indentation_chars_count + lines[line_index][start_point:]\n )\n indentation_chars_count = 0\n if lines[line_index]:\n while lines[line_index][indentation_chars_count] == \"\\t\":\n indentation_chars_count = indentation_chars_count + 1\n self.tab_spaces.append(indentation_chars_count)\n # print(lines[line_index])\n lines[line_index] = self.correct_non_span(lines[line_index])\n # print(lines[line_index])\n words = lines[line_index].split(\"<span\")\n for word_index in range(1, words.__len__()):\n color_index = words[word_index].find(\"color:\")\n if color_index == -1:\n color = self.default_color\n else:\n starti = words[word_index][color_index:].find(\"#\")\n color = words[word_index][\n color_index + starti : color_index + starti + 7\n ]\n start_point = words[word_index].find(\">\")\n end_point = words[word_index].find(\"</span>\")\n text = words[word_index][start_point + 1 : end_point]\n text = html.unescape(text)\n if text != \"\":\n # print(text, \"'\" + color + \"'\")\n self.code_json[code_json_line_index].append([text, color])\n # print(self.code_json)\n\n def correct_non_span(self, line_str):\n \"\"\"Function put text color to those strings that don't have one according to background_color of displayed code.\n\n Parameters\n ---------\n line_str : :class:`str`\n Takes a html element's string to put color to it according to background_color of displayed code.\n\n Returns\n -------\n :class:`str`\n The generated html element's string with having color attributes.\n \"\"\"\n words = line_str.split(\"</span>\")\n line_str = \"\"\n for i in range(0, words.__len__()):\n if i != words.__len__() - 1:\n j = words[i].find(\"<span\")\n else:\n j = words[i].__len__()\n temp = \"\"\n starti = -1\n for k in range(0, j):\n if words[i][k] == \"\\t\" and starti == -1:\n continue\n else:\n if starti == -1:\n starti = k\n temp = temp + words[i][k]\n if temp != \"\":\n if i != words.__len__() - 1:\n temp = (\n '<span style=\"color:'\n + self.default_color\n + '\">'\n + words[i][starti:j]\n + \"</span>\"\n )\n else:\n temp = (\n '<span style=\"color:'\n + self.default_color\n + '\">'\n + words[i][starti:j]\n )\n temp = temp + words[i][j:]\n words[i] = temp\n if words[i] != \"\":\n line_str = line_str + words[i] + \"</span>\"\n return line_str\n\n\ndef hilite_me(\n code, language, style, insert_line_no, divstyles, file_path, line_no_from\n):\n \"\"\"Function to highlight code from string to html.\n\n Parameters\n ---------\n code : :class:`str`\n Code string.\n language : :class:`str`\n The name of the programming language the given code was written in.\n style : :class:`str`\n Code style name.\n insert_line_no : :class:`bool`\n Defines whether line numbers should be inserted in the html file.\n divstyles : :class:`str`\n Some html css styles.\n file_path : :class:`str`\n Path of code file.\n line_no_from : :class:`int`\n Defines the first line's number in the line count.\n \"\"\"\n style = style or \"colorful\"\n defstyles = \"overflow:auto;width:auto;\"\n\n formatter = HtmlFormatter(\n style=style,\n linenos=False,\n noclasses=True,\n cssclass=\"\",\n cssstyles=defstyles + divstyles,\n prestyles=\"margin: 0\",\n )\n if language is None and file_path:\n lexer = guess_lexer_for_filename(file_path, code)\n html = highlight(code, lexer, formatter)\n elif language is None:\n raise ValueError(\n \"The code language has to be specified when rendering a code string\"\n )\n else:\n html = highlight(code, get_lexer_by_name(language, **{}), formatter)\n if insert_line_no:\n html = insert_line_numbers_in_html(html, line_no_from)\n html = \"<!-- HTML generated by Code() -->\" + html\n return html\n\n\ndef insert_line_numbers_in_html(html, line_no_from):\n \"\"\"Function that inserts line numbers in the highlighted HTML code.\n\n Parameters\n ---------\n html : :class:`str`\n html string of highlighted code.\n line_no_from : :class:`int`\n Defines the first line's number in the line count.\n\n Returns\n -------\n :class:`str`\n The generated html string with having line numbers.\n \"\"\"\n match = re.search(\"(<pre[^>]*>)(.*)(</pre>)\", html, re.DOTALL)\n if not match:\n return html\n pre_open = match.group(1)\n pre = match.group(2)\n pre_close = match.group(3)\n\n html = html.replace(pre_close, \"</pre></td></tr></table>\")\n numbers = range(line_no_from, line_no_from + pre.count(\"\\n\") + 1)\n format_lines = \"%\" + str(len(str(numbers[-1]))) + \"i\"\n lines = \"\\n\".join(format_lines % i for i in numbers)\n html = html.replace(\n pre_open, \"<table><tr><td>\" + pre_open + lines + \"</pre></td><td>\" + pre_open\n )\n return html\n", "path": "manim/mobject/svg/code_mobject.py" } ]
[ { "content": "\"\"\"Mobject representing highlighted source code listings.\"\"\"\n\n__all__ = [\n \"Code\",\n]\n\nimport html\nimport os\nimport re\n\nimport numpy as np\nfrom pygments import highlight\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name, guess_lexer_for_filename\nfrom pygments.styles import get_all_styles\n\nfrom ...constants import *\nfrom ...mobject.geometry import Dot, RoundedRectangle\nfrom ...mobject.shape_matchers import SurroundingRectangle\nfrom ...mobject.svg.text_mobject import Paragraph\nfrom ...mobject.types.vectorized_mobject import VGroup\nfrom ...utils.color import WHITE\n\n\nclass Code(VGroup):\n \"\"\"A highlighted source code listing.\n\n An object ``listing`` of :class:`.Code` is a :class:`.VGroup` consisting\n of three objects:\n\n - The background, ``listing.background_mobject``. This is either\n a :class:`.Rectangle` (if the listing has been initialized with\n ``background=\"rectangle\"``, the default option) or a :class:`.VGroup`\n resembling a window (if ``background=\"window\"`` has been passed).\n\n - The line numbers, ``listing.line_numbers`` (a :class:`.Paragraph`\n object).\n\n - The highlighted code itself, ``listing.code`` (a :class:`.Paragraph`\n object).\n\n .. WARNING::\n\n Using a :class:`.Transform` on text with leading whitespace (and in\n this particular case: code) can look\n `weird <https://github.com/3b1b/manim/issues/1067>`_. Consider using\n :meth:`remove_invisible_chars` to resolve this issue.\n\n Parameters\n ----------\n file_name : :class:`str`\n Name of the code file to display.\n code : :class:`str`\n If ``file_name`` is not specified, a code string can be\n passed directly.\n tab_width : :class:`int`, optional\n Number of space characters corresponding to a tab character. Defaults to 3.\n line_spacing : :class:`float`, optional\n Amount of space between lines in relation to font size. Defaults to 0.3, which means 30% of font size.\n scale_factor : class:`float`, optional\n A number which scales displayed code. Defaults to 0.5.\n font : :class:`str`, optional\n The name of the text font to be used. Defaults to ``\"Monospac821 BT\"``.\n stroke_width : class:`float`, optional\n Stroke width for text. 0 is recommended, and the default.\n margin: class :`float`, optional\n Inner margin of text from the background. Defaults to 0.3.\n indentation_chars : :class:`str`, optional\n \"Indentation chars\" refers to the spaces/tabs at the beginning of a given code line. Defaults to ``\" \"`` (spaces).\n background : :class:`str`, optional\n Defines the background's type. Currently supports only ``\"rectangle\"`` (default) and ``\"window\"``.\n background_stroke_width : class:`float`, optional\n Defines the stroke width of the background. Defaults to 1.\n background_stroke_color : class:`str`, optional\n Defines the stroke color for the background. Defaults to ``WHITE``.\n corner_radius : :class:`float`, optional\n Defines the corner radius for the background. Defaults to 0.2.\n insert_line_no : :class:`bool`, optional\n Defines whether line numbers should be inserted in displayed code. Defaults to ``True``.\n line_no_from : :class:`int`, optional\n Defines the first line's number in the line count. Defaults to 1.\n line_no_buff : :class:`float`, optional\n Defines the spacing between line numbers and displayed code. Defaults to 0.4.\n style : :class:`str`, optional\n Defines the style type of displayed code. You can see possible names of styles in with :attr:`styles_list`. Defaults to ``\"vim\"``.\n language : Optional[:class:`str`], optional\n Specifies the programming language the given code was written in. If ``None``\n (the default), the language will be automatically detected. For the list of\n possible options, visit https://pygments.org/docs/lexers/ and look for\n 'aliases or short names'.\n generate_html_file : :class:`bool`, optional\n Defines whether to generate highlighted html code to the folder `assets/codes/generated_html_files`. Defaults to `False`.\n\n Attributes\n ----------\n background_mobject : :class:`~.VGroup`\n The background of the code listing.\n line_numbers : :class:`~.Paragraph`\n The line numbers for the code listing. Empty, if\n ``insert_line_no=False`` has been specified.\n code : :class:`~.Paragraph`\n The highlighted code.\n\n Examples\n --------\n Normal usage::\n\n listing = Code(\n \"helloworldcpp.cpp\",\n tab_width=4,\n background_stroke_width=1,\n background_stroke_color=WHITE,\n insert_line_no=True,\n style=Code.styles_list[15],\n background=\"window\",\n language=\"cpp\",\n )\n\n We can also render code passed as a string (but note that\n the language has to be specified in this case):\n\n .. manim:: CodeFromString\n :save_last_frame:\n\n class CodeFromString(Scene):\n def construct(self):\n code = '''from manim import Scene, Square\n\n class FadeInSquare(Scene):\n def construct(self):\n s = Square()\n self.play(FadeIn(s))\n self.play(s.animate.scale(2))\n self.wait()\n '''\n rendered_code = Code(code=code, tab_width=4, background=\"window\",\n language=\"Python\", font=\"Monospace\")\n self.add(rendered_code)\n\n \"\"\"\n\n # tuples in the form (name, aliases, filetypes, mimetypes)\n # 'language' is aliases or short names\n # For more information about pygments.lexers visit https://pygments.org/docs/lexers/\n # from pygments.lexers import get_all_lexers\n # all_lexers = get_all_lexers()\n styles_list = list(get_all_styles())\n # For more information about pygments.styles visit https://pygments.org/docs/styles/\n\n def __init__(\n self,\n file_name=None,\n code=None,\n tab_width=3,\n line_spacing=0.3,\n scale_factor=0.5,\n font=\"Monospac821 BT\",\n stroke_width=0,\n margin=0.3,\n indentation_chars=\" \",\n background=\"rectangle\", # or window\n background_stroke_width=1,\n background_stroke_color=WHITE,\n corner_radius=0.2,\n insert_line_no=True,\n line_no_from=1,\n line_no_buff=0.4,\n style=\"vim\",\n language=None,\n generate_html_file=False,\n **kwargs,\n ):\n VGroup.__init__(\n self,\n stroke_width=stroke_width,\n background_stroke_color=background_stroke_color,\n background_stroke_width=background_stroke_width,\n **kwargs,\n )\n self.tab_width = tab_width\n self.line_spacing = line_spacing\n self.scale_factor = scale_factor\n self.font = font\n self.margin = margin\n self.indentation_chars = indentation_chars\n self.background = background\n self.corner_radius = corner_radius\n self.insert_line_no = insert_line_no\n self.line_no_from = line_no_from\n self.line_no_buff = line_no_buff\n self.style = style\n self.language = language\n self.generate_html_file = generate_html_file\n\n self.file_path = None\n self.file_name = file_name\n if self.file_name:\n self.ensure_valid_file()\n with open(self.file_path, \"r\") as f:\n self.code_string = f.read()\n elif code:\n self.code_string = code\n else:\n raise ValueError(\n \"Neither a code file nor a code string have been specified.\"\n )\n if isinstance(self.style, str):\n self.style = self.style.lower()\n self.gen_html_string()\n strati = self.html_string.find(\"background:\")\n self.background_color = self.html_string[strati + 12 : strati + 19]\n self.gen_code_json()\n\n self.code = self.gen_colored_lines()\n if self.insert_line_no:\n self.line_numbers = self.gen_line_numbers()\n self.line_numbers.next_to(self.code, direction=LEFT, buff=self.line_no_buff)\n if self.background == \"rectangle\":\n if self.insert_line_no:\n foreground = VGroup(self.code, self.line_numbers)\n else:\n foreground = self.code\n rect = SurroundingRectangle(\n foreground,\n buff=self.margin,\n color=self.background_color,\n fill_color=self.background_color,\n stroke_width=self.background_stroke_width,\n stroke_color=self.background_stroke_color,\n fill_opacity=1,\n )\n rect.round_corners(self.corner_radius)\n self.background_mobject = VGroup(rect)\n else:\n if self.insert_line_no:\n foreground = VGroup(self.code, self.line_numbers)\n else:\n foreground = self.code\n height = foreground.height + 0.1 * 3 + 2 * self.margin\n width = foreground.width + 0.1 * 3 + 2 * self.margin\n\n rect = RoundedRectangle(\n corner_radius=self.corner_radius,\n height=height,\n width=width,\n stroke_width=self.background_stroke_width,\n stroke_color=self.background_stroke_color,\n color=self.background_color,\n fill_opacity=1,\n )\n red_button = Dot(radius=0.1, stroke_width=0, color=\"#ff5f56\")\n red_button.shift(LEFT * 0.1 * 3)\n yellow_button = Dot(radius=0.1, stroke_width=0, color=\"#ffbd2e\")\n green_button = Dot(radius=0.1, stroke_width=0, color=\"#27c93f\")\n green_button.shift(RIGHT * 0.1 * 3)\n buttons = VGroup(red_button, yellow_button, green_button)\n buttons.shift(\n UP * (height / 2 - 0.1 * 2 - 0.05)\n + LEFT * (width / 2 - 0.1 * 5 - self.corner_radius / 2 - 0.05)\n )\n\n self.background_mobject = VGroup(rect, buttons)\n x = (height - foreground.height) / 2 - 0.1 * 3\n self.background_mobject.shift(foreground.get_center())\n self.background_mobject.shift(UP * x)\n if self.insert_line_no:\n VGroup.__init__(\n self, self.background_mobject, self.line_numbers, self.code, **kwargs\n )\n else:\n VGroup.__init__(\n self,\n self.background_mobject,\n Dot(fill_opacity=0, stroke_opacity=0),\n self.code,\n **kwargs,\n )\n self.move_to(np.array([0, 0, 0]))\n\n def ensure_valid_file(self):\n \"\"\"Function to validate file.\"\"\"\n if self.file_name is None:\n raise Exception(\"Must specify file for Code\")\n possible_paths = [\n os.path.join(os.path.join(\"assets\", \"codes\"), self.file_name),\n self.file_name,\n ]\n for path in possible_paths:\n if os.path.exists(path):\n self.file_path = path\n return\n error = (\n f\"From: {os.getcwd()}, could not find {self.file_name} at either \"\n + f\"of these locations: {possible_paths}\"\n )\n raise IOError(error)\n\n def gen_line_numbers(self):\n \"\"\"Function to generate line_numbers.\n\n Returns\n -------\n :class:`~.Paragraph`\n The generated line_numbers according to parameters.\n \"\"\"\n line_numbers_array = []\n for line_no in range(0, self.code_json.__len__()):\n number = str(self.line_no_from + line_no)\n line_numbers_array.append(number)\n line_numbers = Paragraph(\n *list(line_numbers_array),\n line_spacing=self.line_spacing,\n alignment=\"right\",\n font=self.font,\n disable_ligatures=True,\n stroke_width=self.stroke_width,\n ).scale(self.scale_factor)\n for i in line_numbers:\n i.set_color(self.default_color)\n return line_numbers\n\n def gen_colored_lines(self):\n \"\"\"Function to generate code.\n\n Returns\n -------\n :class:`~.Paragraph`\n The generated code according to parameters.\n \"\"\"\n lines_text = []\n for line_no in range(0, self.code_json.__len__()):\n line_str = \"\"\n for word_index in range(self.code_json[line_no].__len__()):\n line_str = line_str + self.code_json[line_no][word_index][0]\n lines_text.append(self.tab_spaces[line_no] * \"\\t\" + line_str)\n code = Paragraph(\n *list(lines_text),\n line_spacing=self.line_spacing,\n tab_width=self.tab_width,\n font=self.font,\n disable_ligatures=True,\n stroke_width=self.stroke_width,\n ).scale(self.scale_factor)\n for line_no in range(code.__len__()):\n line = code.chars[line_no]\n line_char_index = self.tab_spaces[line_no]\n for word_index in range(self.code_json[line_no].__len__()):\n line[\n line_char_index : line_char_index\n + self.code_json[line_no][word_index][0].__len__()\n ].set_color(self.code_json[line_no][word_index][1])\n line_char_index += self.code_json[line_no][word_index][0].__len__()\n return code\n\n def gen_html_string(self):\n \"\"\"Function to generate html string with code highlighted and stores in variable html_string.\"\"\"\n self.html_string = hilite_me(\n self.code_string,\n self.language,\n self.style,\n self.insert_line_no,\n \"border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;\",\n self.file_path,\n self.line_no_from,\n )\n\n if self.generate_html_file:\n os.makedirs(\n os.path.join(\"assets\", \"codes\", \"generated_html_files\"), exist_ok=True\n )\n file = open(\n os.path.join(\n \"assets\", \"codes\", \"generated_html_files\", self.file_name + \".html\"\n ),\n \"w\",\n )\n file.write(self.html_string)\n file.close()\n\n def gen_code_json(self):\n \"\"\"Function to background_color, generate code_json and tab_spaces from html_string.\n background_color is just background color of displayed code.\n code_json is 2d array with rows as line numbers\n and columns as a array with length 2 having text and text's color value.\n tab_spaces is 2d array with rows as line numbers\n and columns as corresponding number of indentation_chars in front of that line in code.\n \"\"\"\n if (\n self.background_color == \"#111111\"\n or self.background_color == \"#272822\"\n or self.background_color == \"#202020\"\n or self.background_color == \"#000000\"\n ):\n self.default_color = \"#ffffff\"\n else:\n self.default_color = \"#000000\"\n # print(self.default_color,self.background_color)\n for i in range(3, -1, -1):\n self.html_string = self.html_string.replace(\"</\" + \" \" * i, \"</\")\n for i in range(10, -1, -1):\n self.html_string = self.html_string.replace(\n \"</span>\" + \" \" * i, \" \" * i + \"</span>\"\n )\n self.html_string = self.html_string.replace(\"background-color:\", \"background:\")\n\n if self.insert_line_no:\n start_point = self.html_string.find(\"</td><td><pre\")\n start_point = start_point + 9\n else:\n start_point = self.html_string.find(\"<pre\")\n self.html_string = self.html_string[start_point:]\n # print(self.html_string)\n lines = self.html_string.split(\"\\n\")\n lines = lines[0 : lines.__len__() - 2]\n start_point = lines[0].find(\">\")\n lines[0] = lines[0][start_point + 1 :]\n # print(lines)\n self.code_json = []\n self.tab_spaces = []\n code_json_line_index = -1\n for line_index in range(0, lines.__len__()):\n # print(lines[line_index])\n self.code_json.append([])\n code_json_line_index = code_json_line_index + 1\n if lines[line_index].startswith(self.indentation_chars):\n start_point = lines[line_index].find(\"<\")\n starting_string = lines[line_index][:start_point]\n indentation_chars_count = lines[line_index][:start_point].count(\n self.indentation_chars\n )\n if (\n starting_string.__len__()\n != indentation_chars_count * self.indentation_chars.__len__()\n ):\n lines[line_index] = (\n \"\\t\" * indentation_chars_count\n + starting_string[\n starting_string.rfind(self.indentation_chars)\n + self.indentation_chars.__len__() :\n ]\n + lines[line_index][start_point:]\n )\n else:\n lines[line_index] = (\n \"\\t\" * indentation_chars_count + lines[line_index][start_point:]\n )\n indentation_chars_count = 0\n if lines[line_index]:\n while lines[line_index][indentation_chars_count] == \"\\t\":\n indentation_chars_count = indentation_chars_count + 1\n self.tab_spaces.append(indentation_chars_count)\n # print(lines[line_index])\n lines[line_index] = self.correct_non_span(lines[line_index])\n # print(lines[line_index])\n words = lines[line_index].split(\"<span\")\n for word_index in range(1, words.__len__()):\n color_index = words[word_index].find(\"color:\")\n if color_index == -1:\n color = self.default_color\n else:\n starti = words[word_index][color_index:].find(\"#\")\n color = words[word_index][\n color_index + starti : color_index + starti + 7\n ]\n start_point = words[word_index].find(\">\")\n end_point = words[word_index].find(\"</span>\")\n text = words[word_index][start_point + 1 : end_point]\n text = html.unescape(text)\n if text != \"\":\n # print(text, \"'\" + color + \"'\")\n self.code_json[code_json_line_index].append([text, color])\n # print(self.code_json)\n\n def correct_non_span(self, line_str):\n \"\"\"Function put text color to those strings that don't have one according to background_color of displayed code.\n\n Parameters\n ---------\n line_str : :class:`str`\n Takes a html element's string to put color to it according to background_color of displayed code.\n\n Returns\n -------\n :class:`str`\n The generated html element's string with having color attributes.\n \"\"\"\n words = line_str.split(\"</span>\")\n line_str = \"\"\n for i in range(0, words.__len__()):\n if i != words.__len__() - 1:\n j = words[i].find(\"<span\")\n else:\n j = words[i].__len__()\n temp = \"\"\n starti = -1\n for k in range(0, j):\n if words[i][k] == \"\\t\" and starti == -1:\n continue\n else:\n if starti == -1:\n starti = k\n temp = temp + words[i][k]\n if temp != \"\":\n if i != words.__len__() - 1:\n temp = (\n '<span style=\"color:'\n + self.default_color\n + '\">'\n + words[i][starti:j]\n + \"</span>\"\n )\n else:\n temp = (\n '<span style=\"color:'\n + self.default_color\n + '\">'\n + words[i][starti:j]\n )\n temp = temp + words[i][j:]\n words[i] = temp\n if words[i] != \"\":\n line_str = line_str + words[i] + \"</span>\"\n return line_str\n\n\ndef hilite_me(\n code, language, style, insert_line_no, divstyles, file_path, line_no_from\n):\n \"\"\"Function to highlight code from string to html.\n\n Parameters\n ---------\n code : :class:`str`\n Code string.\n language : :class:`str`\n The name of the programming language the given code was written in.\n style : :class:`str`\n Code style name.\n insert_line_no : :class:`bool`\n Defines whether line numbers should be inserted in the html file.\n divstyles : :class:`str`\n Some html css styles.\n file_path : :class:`str`\n Path of code file.\n line_no_from : :class:`int`\n Defines the first line's number in the line count.\n \"\"\"\n style = style or \"colorful\"\n defstyles = \"overflow:auto;width:auto;\"\n\n formatter = HtmlFormatter(\n style=style,\n linenos=False,\n noclasses=True,\n cssclass=\"\",\n cssstyles=defstyles + divstyles,\n prestyles=\"margin: 0\",\n )\n if language is None and file_path:\n lexer = guess_lexer_for_filename(file_path, code)\n html = highlight(code, lexer, formatter)\n elif language is None:\n raise ValueError(\n \"The code language has to be specified when rendering a code string\"\n )\n else:\n html = highlight(code, get_lexer_by_name(language, **{}), formatter)\n if insert_line_no:\n html = insert_line_numbers_in_html(html, line_no_from)\n html = \"<!-- HTML generated by Code() -->\" + html\n return html\n\n\ndef insert_line_numbers_in_html(html, line_no_from):\n \"\"\"Function that inserts line numbers in the highlighted HTML code.\n\n Parameters\n ---------\n html : :class:`str`\n html string of highlighted code.\n line_no_from : :class:`int`\n Defines the first line's number in the line count.\n\n Returns\n -------\n :class:`str`\n The generated html string with having line numbers.\n \"\"\"\n match = re.search(\"(<pre[^>]*>)(.*)(</pre>)\", html, re.DOTALL)\n if not match:\n return html\n pre_open = match.group(1)\n pre = match.group(2)\n pre_close = match.group(3)\n\n html = html.replace(pre_close, \"</pre></td></tr></table>\")\n numbers = range(line_no_from, line_no_from + pre.count(\"\\n\") + 1)\n format_lines = \"%\" + str(len(str(numbers[-1]))) + \"i\"\n lines = \"\\n\".join(format_lines % i for i in numbers)\n html = html.replace(\n pre_open, \"<table><tr><td>\" + pre_open + lines + \"</pre></td><td>\" + pre_open\n )\n return html\n", "path": "manim/mobject/svg/code_mobject.py" } ]
diff --git a/manim/mobject/svg/code_mobject.py b/manim/mobject/svg/code_mobject.py index c9646e0948..3d02f0a92c 100644 --- a/manim/mobject/svg/code_mobject.py +++ b/manim/mobject/svg/code_mobject.py @@ -2,8 +2,6 @@ __all__ = [ "Code", - "hilite_me", - "insert_line_numbers_in_html", ] import html
strawberry-graphql__strawberry-585
v0.43.0 requires pydantic, but is marked optional Attempting to start a very simple server results in: ``` $ strawberry server app Traceback (most recent call last): File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/bin/strawberry", line 5, in <module> from strawberry.cli import run File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/__init__.py", line 1, in <module> from . import experimental, federation File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/__init__.py", line 1, in <module> from . import pydantic File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/__init__.py", line 1, in <module> from .error_type import error_type File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/error_type.py", line 4, in <module> from pydantic import BaseModel ModuleNotFoundError: No module named 'pydantic' ``` Even though pyproject.toml has pydantic marked as optional: https://github.com/strawberry-graphql/strawberry/blob/master/pyproject.toml#L37 app.py is: ``` import asyncio import strawberry async def resolve_hello(root, info, name: str) -> str: await asyncio.sleep(1) return f"Hello {name}" @strawberry.type class Query: hello: str = strawberry.field(resolver=resolve_hello) schema = strawberry.Schema(query=Query) ```
[ { "content": "from . import pydantic\n\n\n__all__ = [\"pydantic\"]\n", "path": "strawberry/experimental/__init__.py" } ]
[ { "content": "try:\n from . import pydantic\nexcept ImportError:\n pass\nelse:\n __all__ = [\"pydantic\"]\n", "path": "strawberry/experimental/__init__.py" } ]
diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..b322fcf4e9 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,3 @@ +Release type: patch + +This releases fixes an issue with Strawberry requiring Pydantic even when not used. diff --git a/strawberry/experimental/__init__.py b/strawberry/experimental/__init__.py index 8a4098b279..6386ad81d7 100644 --- a/strawberry/experimental/__init__.py +++ b/strawberry/experimental/__init__.py @@ -1,4 +1,6 @@ -from . import pydantic - - -__all__ = ["pydantic"] +try: + from . import pydantic +except ImportError: + pass +else: + __all__ = ["pydantic"]
emissary-ingress__emissary-23
Users need statsd support Ambassador needs to be able to send stats off to statsd, whatever statsd the user wants to use.
[ { "content": "# Don't change this line without also changing .bumpversion.cfg\nVersion = \"0.5.0\"\n", "path": "ambassador/VERSION.py" } ]
[ { "content": "# Don't change this line without also changing .bumpversion.cfg\nVersion = \"0.5.1\"\n", "path": "ambassador/VERSION.py" } ]
diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 68cfaafe0d..bc0421fb93 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.5.0 +current_version = 0.5.1 commit = True tag = True tag_name = v{new_version} @@ -13,9 +13,20 @@ search = 'image: .*:{current_version}' [bumpversion:file:ambassador.yaml] search = 'image: .*:{current_version}' +[bumpversion:file:statsd-sink.yaml] +search = 'image: .*:{current_version}' + [bumpversion:file:Makefile] search = 'VERSION={current_version}' [bumpversion:file:ambassador/VERSION.py] search = 'Version = "{current_version}"' +[bumpversion:file:templates/ambassador-rest.yaml.sh] +search = 'image: .*:{current_version}' + +[bumpversion:file:templates/ambassador-sds.yaml.sh] +search = 'image: .*:{current_version}' + +[bumpversion:file:templates/statsd-sink.yaml.sh] +search = 'image: .*:{current_version}' diff --git a/Makefile b/Makefile index ecf044057c..fd934f61c7 100644 --- a/Makefile +++ b/Makefile @@ -1,23 +1,31 @@ -all: docker-images ambassador.yaml +all: docker-images ambassador.yaml statsd-sink.yaml -VERSION=0.5.0 +VERSION=0.5.1 .ALWAYS: +ambassador-sds.yaml: .ALWAYS + sh templates/ambassador-sds.yaml.sh > ambassador-sds.yaml + +ambassador-rest.yaml: .ALWAYS + sh templates/ambassador-rest.yaml.sh > ambassador-rest.yaml + ambassador.yaml: ambassador-store.yaml ambassador-sds.yaml ambassador-rest.yaml cat ambassador-store.yaml ambassador-sds.yaml ambassador-rest.yaml > ambassador.yaml -docker-images: ambassador-image sds-image +statsd-sink.yaml: .ALWAYS + sh templates/statsd-sink.yaml.sh > statsd-sink.yaml + +docker-images: ambassador-image sds-image statsd-image prom-statsd-exporter ambassador-image: .ALWAYS - docker build -t dwflynn/ambassador:$(VERSION) ambassador - if [ -n "$(DOCKER_REGISTRY)" ]; then \ - docker push $(DOCKER_REGISTRY)/ambassador:$(VERSION); \ - fi + scripts/docker_build_maybe_push dwflynn ambassador $(VERSION) ambassador sds-image: .ALWAYS - docker build -t dwflynn/ambassador-sds:$(VERSION) sds - if [ -n "$(DOCKER_REGISTRY)" ]; then \ - docker push $(DOCKER_REGISTRY)/ambassador-sds:$(VERSION); \ - fi - \ No newline at end of file + scripts/docker_build_maybe_push dwflynn ambassador-sds $(VERSION) sds + +statsd-image: .ALWAYS + scripts/docker_build_maybe_push ark3 statsd $(VERSION) statsd + +prom-statsd-exporter: .ALWAYS + scripts/docker_build_maybe_push ark3 prom-statsd-exporter $(VERSION) prom-statsd-exporter diff --git a/ambassador-rest.yaml b/ambassador-rest.yaml index 9914d8d48a..7fc84a4ae9 100644 --- a/ambassador-rest.yaml +++ b/ambassador-rest.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: ambassador - image: dwflynn/ambassador:0.5.0 + image: ark3/ambassador:0.5.1 # ports: # - containerPort: 80 # protocol: TCP @@ -24,6 +24,9 @@ spec: volumeMounts: - mountPath: /etc/certs name: cert-data + - name: statsd + image: ark3/statsd:0.5.1 + resources: {} volumes: - name: cert-data secret: diff --git a/ambassador-sds.yaml b/ambassador-sds.yaml index f19cb2f168..a8826b0518 100644 --- a/ambassador-sds.yaml +++ b/ambassador-sds.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: ambassador-sds - image: dwflynn/ambassador-sds:0.5.0 + image: dwflynn/ambassador-sds:0.5.1 resources: {} restartPolicy: Always status: {} diff --git a/ambassador.yaml b/ambassador.yaml index 23293604d5..046c9678e6 100644 --- a/ambassador.yaml +++ b/ambassador.yaml @@ -51,7 +51,7 @@ spec: spec: containers: - name: ambassador-sds - image: dwflynn/ambassador-sds:0.5.0 + image: dwflynn/ambassador-sds:0.5.1 resources: {} restartPolicy: Always status: {} @@ -89,7 +89,7 @@ spec: spec: containers: - name: ambassador - image: dwflynn/ambassador:0.5.0 + image: ark3/ambassador:0.5.1 # ports: # - containerPort: 80 # protocol: TCP @@ -97,6 +97,9 @@ spec: volumeMounts: - mountPath: /etc/certs name: cert-data + - name: statsd + image: ark3/statsd:0.5.1 + resources: {} volumes: - name: cert-data secret: diff --git a/ambassador/VERSION.py b/ambassador/VERSION.py index 13cc219066..6fd8460ded 100644 --- a/ambassador/VERSION.py +++ b/ambassador/VERSION.py @@ -1,2 +1,2 @@ # Don't change this line without also changing .bumpversion.cfg -Version = "0.5.0" +Version = "0.5.1" diff --git a/ambassador/envoy-template.json b/ambassador/envoy-template.json index 8f9966c1c9..17df884a37 100644 --- a/ambassador/envoy-template.json +++ b/ambassador/envoy-template.json @@ -52,8 +52,10 @@ } ] }, - "refresh_delay_ms": 15000 + "refresh_delay_ms": 15000 }, "clusters": [] - } + }, + "statsd_local_udp_port": 8125, + "stats_flush_interval_ms": 1000 } diff --git a/helm-prom-config.yaml b/helm-prom-config.yaml new file mode 100644 index 0000000000..6720c233a3 --- /dev/null +++ b/helm-prom-config.yaml @@ -0,0 +1,608 @@ +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: prom/alertmanager + tag: v0.5.1 + pullPolicy: IfNotPresent + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, volume.beta.kubernetes.io/storage-class: <storageClass> + ## Default: volume.alpha.kubernetes.io/storage-class: default + ## + storageClass: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + service: + annotations: {} + clusterIP: "" + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.1 + pullPolicy: IfNotPresent + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics will not be installed + ## + enabled: true + + ## kube-state-metrics container name + ## + name: kube-state-metrics + + ## kube-state-metrics container image + ## + image: + repository: gcr.io/google_containers/kube-state-metrics + tag: v0.4.1 + pullPolicy: IfNotPresent + + ## Node labels for kube-state-metrics pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to kube-state-metrics pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## kube-state-metrics resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 16Mi + # requests: + # cpu: 10m + # memory: 16Mi + + service: + annotations: + prometheus.io/scrape: "true" + + clusterIP: None + + ## List of IP addresses at which the kube-state-metrics service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: prom/node-exporter + tag: v0.13.0 + pullPolicy: IfNotPresent + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + service: + annotations: + prometheus.io/scrape: "true" + + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + name: server + + ## Prometheus server container image + ## + image: + repository: prom/prometheus + tag: v1.5.2 + pullPolicy: IfNotPresent + + ## (optional) alertmanager URL + ## only used if alertmanager.enabled = false + alertmanagerURL: "" + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, volume.beta.kubernetes.io/storage-class: <storageClass> + ## Default: volume.alpha.kubernetes.io/storage-class: default + ## + storageClass: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + replicaCount: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + service: + annotations: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: |- + global: + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + alerts: "" + rules: "" + + prometheus.yml: |- + rule_files: + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: statsd_exporter + metrics_path: "/metrics" + static_configs: + - targets: ["statsd-sink:9102"] + labels: + groups: "statsd" + + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + + # Example scrape config for probing services via the Blackbox Exporter. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: (.+):(?:\d+);(\d+) + replacement: ${1}:${2} + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name diff --git a/prom-statsd-exporter/Dockerfile b/prom-statsd-exporter/Dockerfile new file mode 100644 index 0000000000..cec86abffa --- /dev/null +++ b/prom-statsd-exporter/Dockerfile @@ -0,0 +1,3 @@ +FROM prom/statsd-exporter +EXPOSE 9102 8125 +CMD ["-statsd.listen-address=:8125"] diff --git a/scripts/docker_build_maybe_push b/scripts/docker_build_maybe_push new file mode 100755 index 0000000000..47de4756ce --- /dev/null +++ b/scripts/docker_build_maybe_push @@ -0,0 +1,17 @@ +#!/bin/sh + +# If DOCKER_REGISTRY is set, use that as the namespace for the build, +# then push to Docker Hub. Otherwise use the specified namespace to +# build, don't push. + +DEFAULT_NAMESPACE="$1" +NAME="$2" +VERSION="$3" +TARGET="$4" + +if [ -n "${DOCKER_REGISTRY}" ]; then + docker build -t "${DOCKER_REGISTRY}/${NAME}:${VERSION}" "${TARGET}" + docker push "${DOCKER_REGISTRY}/${NAME}:${VERSION}" +else + docker build -t "${DEFAULT_NAMESPACE}/${NAME}:${VERSION}" "${TARGET}" +fi diff --git a/stats_walkthrough.txt b/stats_walkthrough.txt new file mode 100644 index 0000000000..830ae8920e --- /dev/null +++ b/stats_walkthrough.txt @@ -0,0 +1,44 @@ +git clone https://github.com/datawire/ambassador.git +git checkout ark3/statsd + +# Make sure kubectl is pointing to the right k8s cluster + +helm init +helm install stable/prometheus --name prom -f helm-prom-config.yaml +# You may find a prometheus-3.0.1.tgz file now. You can delete it. + +kubectl port-forward prom-prometheus-server-[TAB] 9090 # This will tie up the shell +# Navigate to http://localhost:9090/ to see the Prometheus UI + +make # Builds images +# or DOCKER_REGISTRY=rhs make # builds and pushes to Docker Hub + +kubectl apply -f statsd-sink.yaml +kubectl apply -f ambassador-http.yaml +kubectl apply -f ambassador.yaml +kubectl apply -f demo-usersvc.yaml + +eval $(sh scripts/geturl) # Sets AMBASSADORURL +http $AMBASSADORURL/ambassador/health # Or use curl +sh scripts/map user usersvc +http $AMBASSADORURL/user/health +http $AMBASSADORURL/user/purple # Fails, no such user +http PUT $AMBASSADORURL/user/purple "fullname=Purple User" password=123456 +http $AMBASSADORURL/user/purple # Succeeds + +# Make sure port forwarding is still active +# Examine the User service's stats +# - Request rate (requests per second): +# rate(envoy_cluster_usersvc_cluster_upstream_rq_total_counter[5m]) +# - Success rate (fraction of successful requests) +# envoy_cluster_usersvc_cluster_upstream_rq_2xx_counter / envoy_cluster_usersvc_cluster_upstream_rq_total_counter +# (or something like that) +# - Average request latency +# - envoy_cluster_usersvc_cluster_upstream_rq_time_timer (quantiles: 50%, 90%, 99%) +# - envoy_cluster_usersvc_cluster_upstream_rq_time_timer_sum (total time spent) +# - envoy_cluster_usersvc_cluster_upstream_rq_time_timer_count (number of timer events) +# envoy_cluster_usersvc_cluster_upstream_rq_time_timer_sum / envoy_cluster_usersvc_cluster_upstream_rq_time_timer_count +# (or something like that) +# Try something like +# for i in {1..10} ; do http $AMBASSADORURL/user/purple ; done +# to affect the request rate. diff --git a/statsd-sink.yaml b/statsd-sink.yaml new file mode 100644 index 0000000000..9f0a011b27 --- /dev/null +++ b/statsd-sink.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + creationTimestamp: null + name: statsd-sink +spec: + replicas: 1 + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + service: statsd-sink + spec: + containers: + - name: statsd-sink + image: ark3/prom-statsd-exporter:0.5.1 + resources: {} + restartPolicy: Always +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + service: statsd-sink + name: statsd-sink +spec: + ports: + - protocol: UDP + port: 8125 + name: statsd-metrics + - protocol: TCP + port: 9102 + name: prometheus-metrics + selector: + service: statsd-sink diff --git a/statsd/Dockerfile b/statsd/Dockerfile new file mode 100644 index 0000000000..316f358a91 --- /dev/null +++ b/statsd/Dockerfile @@ -0,0 +1,6 @@ +FROM mhart/alpine-node:7 + +WORKDIR /application +RUN npm install https://api.github.com/repos/etsy/statsd/tarball/8d5363c +ADD config.js . +CMD ["node", "node_modules/statsd/stats.js", "config.js"] diff --git a/statsd/config.js b/statsd/config.js new file mode 100644 index 0000000000..17adf75415 --- /dev/null +++ b/statsd/config.js @@ -0,0 +1,16 @@ +(function () { + "use strict"; + return { + // Production configuration + //"backends": ["./backends/repeater"], + //"flushInterval": 10000, + + // Development configuration + "debug": true, + "backends": ["./backends/repeater", "./backends/console"], + "flushInterval": 1000, + + "repeater": [ { "host": "statsd-sink", "port": 8125 } ], + "repeaterProtocol": "upd4" + }; +})(); diff --git a/templates/ambassador-rest.yaml.sh b/templates/ambassador-rest.yaml.sh new file mode 100644 index 0000000000..550c1f8d0e --- /dev/null +++ b/templates/ambassador-rest.yaml.sh @@ -0,0 +1,44 @@ +if [ -z "${DOCKER_REGISTRY}" ]; then + AMREG=dwflynn + STREG=ark3 +else + AMREG="${DOCKER_REGISTRY}" + STREG="${DOCKER_REGISTRY}" +fi +cat <<EOF +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + creationTimestamp: null + name: ambassador +spec: + replicas: 1 + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + service: ambassador + # service: ambassador-admin + spec: + containers: + - name: ambassador + image: ${AMREG}/ambassador:0.5.1 + # ports: + # - containerPort: 80 + # protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/certs + name: cert-data + - name: statsd + image: ${STREG}/statsd:0.5.1 + resources: {} + volumes: + - name: cert-data + secret: + secretName: ambassador-certs + restartPolicy: Always +status: {} +EOF diff --git a/templates/ambassador-sds.yaml.sh b/templates/ambassador-sds.yaml.sh new file mode 100644 index 0000000000..b355e7d860 --- /dev/null +++ b/templates/ambassador-sds.yaml.sh @@ -0,0 +1,42 @@ +if [ -z "${DOCKER_REGISTRY}" ]; then + DOCKER_REGISTRY=dwflynn +fi +cat <<EOF +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + creationTimestamp: null + name: ambassador-sds +spec: + replicas: 1 + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + service: ambassador-sds + spec: + containers: + - name: ambassador-sds + image: ${DOCKER_REGISTRY}/ambassador-sds:0.5.1 + resources: {} + restartPolicy: Always +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + service: ambassador-sds + name: ambassador-sds +spec: + type: NodePort + ports: + - name: ambassador-sds + port: 5000 + targetPort: 5000 + selector: + service: ambassador-sds +EOF diff --git a/templates/statsd-sink.yaml.sh b/templates/statsd-sink.yaml.sh new file mode 100644 index 0000000000..471db615fa --- /dev/null +++ b/templates/statsd-sink.yaml.sh @@ -0,0 +1,44 @@ +if [ -z "${DOCKER_REGISTRY}" ]; then + DOCKER_REGISTRY=ark3 +fi +cat <<EOF +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + creationTimestamp: null + name: statsd-sink +spec: + replicas: 1 + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + service: statsd-sink + spec: + containers: + - name: statsd-sink + image: ${DOCKER_REGISTRY}/prom-statsd-exporter:0.5.1 + resources: {} + restartPolicy: Always +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + service: statsd-sink + name: statsd-sink +spec: + ports: + - protocol: UDP + port: 8125 + name: statsd-metrics + - protocol: TCP + port: 9102 + name: prometheus-metrics + selector: + service: statsd-sink +EOF
Anselmoo__spectrafit-660
[Docs]: Update release drafter ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Missing Information in the Docs - Link to the complete changes of the latest release. - Exclude auto commits in the contributor list ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a0\"\n", "path": "spectrafit/__init__.py" } ]
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a1\"\n", "path": "spectrafit/__init__.py" } ]
diff --git a/.github/workflows/update-changelog.yaml b/.github/workflows/update-changelog.yaml index be5bbb6b0..e98b14df0 100644 --- a/.github/workflows/update-changelog.yaml +++ b/.github/workflows/update-changelog.yaml @@ -3,7 +3,12 @@ name: "Update Changelog" on: release: - types: [released] + types: + - published + +permissions: + contents: read + packages: write jobs: update: diff --git a/poetry.lock b/poetry.lock index 4d9a3ba20..21ee1fa4f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2023,14 +2023,14 @@ test = ["coverage", "jupyter-server[test] (>=2.0.0a0)", "pytest (>=7.0)", "pytes [[package]] name = "jupyter-ydoc" -version = "0.2.2" +version = "0.2.3" description = "Document structures for collaborative editing using Ypy" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "jupyter_ydoc-0.2.2-py3-none-any.whl", hash = "sha256:596a9ae5986b59f8776c42430b5ad516405963574078ab801781933c9690be93"}, - {file = "jupyter_ydoc-0.2.2.tar.gz", hash = "sha256:3163bd4745eedd46d4bba6df52ab26be3c5c44c3a8aaf247635062486ea8f84f"}, + {file = "jupyter_ydoc-0.2.3-py3-none-any.whl", hash = "sha256:3ac51abfe378c6aeb62a449e8f0241bede1205f0199b0d27429140cbba950f79"}, + {file = "jupyter_ydoc-0.2.3.tar.gz", hash = "sha256:98db7785215873c64d7dfcb1b741f41df11994c4b3d7e2957e004b392d6f11ea"}, ] [package.dependencies] @@ -2038,6 +2038,7 @@ importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} y-py = ">=0.5.3,<0.6.0" [package.extras] +dev = ["click", "jupyter-releaser"] test = ["pre-commit", "pytest", "pytest-asyncio", "websockets (>=10.0)", "ypy-websocket (>=0.3.1,<0.4.0)"] [[package]] @@ -3676,14 +3677,14 @@ files = [ [[package]] name = "platformdirs" -version = "3.1.0" +version = "3.1.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-3.1.0-py3-none-any.whl", hash = "sha256:13b08a53ed71021350c9e300d4ea8668438fb0046ab3937ac9a29913a1a1350a"}, - {file = "platformdirs-3.1.0.tar.gz", hash = "sha256:accc3665857288317f32c7bebb5a8e482ba717b474f3fc1d18ca7f9214be0cef"}, + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, ] [package.extras] diff --git a/pyproject.toml b/pyproject.toml index d7d3560e8..51f908603 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "SpectraFit" -version = "1.0.0a0" +version = "1.0.0a1" description = "Fast fitting of 2D- and 3D-Spectra with established routines" readme = "README.md" authors = ["Anselm Hahn <[email protected]>"] diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py index d5e3f80bb..4b2ff1f8a 100644 --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "1.0.0a0" +__version__ = "1.0.0a1"
Anselmoo__spectrafit-695
[Docs]: Update labeled criteria for CHANGELOG ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Missing Information in the Docs Update the labeler criteria for avoiding double labels in `CHANGELOG.md` ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a7\"\n", "path": "spectrafit/__init__.py" } ]
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a8\"\n", "path": "spectrafit/__init__.py" } ]
diff --git a/.github/labeler.yml b/.github/labeler.yml index c311d724b..765ad286e 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,21 +1,26 @@ +codeowners: + - .github/CODEOWNERS codespaces: - .devcontainer/** dependencies: - poetry.lock + - github/dependabot.yml documentation: - docs/** - - .github/ISSUE_TEMPLATE/** - - .github/pull_request_template.md - Examples/** - mkdocs.yml - README.md + - INSTALLATION.md +forms: + - .github/ISSUE_TEMPLATE/** + - .github/pull_request_template.md github-actions: - .github/workflows/** pre-commit: - .pre-commit-config.yaml - .pre-commit-hooks.yaml python: - - spectrafit/** + - all: ["spectrafit/**/*.py", "!spectrafit/**/test_*.py"] testing: - all: ["spectrafit/**/test_*.py"] docker: @@ -26,28 +31,24 @@ release: - all: ["spectrafit/__init__.py", "pyproject.toml"] vendor: - vendor/** -forms: - - .github/ISSUE_TEMPLATE/** - - .github/pull_request_template.md maintenance: - pyproject.toml - - poetry.lock - .gitignore - .github/dependabot.yml - .github/release-drafter.yml - .github/labeler.yml - .github/workflows/label.yml - .sonarcloud.properties - - LICENSE - - CHANGELOG.md - - CODE_OF_CONDUCT.md - - "SECURITY.md" - .pre-commit-config.yaml - .prettierignore + - .pylintrc - .sonarcloud.properties - .sourcery.yaml +license: + - LICENSE security: - SECURITY.md - - poetry.lock +code-of-conduct: + - CODE_OF_CONDUCT.md changelog: - CONTRIBUTING.md diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index e994e40ad..36e435734 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -15,7 +15,7 @@ categories: label: "milestone" - title: "🚀 New" label: "enhancement" - - title: "💻 New" + - title: "💻 Codesspaces" label: "codespaces" - title: "🐛 Bug Fixes" label: "bug" @@ -37,6 +37,8 @@ categories: label: "changelog" - title: "🧾 Forms" label: "forms" + - title: "🐳 Docker" + label: "docker" version-resolver: major: @@ -45,14 +47,17 @@ version-resolver: minor: labels: - "enhancement" + - "feature" + - "codeowners" patch: labels: - "codespaces" - - "bug" + - "docker" - "maintenance" - - "github-actions" - "documentation" - "dependencies" + - "forms" + - "github-actions" - "security" - "testing" - "good first issue" @@ -62,6 +67,7 @@ exclude-labels: - "duplicate" - "invalid" - "wontfix" + - "question" exclude-contributors: - dependabot[bot] diff --git a/poetry.lock b/poetry.lock index dd27c6ca0..710eff940 100644 --- a/poetry.lock +++ b/poetry.lock @@ -148,14 +148,14 @@ python-dateutil = ">=2.7.0" [[package]] name = "art" -version = "5.8" +version = "5.9" description = "ASCII Art Library For Python" category = "main" optional = false python-versions = ">=2.7" files = [ - {file = "art-5.8-py2.py3-none-any.whl", hash = "sha256:cd77a9dcfe49dfb82f37a7b2401ae771fde6e155942b4e3dbc50a68c2bc7027c"}, - {file = "art-5.8.tar.gz", hash = "sha256:83e383910e07eb7844a6618498708e0080ea842d584128ee6b0dc62d8b48cf36"}, + {file = "art-5.9-py2.py3-none-any.whl", hash = "sha256:9b02463468fc51c77200dadee970cf02c9576008c4f3b99d7ed02371ec037bd1"}, + {file = "art-5.9.tar.gz", hash = "sha256:6a311ec0a227c6554c6789e91e74c3f2c86453a2a8e76947788695b7a91ccff0"}, ] [package.extras] @@ -777,63 +777,63 @@ test-no-images = ["pytest"] [[package]] name = "coverage" -version = "7.2.1" +version = "7.2.2" description = "Code coverage measurement for Python" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "coverage-7.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49567ec91fc5e0b15356da07a2feabb421d62f52a9fff4b1ec40e9e19772f5f8"}, - {file = "coverage-7.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2ef6cae70168815ed91388948b5f4fcc69681480a0061114db737f957719f03"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3004765bca3acd9e015794e5c2f0c9a05587f5e698127ff95e9cfba0d3f29339"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cca7c0b7f5881dfe0291ef09ba7bb1582cb92ab0aeffd8afb00c700bf692415a"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2167d116309f564af56f9aa5e75ef710ef871c5f9b313a83050035097b56820"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cb5f152fb14857cbe7f3e8c9a5d98979c4c66319a33cad6e617f0067c9accdc4"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:87dc37f16fb5e3a28429e094145bf7c1753e32bb50f662722e378c5851f7fdc6"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e191a63a05851f8bce77bc875e75457f9b01d42843f8bd7feed2fc26bbe60833"}, - {file = "coverage-7.2.1-cp310-cp310-win32.whl", hash = "sha256:e3ea04b23b114572b98a88c85379e9e9ae031272ba1fb9b532aa934c621626d4"}, - {file = "coverage-7.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:0cf557827be7eca1c38a2480484d706693e7bb1929e129785fe59ec155a59de6"}, - {file = "coverage-7.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570c21a29493b350f591a4b04c158ce1601e8d18bdcd21db136fbb135d75efa6"}, - {file = "coverage-7.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e872b082b32065ac2834149dc0adc2a2e6d8203080501e1e3c3c77851b466f9"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac6343bae03b176e9b58104a9810df3cdccd5cfed19f99adfa807ffbf43cf9b"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abacd0a738e71b20e224861bc87e819ef46fedba2fb01bc1af83dfd122e9c319"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9256d4c60c4bbfec92721b51579c50f9e5062c21c12bec56b55292464873508"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80559eaf6c15ce3da10edb7977a1548b393db36cbc6cf417633eca05d84dd1ed"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bd7e628f6c3ec4e7d2d24ec0e50aae4e5ae95ea644e849d92ae4805650b4c4e"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09643fb0df8e29f7417adc3f40aaf379d071ee8f0350ab290517c7004f05360b"}, - {file = "coverage-7.2.1-cp311-cp311-win32.whl", hash = "sha256:1b7fb13850ecb29b62a447ac3516c777b0e7a09ecb0f4bb6718a8654c87dfc80"}, - {file = "coverage-7.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:617a94ada56bbfe547aa8d1b1a2b8299e2ec1ba14aac1d4b26a9f7d6158e1273"}, - {file = "coverage-7.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8649371570551d2fd7dee22cfbf0b61f1747cdfb2b7587bb551e4beaaa44cb97"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2b9b5e70a21474c105a133ba227c61bc95f2ac3b66861143ce39a5ea4b3f84"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82c988954722fa07ec5045c57b6d55bc1a0890defb57cf4a712ced65b26ddd"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:861cc85dfbf55a7a768443d90a07e0ac5207704a9f97a8eb753292a7fcbdfcfc"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0339dc3237c0d31c3b574f19c57985fcbe494280153bbcad33f2cdf469f4ac3e"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5928b85416a388dd557ddc006425b0c37e8468bd1c3dc118c1a3de42f59e2a54"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d3843ca645f62c426c3d272902b9de90558e9886f15ddf5efe757b12dd376f5"}, - {file = "coverage-7.2.1-cp37-cp37m-win32.whl", hash = "sha256:6a034480e9ebd4e83d1aa0453fd78986414b5d237aea89a8fdc35d330aa13bae"}, - {file = "coverage-7.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6fce673f79a0e017a4dc35e18dc7bb90bf6d307c67a11ad5e61ca8d42b87cbff"}, - {file = "coverage-7.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f099da6958ddfa2ed84bddea7515cb248583292e16bb9231d151cd528eab657"}, - {file = "coverage-7.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:97a3189e019d27e914ecf5c5247ea9f13261d22c3bb0cfcfd2a9b179bb36f8b1"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a81dbcf6c6c877986083d00b834ac1e84b375220207a059ad45d12f6e518a4e3"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2c3dde4c0b9be4b02067185136b7ee4681978228ad5ec1278fa74f5ca3e99"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a209d512d157379cc9ab697cbdbb4cfd18daa3e7eebaa84c3d20b6af0037384"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f3d07edb912a978915576a776756069dede66d012baa503022d3a0adba1b6afa"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8dca3c1706670297851bca1acff9618455122246bdae623be31eca744ade05ec"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b1991a6d64231a3e5bbe3099fb0dd7c9aeaa4275ad0e0aeff4cb9ef885c62ba2"}, - {file = "coverage-7.2.1-cp38-cp38-win32.whl", hash = "sha256:22c308bc508372576ffa3d2dbc4824bb70d28eeb4fcd79d4d1aed663a06630d0"}, - {file = "coverage-7.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0c0d46de5dd97f6c2d1b560bf0fcf0215658097b604f1840365296302a9d1fb"}, - {file = "coverage-7.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4dd34a935de268a133e4741827ae951283a28c0125ddcdbcbba41c4b98f2dfef"}, - {file = "coverage-7.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f8318ed0f3c376cfad8d3520f496946977abde080439d6689d7799791457454"}, - {file = "coverage-7.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:834c2172edff5a08d78e2f53cf5e7164aacabeb66b369f76e7bb367ca4e2d993"}, - {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d70c853f0546855f027890b77854508bdb4d6a81242a9d804482e667fff6e6"}, - {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a6450da4c7afc4534305b2b7d8650131e130610cea448ff240b6ab73d7eab63"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:99f4dd81b2bb8fc67c3da68b1f5ee1650aca06faa585cbc6818dbf67893c6d58"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bdd3f2f285ddcf2e75174248b2406189261a79e7fedee2ceeadc76219b6faa0e"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f29351393eb05e6326f044a7b45ed8e38cb4dcc38570d12791f271399dc41431"}, - {file = "coverage-7.2.1-cp39-cp39-win32.whl", hash = "sha256:e2b50ebc2b6121edf352336d503357321b9d8738bb7a72d06fc56153fd3f4cd8"}, - {file = "coverage-7.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd5a12239c0006252244f94863f1c518ac256160cd316ea5c47fb1a11b25889a"}, - {file = "coverage-7.2.1-pp37.pp38.pp39-none-any.whl", hash = "sha256:436313d129db7cf5b4ac355dd2bd3f7c7e5294af077b090b85de75f8458b8616"}, - {file = "coverage-7.2.1.tar.gz", hash = "sha256:c77f2a9093ccf329dd523a9b2b3c854c20d2a3d968b6def3b820272ca6732242"}, + {file = "coverage-7.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c90e73bdecb7b0d1cea65a08cb41e9d672ac6d7995603d6465ed4914b98b9ad7"}, + {file = "coverage-7.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e2926b8abedf750c2ecf5035c07515770944acf02e1c46ab08f6348d24c5f94d"}, + {file = "coverage-7.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57b77b9099f172804e695a40ebaa374f79e4fb8b92f3e167f66facbf92e8e7f5"}, + {file = "coverage-7.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efe1c0adad110bf0ad7fb59f833880e489a61e39d699d37249bdf42f80590169"}, + {file = "coverage-7.2.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2199988e0bc8325d941b209f4fd1c6fa007024b1442c5576f1a32ca2e48941e6"}, + {file = "coverage-7.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:81f63e0fb74effd5be736cfe07d710307cc0a3ccb8f4741f7f053c057615a137"}, + {file = "coverage-7.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:186e0fc9cf497365036d51d4d2ab76113fb74f729bd25da0975daab2e107fd90"}, + {file = "coverage-7.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:420f94a35e3e00a2b43ad5740f935358e24478354ce41c99407cddd283be00d2"}, + {file = "coverage-7.2.2-cp310-cp310-win32.whl", hash = "sha256:38004671848b5745bb05d4d621526fca30cee164db42a1f185615f39dc997292"}, + {file = "coverage-7.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:0ce383d5f56d0729d2dd40e53fe3afeb8f2237244b0975e1427bfb2cf0d32bab"}, + {file = "coverage-7.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3eb55b7b26389dd4f8ae911ba9bc8c027411163839dea4c8b8be54c4ee9ae10b"}, + {file = "coverage-7.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d2b96123a453a2d7f3995ddb9f28d01fd112319a7a4d5ca99796a7ff43f02af5"}, + {file = "coverage-7.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:299bc75cb2a41e6741b5e470b8c9fb78d931edbd0cd009c58e5c84de57c06731"}, + {file = "coverage-7.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e1df45c23d4230e3d56d04414f9057eba501f78db60d4eeecfcb940501b08fd"}, + {file = "coverage-7.2.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:006ed5582e9cbc8115d2e22d6d2144a0725db542f654d9d4fda86793832f873d"}, + {file = "coverage-7.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d683d230b5774816e7d784d7ed8444f2a40e7a450e5720d58af593cb0b94a212"}, + {file = "coverage-7.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8efb48fa743d1c1a65ee8787b5b552681610f06c40a40b7ef94a5b517d885c54"}, + {file = "coverage-7.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4c752d5264053a7cf2fe81c9e14f8a4fb261370a7bb344c2a011836a96fb3f57"}, + {file = "coverage-7.2.2-cp311-cp311-win32.whl", hash = "sha256:55272f33da9a5d7cccd3774aeca7a01e500a614eaea2a77091e9be000ecd401d"}, + {file = "coverage-7.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:92ebc1619650409da324d001b3a36f14f63644c7f0a588e331f3b0f67491f512"}, + {file = "coverage-7.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5afdad4cc4cc199fdf3e18088812edcf8f4c5a3c8e6cb69127513ad4cb7471a9"}, + {file = "coverage-7.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0484d9dd1e6f481b24070c87561c8d7151bdd8b044c93ac99faafd01f695c78e"}, + {file = "coverage-7.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d530191aa9c66ab4f190be8ac8cc7cfd8f4f3217da379606f3dd4e3d83feba69"}, + {file = "coverage-7.2.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac0f522c3b6109c4b764ffec71bf04ebc0523e926ca7cbe6c5ac88f84faced0"}, + {file = "coverage-7.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ba279aae162b20444881fc3ed4e4f934c1cf8620f3dab3b531480cf602c76b7f"}, + {file = "coverage-7.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:53d0fd4c17175aded9c633e319360d41a1f3c6e352ba94edcb0fa5167e2bad67"}, + {file = "coverage-7.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c99cb7c26a3039a8a4ee3ca1efdde471e61b4837108847fb7d5be7789ed8fd9"}, + {file = "coverage-7.2.2-cp37-cp37m-win32.whl", hash = "sha256:5cc0783844c84af2522e3a99b9b761a979a3ef10fb87fc4048d1ee174e18a7d8"}, + {file = "coverage-7.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:817295f06eacdc8623dc4df7d8b49cea65925030d4e1e2a7c7218380c0072c25"}, + {file = "coverage-7.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6146910231ece63facfc5984234ad1b06a36cecc9fd0c028e59ac7c9b18c38c6"}, + {file = "coverage-7.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:387fb46cb8e53ba7304d80aadca5dca84a2fbf6fe3faf6951d8cf2d46485d1e5"}, + {file = "coverage-7.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:046936ab032a2810dcaafd39cc4ef6dd295df1a7cbead08fe996d4765fca9fe4"}, + {file = "coverage-7.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e627dee428a176ffb13697a2c4318d3f60b2ccdde3acdc9b3f304206ec130ccd"}, + {file = "coverage-7.2.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fa54fb483decc45f94011898727802309a109d89446a3c76387d016057d2c84"}, + {file = "coverage-7.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3668291b50b69a0c1ef9f462c7df2c235da3c4073f49543b01e7eb1dee7dd540"}, + {file = "coverage-7.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7c20b731211261dc9739bbe080c579a1835b0c2d9b274e5fcd903c3a7821cf88"}, + {file = "coverage-7.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5764e1f7471cb8f64b8cda0554f3d4c4085ae4b417bfeab236799863703e5de2"}, + {file = "coverage-7.2.2-cp38-cp38-win32.whl", hash = "sha256:4f01911c010122f49a3e9bdc730eccc66f9b72bd410a3a9d3cb8448bb50d65d3"}, + {file = "coverage-7.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:c448b5c9e3df5448a362208b8d4b9ed85305528313fca1b479f14f9fe0d873b8"}, + {file = "coverage-7.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfe7085783cda55e53510482fa7b5efc761fad1abe4d653b32710eb548ebdd2d"}, + {file = "coverage-7.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9d22e94e6dc86de981b1b684b342bec5e331401599ce652900ec59db52940005"}, + {file = "coverage-7.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:507e4720791977934bba016101579b8c500fb21c5fa3cd4cf256477331ddd988"}, + {file = "coverage-7.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc4803779f0e4b06a2361f666e76f5c2e3715e8e379889d02251ec911befd149"}, + {file = "coverage-7.2.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db8c2c5ace167fd25ab5dd732714c51d4633f58bac21fb0ff63b0349f62755a8"}, + {file = "coverage-7.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4f68ee32d7c4164f1e2c8797535a6d0a3733355f5861e0f667e37df2d4b07140"}, + {file = "coverage-7.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d52f0a114b6a58305b11a5cdecd42b2e7f1ec77eb20e2b33969d702feafdd016"}, + {file = "coverage-7.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:797aad79e7b6182cb49c08cc5d2f7aa7b2128133b0926060d0a8889ac43843be"}, + {file = "coverage-7.2.2-cp39-cp39-win32.whl", hash = "sha256:db45eec1dfccdadb179b0f9ca616872c6f700d23945ecc8f21bb105d74b1c5fc"}, + {file = "coverage-7.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8dbe2647bf58d2c5a6c5bcc685f23b5f371909a5624e9f5cd51436d6a9f6c6ef"}, + {file = "coverage-7.2.2-pp37.pp38.pp39-none-any.whl", hash = "sha256:872d6ce1f5be73f05bea4df498c140b9e7ee5418bfa2cc8204e7f9b817caa968"}, + {file = "coverage-7.2.2.tar.gz", hash = "sha256:36dd42da34fe94ed98c39887b86db9d06777b1c8f860520e21126a75507024f2"}, ] [package.dependencies] @@ -913,14 +913,14 @@ files = [ [[package]] name = "dash" -version = "2.8.1" +version = "2.9.0" description = "A Python framework for building reactive web-apps. Developed by Plotly." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "dash-2.8.1-py3-none-any.whl", hash = "sha256:3a9eea30f83733df1b7631fc5248eb87445e7458394558d784c91d072b7f41aa"}, - {file = "dash-2.8.1.tar.gz", hash = "sha256:a71dd81d167fa5e0ad41f356a221357d92724ae84f9faedb6f7ffa1fddfd4969"}, + {file = "dash-2.9.0-py3-none-any.whl", hash = "sha256:f57507643d2e7f150a8067f3bbcabedaf5b33029b6a9a9915787e1526e74e460"}, + {file = "dash-2.9.0.tar.gz", hash = "sha256:d729fc6751e8e537dee700ced82255419789b8fe44226906be742ebe951e5906"}, ] [package.dependencies] @@ -928,6 +928,7 @@ beautifulsoup4 = {version = ">=4.8.2", optional = true, markers = "extra == \"te dash-core-components = "2.0.0" dash-html-components = "2.0.0" dash-table = "5.0.0" +dash-testing-stub = {version = ">=0.0.2", optional = true, markers = "extra == \"testing\""} Flask = ">=1.0.4" lxml = {version = ">=4.6.2", optional = true, markers = "extra == \"testing\""} multiprocess = {version = ">=0.70.12", optional = true, markers = "extra == \"testing\""} @@ -945,7 +946,7 @@ ci = ["black (==21.6b0)", "black (==22.3.0)", "dash-dangerously-set-inner-html", compress = ["flask-compress"] dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"] diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"] -testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] +testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] [[package]] name = "dash-bootstrap-components" @@ -1047,6 +1048,18 @@ files = [ {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, ] +[[package]] +name = "dash-testing-stub" +version = "0.0.2" +description = "Package installed with dash[testing] for optional loading of pytest dash plugin." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "dash-testing-stub-0.0.2.tar.gz", hash = "sha256:0a98f7da9fe41dd3a37d781bc1d5672319448fdf98e47fd867aff2123171a357"}, + {file = "dash_testing_stub-0.0.2-py3-none-any.whl", hash = "sha256:a44d530a77e1ede9c6528be4b5951f34c6109b419a09f2691422375ffa7d09de"}, +] + [[package]] name = "debugpy" version = "1.6.6" @@ -1359,14 +1372,14 @@ requests = "*" [[package]] name = "fonttools" -version = "4.39.0" +version = "4.39.2" description = "Tools to manipulate font files" category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.39.0-py3-none-any.whl", hash = "sha256:f5e764e1fd6ad54dfc201ff32af0ba111bcfbe0d05b24540af74c63db4ed6390"}, - {file = "fonttools-4.39.0.zip", hash = "sha256:909c104558835eac27faeb56be5a4c32694192dca123d073bf746ce9254054af"}, + {file = "fonttools-4.39.2-py3-none-any.whl", hash = "sha256:85245aa2fd4cf502a643c9a9a2b5a393703e150a6eaacc3e0e84bb448053f061"}, + {file = "fonttools-4.39.2.zip", hash = "sha256:e2d9f10337c9e3b17f9bce17a60a16a885a7d23b59b7f45ce07ea643e5580439"}, ] [package.extras] @@ -1467,14 +1480,14 @@ files = [ [[package]] name = "identify" -version = "2.5.20" +version = "2.5.21" description = "File identification library for Python" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "identify-2.5.20-py2.py3-none-any.whl", hash = "sha256:5dfef8a745ca4f2c95f27e9db74cb4c8b6d9916383988e8791f3595868f78a33"}, - {file = "identify-2.5.20.tar.gz", hash = "sha256:c8b288552bc5f05a08aff09af2f58e6976bf8ac87beb38498a0e3d98ba64eb18"}, + {file = "identify-2.5.21-py2.py3-none-any.whl", hash = "sha256:69edcaffa8e91ae0f77d397af60f148b6b45a8044b2cc6d99cafa5b04793ff00"}, + {file = "identify-2.5.21.tar.gz", hash = "sha256:7671a05ef9cfaf8ff63b15d45a91a1147a03aaccb2976d4e9bd047cbbc508471"}, ] [package.extras] @@ -1859,19 +1872,19 @@ test = ["codecov", "coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-co [[package]] name = "jupyter-core" -version = "5.2.0" +version = "5.3.0" description = "Jupyter core package. A base package on which Jupyter projects rely." category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_core-5.2.0-py3-none-any.whl", hash = "sha256:4bdc2928c37f6917130c667d8b8708f20aee539d8283c6be72aabd2a4b4c83b0"}, - {file = "jupyter_core-5.2.0.tar.gz", hash = "sha256:1407cdb4c79ee467696c04b76633fc1884015fa109323365a6372c8e890cc83f"}, + {file = "jupyter_core-5.3.0-py3-none-any.whl", hash = "sha256:d4201af84559bc8c70cead287e1ab94aeef3c512848dde077b7684b54d67730d"}, + {file = "jupyter_core-5.3.0.tar.gz", hash = "sha256:6db75be0c83edbf1b7c9f91ec266a9a24ef945da630f3120e1a0046dc13713fc"}, ] [package.dependencies] platformdirs = ">=2.5" -pywin32 = {version = ">=1.0", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} traitlets = ">=5.3" [package.extras] @@ -1930,14 +1943,14 @@ test = ["click", "coverage", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>= [[package]] name = "jupyter-server" -version = "2.4.0" +version = "2.5.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." category = "main" optional = true python-versions = ">=3.8" files = [ - {file = "jupyter_server-2.4.0-py3-none-any.whl", hash = "sha256:cc22792281bfb0131a728414f28ae74883b44ad6d009971aa975cae9bcc650de"}, - {file = "jupyter_server-2.4.0.tar.gz", hash = "sha256:f31f0ba2c3c44f07143bfa03fb07dd0253f857eb63f0c26f2fea955f04a49765"}, + {file = "jupyter_server-2.5.0-py3-none-any.whl", hash = "sha256:e6bc1e9e96d7c55b9ce9699ff6cb9a910581fe7349e27c40389acb67632e24c0"}, + {file = "jupyter_server-2.5.0.tar.gz", hash = "sha256:9fde612791f716fd34d610cd939704a9639643744751ba66e7ee8fdc9cead07e"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index 26cb09d01..6cbcf982a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "SpectraFit" -version = "1.0.0a7" +version = "1.0.0a8" description = "Fast fitting of 2D- and 3D-Spectra with established routines" readme = "README.md" authors = ["Anselm Hahn <[email protected]>"] diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py index 14cb00e4a..d4d81e7e9 100644 --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "1.0.0a7" +__version__ = "1.0.0a8"
Anselmoo__spectrafit-701
[Bug]: ASCII Char in creating branch ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Behavior Is crashing ### Expected Behavior Is realising a change in changeling ### Steps To Reproduce _No response_ ### ⚙️ Environment ```markdown - OS: - Python: - spectrafit: ``` ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0b1\"\n", "path": "spectrafit/__init__.py" } ]
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0b2\"\n", "path": "spectrafit/__init__.py" } ]
diff --git a/.github/labeler.yml b/.github/labeler.yml index d33d238b4..ca2898265 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -20,15 +20,15 @@ pre-commit: - .pre-commit-config.yaml - .pre-commit-hooks.yaml python: - - all: ["spectrafit/**/*.py", "!spectrafit/**/test_*.py"] + - all: ["./spectrafit/**/*.py", "!./spectrafit/**/test_*.py"] testing: - - all: ["spectrafit/**/test_*.py"] + - all: ["./spectrafit/**/test_*.py"] docker: - Dockerfile - .dockerignore - .devcontainer/Dockerfile release: - - all: ["spectrafit/__init__.py", "pyproject.toml"] + - "spectrafit/__init__.py" vendor: - vendor/** maintenance: @@ -43,7 +43,7 @@ maintenance: - .pylintrc - .sonarcloud.properties - .sourcery.yaml - - all: ["!spectrafit/__init__.py", "pyproject.toml"] + - "pyproject.toml" license: - LICENSE security: diff --git a/.github/workflows/update-changelog.yaml b/.github/workflows/update-changelog.yaml index 0688d5a18..6f73bcc14 100644 --- a/.github/workflows/update-changelog.yaml +++ b/.github/workflows/update-changelog.yaml @@ -18,18 +18,13 @@ jobs: with: release-notes: ${{ github.event.release.body }} latest-version: ${{ github.event.release.name }} - - name: Set git config - run: | - git config --local user.email "[email protected]" - git config --local user.name "GitHub Action" - - name: Create release branch - run: | - git checkout -b release/${{ github.event.release.name }} - git add CHANGELOG.md - git commit -m "Update CHANGELOG for ${{ github.event.release.name }}" - git push --set-upstream origin release/${{ github.event.release.name }} - - name: Change back to main branch - run: git checkout main + - name: Commit updated CHANGELOG + uses: stefanzweifel/git-auto-commit-action@v4 + with: + branch: release/${{ github.event.release.tag_name }} + commit_message: Update CHANGELOG + file_pattern: CHANGELOG.md + create_branch: true Release-Documentation: name: Build Documentation needs: Changelog-Update diff --git a/pyproject.toml b/pyproject.toml index 50f2ad3c1..7b9728250 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "SpectraFit" -version = "1.0.0b1" +version = "1.0.0b2" description = "Fast fitting of 2D- and 3D-Spectra with established routines" readme = "README.md" authors = ["Anselm Hahn <[email protected]>"] diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py index 339be6ef1..6ad73a32d 100644 --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "1.0.0b1" +__version__ = "1.0.0b2"
Anselmoo__spectrafit-662
[Docs]: Using mike for versioning docs ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Missing Information in the Docs https://squidfunk.github.io/mkdocs-material/setup/setting-up-versioning/ ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a2\"\n", "path": "spectrafit/__init__.py" } ]
[ { "content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a3\"\n", "path": "spectrafit/__init__.py" } ]
diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml index 639f4dd2e..ce13e12c6 100644 --- a/.github/workflows/python-ci.yml +++ b/.github/workflows/python-ci.yml @@ -107,7 +107,7 @@ jobs: run: poetry run mkdocs build --clean - name: Deploy documentation develops if: contains(github.ref, 'refs/heads/main') - run: poetry run mkdocs gh-deploy --clean --force + run: poetry run mike deploy --push --update-aliases ${GITHUB_REF_NAME::-2} latest devcontainer: name: Devcontainer needs: build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a98448fb3..adcdaca66 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: rev: v4.4.0 hooks: - id: check-yaml - exclude: mkdocs.yaml + exclude: mkdocs.yml - id: check-toml exclude: poetry.lock - id: check-json diff --git a/docs/changelog.md b/docs/changelog.md index 4a0b9ee05..786b75d5a 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,365 +1 @@ --8<-- "CHANGELOG.md" - -## Old Changes - ---- - -## v0.16.0 - ---- - -- Add [Cumulative Distribution Function][38] to the `SpectraFit` package -- Refactor the `model.py` of `SpectraFit` package - -## v0.15.1 - ---- - -- Maintenance of the `SpectraFit` package - -## v0.15.0 - ---- - -- Add `plugins` to the `SpectraFit` package for working with [RIXS][36] data -- `pikle`-file converter and visualizer for [RIXS][36] data -- Simplify the `SpectraFit` continous deployment by using [build][37] - -## v0.14.0 - ---- - -- Add `SpectraFit` to [Conda-Forge][2] as [spectrafit][3] package. -- Extend `SpectraFit` to print current peak values as `dataframe` - in Jupyter-Notebook. -- Add converters for _input-_, _output-_, and _data-files_. -- Add extended _output-print_ for `SpectraFit` in Jupyter-Notebook. - -## v0.13.1 - ---- - -- Fix crashed regression analysis due to _negative_ values in the `y`-data. - -## v0.13.0 - ---- - -- Update `devcontainer` to use `VScode`. -- Removed [`fish-shell`][34] from `devcontainer`. -- Applied code refactoring performed by [Copilot Labs][35] - -## v0.12.5 - ---- - -- Updating `spectrafit`-installer in `Dockerfile`. -- Adding images to `Jupyter-Notebook-Examples`. - -## v0.12.4 - ---- - -- Include metric plots into the [jupyter-notebook][25] interface. -- Removed `dash` dependency from `pyproject.toml`. -- Removed `spectrafit`-dependency from `Dockerfile`. - -## v0.12.3 - ---- - -- Update `Dockerimage` to the previous version of the [Conda-Forge-Recipe][33]. -- Reformat license in the docs. - -## v0.12.2. - ---- - -- Update `Dockerimage` to use `SpectraFit` in the Jupyter Notebook. - -## v0.12.1 - ---- - -- New release for triggering `Conda-Forge` build - -## v0.12.0 - ---- - -- Adding metrics for regression analysis as part of the post analysis; see also - [sklearn-metrics-regression][23] -- Add [art][24] for generating ASCII Decor in the terminal / output. -- Using transposed dataframes for the tabulated output to the terminal. -- Change `global` to `global_` to avoid keyword clash. -- Add plugin for [jupyter-notebook][25] integration in VSCode; see also - [jupyter-notebook-VSCode][26] -- Change `Dockerimage` to use [jupyter/scipy][27] as base image, see also - [SpectraFit-Dockerfile][31] -- Adding devcontainer for VSCode; see also [devcontainer][30] -- Change from `to_dict(orient="list")` to `to_dict(orient="split")` for the - `json` output for including the index. -- Add link to the [GitHub Advisory Database][28] for security issues in the - `Security nodes`. -- Add CI-Test for `devcontainer` in VSCode; see also [devcontainer-ci][29]. -- Add [`pyupgrade`][32] to pre-commit hooks. - -## v0.11.0 - ---- - -- Focus on maintenance fixed for the `spectrafit` package: - - [Synk][21] security vulnerabilities fixed - - [SonarCloud][22] code quality fixed - -## v0.10.4 - ---- - -- Update docs with topics: ``Changelog`, `README`, `Security`, `Licencse` -- Add docs for `conda` installation - -## v0.10.1 - v.10.3 - ---- - -- Downgrading `numdifftools` and `openpyxl` for compatibility with the - [conda-forge-formula][20] - -## v0.10.0 - ---- - -- Refactor the `pyproject.toml` file for getting it working with `conda`. - -## v0.9.0 - ---- - -- Adding Python 3.10 support -- Adding [Athena file][19] support -- Increasing code quality by using [`pylint`][18] -- Adding plugin support for `SpectraFit` - - Starting with input file converter - -## v0.8.6 - ---- - -- Updating the way of poetry caching -- Update docker actions -- Fixed typo in README.md - -## v0.8.3 - v0.8.5 - ---- - -- Dependency and GitHub Action Updates - -## v0.8.2 - ---- - -- Refactor buffer of the _covariance matrix_ - -## v0.8.1 - ---- - -- Updating all `raise` statements -- Add [prettier][17] to CI/CD workflow - -## v0.8.0 - ---- - -- Introduced smaller enhancement: - - Printout of the fit parameters in the output file: True/False &#8594; [0, 1, - 2] - - Keyword check for `SpectraFit` -- Fix smaller bugs: - - `Pseudo-Voigt` power factor from 0.25 &#8594; 0.2 - - Correct type-definitions for `SpectraFit` - -## v0.7.1 - ---- - -- Maintenance of the `SpectraFit` package - -## v0.7.0 - ---- - -- Introducing automatic peak detection for spectra fitting; see also SciPy's - [`find_peaks`][16] - -## v0.6.1 - ---- - -- Reformat the [README.md][14] for [PyPi - SpectraFit][15] - -## v0.6.0 - ---- - -- Introduce the **Global-Fitting** option, which allows to fit the several - spectra with a single model. -- Changed the input for **Pseudo-Voigt**: - - _`fwhm_g`_ &#8594; **`fwhmg`** - - _`fwhm_l`_ &#8594; **`fwhml`** -- Changed the input for **Gaussian-FWHM** and **Lorentzian-FWHM**: - - _`fwhm`_ &#8594; **`fwhmg`** - - _`fwhm`_ &#8594; **`fwhml`** -- Changed the input for **Voigt-FWHM**: - - _`fwhm`_ &#8594; **`fwhmv`** -- Adding error-handling for not determatination of _Confiden Interval_. - -## v0.5.6 - ---- - -- CI/CD pipeline is now token-protected. - -## v0.5.5 - ---- - -- Removed the `setuptools==57.5.0` limitation due to formally `Python2.7`. - -## v0.5.4 - ---- - -- Adding a [stale boot][13] for keeping the issue and PRs up-to-date. - -## v0.5.3 - ---- - -- Extending unit tests to the `SpectraFit` package. - -## v0.5.2 - ---- - -- Adding maintainer to the `pyproject.yml` file. - -## v0.5.1 - ---- - -- Minor fix of broken links in docs. - -## v0.5.0 - ---- - -- Rewrite `SpectraFit` main to become a more object-oriented approach. -- Increase the coverage quality of the tests. - -## v0.4.2 - ---- - -- Removed the [`GIT LFS`][12] integration for avoiding trouble with broken - images. -- Adding [`YAML`-Forms][11] as pull request template. - -## v0.4.1 - ---- - -- Change from `MarkDown` based issue templates to [`YAML`-Forms][11] by GitHub - as issue and feature request templates. - -## v0.4.0 - ---- - -- Create [SECURITY policy][8] for the `spectrafit` application. -- Adding [dependabot][9] for updating `poetry.lock`, `pyproject.toml` and GitHub - Action workflow. -- Adding a [codeql-analysis][10] -- Increasing the coverage level - -## v0.3.2 - ---- - -- Replaced poetry hosted `pre-commit` hook with [pre-commit action][6]. -- Extend `pre-commit` hook [MyPy][7]. -- Fixed a bug for the energy range separation. -- Removed the `--display` option. - -## v0.3.1 - ---- - -- Introducing `pytest` and `coverage` for increasing code quality. -- Adding [`codecov.io`][5] into the GitHub actions workflow. -- Updating the [contribution guideline][4] with inside milestones. - -## v0.2.4 - ---- - -- Adding a Docker Workflow via [https://ghcr.io/anselmoo/spectrafit:latest][2]. -- Poetry for PyPi release via [https://pypi.org/project/spectrafit/][3]. - -## v0.2.0 - ---- - -- Changed from text file based input to object based input. -- Extended `matplotlib` with `seaborn` for the plotting. -- Start outsourcing code into submodules. - -## v0.1.0 - ---- - -- The orginal program `fastfit` is now running as `spectrafit` with an own - installer besed on [POETRY](https://python-poetry.org). - -> See also: [https://github.com/Anselmoo/spectrafit/releases][1] - -[1]: https://github.com/Anselmoo/spectrafit/releases -[2]: https://ghcr.io/anselmoo/spectrafit:latest -[3]: https://pypi.org/project/spectrafit/ -[4]: https://github.com/Anselmoo/spectrafit/blob/main/CONTRIBUTING.md -[5]: https://codecov.io/gh/Anselmoo/spectrafit -[6]: https://github.com/marketplace/actions/pre-commit -[7]: https://mypy.readthedocs.io/en/stable/ -[8]: https://github.com/Anselmoo/spectrafit/security -[9]: https://dependabot.com -[10]: https://securitylab.github.com/tools/codeql/ -[11]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository -[12]: https://git-lfs.github.com -[13]: https://github.com/apps/stale -[14]: https://github.com/Anselmoo/spectrafit/blob/main/README.md -[15]: https://pypi.org/project/spectrafit/ -[16]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html -[17]: https://prettier.io -[18]: https://github.com/PyCQA/pylint -[19]: http://bruceravel.github.io/demeter/documents/Athena/index.html -[20]: https://anaconda.org/conda-forge/spectrafit -[21]: https://docs.snyk.io/products/snyk-open-source/language-and-package-manager-support/snyk-for-python -[22]: https://sonarcloud.io -[23]: https://scikit-learn.org/stable/modules/model_evaluation.html -[24]: https://www.4r7.ir -[25]: https://jupyter.org -[26]: https://code.visualstudio.com/docs/datascience/jupyter-notebooks -[27]: https://github.com/jupyter/docker-stacks/blob/main/scipy-notebook/Dockerfile -[28]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Apip -[29]: https://github.com/marketplace/actions/devcontainers-ci -[30]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit-devcontainer -[31]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit -[32]: https://github.com/Anselmoo/spectrafit/blob/6ca69132a199d3bf458927cf3d4ce6f8fdef0eae/.pre-commit-config.yaml -[33]: https://github.com/conda-forge/spectrafit-feedstock -[34]: https://fishshell.com -[35]: https://githubnext.com/projects/copilot-labs/ -[36]: https://en.wikipedia.org/wiki/Resonant_inelastic_X-ray_scattering -[37]: https://github.com/pypa/build -[38]: https://en.wikipedia.org/wiki/Cumulative_distribution_function diff --git a/docs/changelogs/v0.x.x.md b/docs/changelogs/v0.x.x.md new file mode 100644 index 000000000..56cf28523 --- /dev/null +++ b/docs/changelogs/v0.x.x.md @@ -0,0 +1,363 @@ +# Change Log for SpectraFit package for 0.x.x + +--- + +## v0.16.0 + +--- + +- Add [Cumulative Distribution Function][38] to the `SpectraFit` package +- Refactor the `model.py` of `SpectraFit` package + +## v0.15.1 + +--- + +- Maintenance of the `SpectraFit` package + +## v0.15.0 + +--- + +- Add `plugins` to the `SpectraFit` package for working with [RIXS][36] data +- `pikle`-file converter and visualizer for [RIXS][36] data +- Simplify the `SpectraFit` continous deployment by using [build][37] + +## v0.14.0 + +--- + +- Add `SpectraFit` to [Conda-Forge][2] as [spectrafit][3] package. +- Extend `SpectraFit` to print current peak values as `dataframe` + in Jupyter-Notebook. +- Add converters for _input-_, _output-_, and _data-files_. +- Add extended _output-print_ for `SpectraFit` in Jupyter-Notebook. + +## v0.13.1 + +--- + +- Fix crashed regression analysis due to _negative_ values in the `y`-data. + +## v0.13.0 + +--- + +- Update `devcontainer` to use `VScode`. +- Removed [`fish-shell`][34] from `devcontainer`. +- Applied code refactoring performed by [Copilot Labs][35] + +## v0.12.5 + +--- + +- Updating `spectrafit`-installer in `Dockerfile`. +- Adding images to `Jupyter-Notebook-Examples`. + +## v0.12.4 + +--- + +- Include metric plots into the [jupyter-notebook][25] interface. +- Removed `dash` dependency from `pyproject.toml`. +- Removed `spectrafit`-dependency from `Dockerfile`. + +## v0.12.3 + +--- + +- Update `Dockerimage` to the previous version of the [Conda-Forge-Recipe][33]. +- Reformat license in the docs. + +## v0.12.2. + +--- + +- Update `Dockerimage` to use `SpectraFit` in the Jupyter Notebook. + +## v0.12.1 + +--- + +- New release for triggering `Conda-Forge` build + +## v0.12.0 + +--- + +- Adding metrics for regression analysis as part of the post analysis; see also + [sklearn-metrics-regression][23] +- Add [art][24] for generating ASCII Decor in the terminal / output. +- Using transposed dataframes for the tabulated output to the terminal. +- Change `global` to `global_` to avoid keyword clash. +- Add plugin for [jupyter-notebook][25] integration in VSCode; see also + [jupyter-notebook-VSCode][26] +- Change `Dockerimage` to use [jupyter/scipy][27] as base image, see also + [SpectraFit-Dockerfile][31] +- Adding devcontainer for VSCode; see also [devcontainer][30] +- Change from `to_dict(orient="list")` to `to_dict(orient="split")` for the + `json` output for including the index. +- Add link to the [GitHub Advisory Database][28] for security issues in the + `Security nodes`. +- Add CI-Test for `devcontainer` in VSCode; see also [devcontainer-ci][29]. +- Add [`pyupgrade`][32] to pre-commit hooks. + +## v0.11.0 + +--- + +- Focus on maintenance fixed for the `spectrafit` package: + - [Synk][21] security vulnerabilities fixed + - [SonarCloud][22] code quality fixed + +## v0.10.4 + +--- + +- Update docs with topics: ``Changelog`, `README`, `Security`, `Licencse` +- Add docs for `conda` installation + +## v0.10.1 - v.10.3 + +--- + +- Downgrading `numdifftools` and `openpyxl` for compatibility with the + [conda-forge-formula][20] + +## v0.10.0 + +--- + +- Refactor the `pyproject.toml` file for getting it working with `conda`. + +## v0.9.0 + +--- + +- Adding Python 3.10 support +- Adding [Athena file][19] support +- Increasing code quality by using [`pylint`][18] +- Adding plugin support for `SpectraFit` + - Starting with input file converter + +## v0.8.6 + +--- + +- Updating the way of poetry caching +- Update docker actions +- Fixed typo in README.md + +## v0.8.3 - v0.8.5 + +--- + +- Dependency and GitHub Action Updates + +## v0.8.2 + +--- + +- Refactor buffer of the _covariance matrix_ + +## v0.8.1 + +--- + +- Updating all `raise` statements +- Add [prettier][17] to CI/CD workflow + +## v0.8.0 + +--- + +- Introduced smaller enhancement: + - Printout of the fit parameters in the output file: True/False &#8594; [0, 1, + 2] + - Keyword check for `SpectraFit` +- Fix smaller bugs: + - `Pseudo-Voigt` power factor from 0.25 &#8594; 0.2 + - Correct type-definitions for `SpectraFit` + +## v0.7.1 + +--- + +- Maintenance of the `SpectraFit` package + +## v0.7.0 + +--- + +- Introducing automatic peak detection for spectra fitting; see also SciPy's + [`find_peaks`][16] + +## v0.6.1 + +--- + +- Reformat the [README.md][14] for [PyPi - SpectraFit][15] + +## v0.6.0 + +--- + +- Introduce the **Global-Fitting** option, which allows to fit the several + spectra with a single model. +- Changed the input for **Pseudo-Voigt**: + - _`fwhm_g`_ &#8594; **`fwhmg`** + - _`fwhm_l`_ &#8594; **`fwhml`** +- Changed the input for **Gaussian-FWHM** and **Lorentzian-FWHM**: + - _`fwhm`_ &#8594; **`fwhmg`** + - _`fwhm`_ &#8594; **`fwhml`** +- Changed the input for **Voigt-FWHM**: + - _`fwhm`_ &#8594; **`fwhmv`** +- Adding error-handling for not determatination of _Confiden Interval_. + +## v0.5.6 + +--- + +- CI/CD pipeline is now token-protected. + +## v0.5.5 + +--- + +- Removed the `setuptools==57.5.0` limitation due to formally `Python2.7`. + +## v0.5.4 + +--- + +- Adding a [stale boot][13] for keeping the issue and PRs up-to-date. + +## v0.5.3 + +--- + +- Extending unit tests to the `SpectraFit` package. + +## v0.5.2 + +--- + +- Adding maintainer to the `pyproject.yml` file. + +## v0.5.1 + +--- + +- Minor fix of broken links in docs. + +## v0.5.0 + +--- + +- Rewrite `SpectraFit` main to become a more object-oriented approach. +- Increase the coverage quality of the tests. + +## v0.4.2 + +--- + +- Removed the [`GIT LFS`][12] integration for avoiding trouble with broken + images. +- Adding [`YAML`-Forms][11] as pull request template. + +## v0.4.1 + +--- + +- Change from `MarkDown` based issue templates to [`YAML`-Forms][11] by GitHub + as issue and feature request templates. + +## v0.4.0 + +--- + +- Create [SECURITY policy][8] for the `spectrafit` application. +- Adding [dependabot][9] for updating `poetry.lock`, `pyproject.toml` and GitHub + Action workflow. +- Adding a [codeql-analysis][10] +- Increasing the coverage level + +## v0.3.2 + +--- + +- Replaced poetry hosted `pre-commit` hook with [pre-commit action][6]. +- Extend `pre-commit` hook [MyPy][7]. +- Fixed a bug for the energy range separation. +- Removed the `--display` option. + +## v0.3.1 + +--- + +- Introducing `pytest` and `coverage` for increasing code quality. +- Adding [`codecov.io`][5] into the GitHub actions workflow. +- Updating the [contribution guideline][4] with inside milestones. + +## v0.2.4 + +--- + +- Adding a Docker Workflow via [https://ghcr.io/anselmoo/spectrafit:latest][2]. +- Poetry for PyPi release via [https://pypi.org/project/spectrafit/][3]. + +## v0.2.0 + +--- + +- Changed from text file based input to object based input. +- Extended `matplotlib` with `seaborn` for the plotting. +- Start outsourcing code into submodules. + +## v0.1.0 + +--- + +- The orginal program `fastfit` is now running as `spectrafit` with an own + installer besed on [POETRY](https://python-poetry.org). + +> See also: [https://github.com/Anselmoo/spectrafit/releases][1] + +[1]: https://github.com/Anselmoo/spectrafit/releases +[2]: https://ghcr.io/anselmoo/spectrafit:latest +[3]: https://pypi.org/project/spectrafit/ +[4]: https://github.com/Anselmoo/spectrafit/blob/main/CONTRIBUTING.md +[5]: https://codecov.io/gh/Anselmoo/spectrafit +[6]: https://github.com/marketplace/actions/pre-commit +[7]: https://mypy.readthedocs.io/en/stable/ +[8]: https://github.com/Anselmoo/spectrafit/security +[9]: https://dependabot.com +[10]: https://securitylab.github.com/tools/codeql/ +[11]: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository +[12]: https://git-lfs.github.com +[13]: https://github.com/apps/stale +[14]: https://github.com/Anselmoo/spectrafit/blob/main/README.md +[15]: https://pypi.org/project/spectrafit/ +[16]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html +[17]: https://prettier.io +[18]: https://github.com/PyCQA/pylint +[19]: http://bruceravel.github.io/demeter/documents/Athena/index.html +[20]: https://anaconda.org/conda-forge/spectrafit +[21]: https://docs.snyk.io/products/snyk-open-source/language-and-package-manager-support/snyk-for-python +[22]: https://sonarcloud.io +[23]: https://scikit-learn.org/stable/modules/model_evaluation.html +[24]: https://www.4r7.ir +[25]: https://jupyter.org +[26]: https://code.visualstudio.com/docs/datascience/jupyter-notebooks +[27]: https://github.com/jupyter/docker-stacks/blob/main/scipy-notebook/Dockerfile +[28]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Apip +[29]: https://github.com/marketplace/actions/devcontainers-ci +[30]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit-devcontainer +[31]: https://github.com/Anselmoo/spectrafit/pkgs/container/spectrafit +[32]: https://github.com/Anselmoo/spectrafit/blob/6ca69132a199d3bf458927cf3d4ce6f8fdef0eae/.pre-commit-config.yaml +[33]: https://github.com/conda-forge/spectrafit-feedstock +[34]: https://fishshell.com +[35]: https://githubnext.com/projects/copilot-labs/ +[36]: https://en.wikipedia.org/wiki/Resonant_inelastic_X-ray_scattering +[37]: https://github.com/pypa/build +[38]: https://en.wikipedia.org/wiki/Cumulative_distribution_function diff --git a/docs/doc/models.md b/docs/doc/models.md index d273a3ce7..7c3e1ba28 100644 --- a/docs/doc/models.md +++ b/docs/doc/models.md @@ -104,5 +104,4 @@ implemented in the `constants` module. ::: spectrafit.models.Constants -[1] -https://lmfit.github.io/lmfit-py/examples/documentation/model_two_components.html#sphx-glr-examples-documentation-model-two-components-py +[1]: https://lmfit.github.io/lmfit-py/examples/documentation/model_two_components.html#sphx-glr-examples-documentation-model-two-components-py diff --git a/mkdocs.yaml b/mkdocs.yml similarity index 96% rename from mkdocs.yaml rename to mkdocs.yml index a773ee295..1298d9184 100644 --- a/mkdocs.yaml +++ b/mkdocs.yml @@ -100,7 +100,10 @@ markdown_extensions: - pymdownx.emoji: emoji_index: !!python/name:materialx.emoji.twemoji emoji_generator: !!python/name:materialx.emoji.to_svg - - pymdownx.highlight + - pymdownx.highlight: + use_pygments: true + pygments_lang_class: true + - pymdownx.magiclink - pymdownx.inlinehilite - pymdownx.keys - pymdownx.magiclink: @@ -183,5 +186,7 @@ nav: - Code of Conduct: code_of_conduct.md - Security: security.md - Releases: - - Changelog: changelog.md + - Changelog: + - v1.x.x: changelog.md + - v0.x.x: changelogs/v0.x.x.md - Contributors: contributors.md diff --git a/poetry.lock b/poetry.lock index 21ee1fa4f..1ab457d63 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2761,6 +2761,28 @@ files = [ {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, ] +[[package]] +name = "mike" +version = "1.1.2" +description = "Manage multiple versions of your MkDocs-powered documentation" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mike-1.1.2-py3-none-any.whl", hash = "sha256:4c307c28769834d78df10f834f57f810f04ca27d248f80a75f49c6fa2d1527ca"}, + {file = "mike-1.1.2.tar.gz", hash = "sha256:56c3f1794c2d0b5fdccfa9b9487beb013ca813de2e3ad0744724e9d34d40b77b"}, +] + +[package.dependencies] +jinja2 = "*" +mkdocs = ">=1.0" +pyyaml = ">=5.1" +verspec = "*" + +[package.extras] +dev = ["coverage", "flake8 (>=3.0)", "shtab"] +test = ["coverage", "flake8 (>=3.0)", "shtab"] + [[package]] name = "missingno" version = "0.4.2" @@ -5329,14 +5351,14 @@ dev = ["flake8 (<4.0.0)", "flake8-annotations", "flake8-bugbear", "flake8-commas [[package]] name = "urllib3" -version = "1.26.14" +version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"}, - {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"}, + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] [package.dependencies] @@ -5364,6 +5386,21 @@ files = [ {file = "urllib3_secure_extra-0.1.0-py2.py3-none-any.whl", hash = "sha256:f7adcb108b4d12a4b26b99eb60e265d087f435052a76aefa396b6ee85e9a6ef9"}, ] +[[package]] +name = "verspec" +version = "0.1.0" +description = "Flexible version handling" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31"}, + {file = "verspec-0.1.0.tar.gz", hash = "sha256:c4504ca697b2056cdb4bfa7121461f5a0e81809255b41c03dda4ba823637c01e"}, +] + +[package.extras] +test = ["coverage", "flake8 (>=3.7)", "mypy", "pretend", "pytest"] + [[package]] name = "virtualenv" version = "20.20.0" @@ -5788,4 +5825,4 @@ jupyter-dash = ["dash-bootstrap-components", "dash-bootstrap-templates", "ipywid [metadata] lock-version = "2.0" python-versions = ">=3.8,<3.11" -content-hash = "a4a6924319fdee4d3a8e2bce431d1ddc1941c604766de1a68e1479b893c649be" +content-hash = "070508548dba139057f7cb191de19c0b058e2d4da7cfa0325fe38e27cd3895e3" diff --git a/pyproject.toml b/pyproject.toml index 02527df97..8007686d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "SpectraFit" -version = "1.0.0a2" +version = "1.0.0a3" description = "Fast fitting of 2D- and 3D-Spectra with established routines" readme = "README.md" authors = ["Anselm Hahn <[email protected]>"] @@ -95,10 +95,13 @@ exclude = ["docs/", "tools/"] mkdocs-jupyter = ">=0.21,<0.23" mkdocstrings = { extras = ["python"], version = ">=0.19,<0.21" } mkdocs-material = "^8.5.9" - mkdocs-minify-plugin = ">=0.5,<0.7" mathjax = "^0.1.2" mkdocs-literate-nav = ">=0.4.1,<0.7.0" mkdocs-section-index = "^0.3.4" + mike = "^1.1.2" + mkdocs-autorefs = "^0.4.1" + mkdocs-minify-plugin = "^0.6.2" + [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py index 08047802a..484537753 100644 --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "1.0.0a2" +__version__ = "1.0.0a3"
cupy__cupy-1795
Complex scalar is not correctly casted ``` >>> cupy.arange(3, dtype='e') + 1j array([0.+1.j, 1.+1.j, 2.+1.j]) >>> numpy.arange(3, dtype='e') + 1j array([0.+1.j, 1.+1.j, 2.+1.j], dtype=complex64) ``` No problem with floats: ``` >>> cupy.arange(3, dtype='e') + 1e30 array([1.e+30, 1.e+30, 1.e+30], dtype=float32) >>> cupy.arange(3, dtype='e') + 1e100 array([1.e+100, 1.e+100, 1.e+100]) ``` Test complex in TestArrayElementwiseOp
[ { "content": "import functools\nimport six\nfrom six.moves import builtins\nimport string\nimport threading\nimport warnings\n\nimport numpy\n\nimport cupy\nfrom cupy.core._dtype import get_dtype\nfrom cupy.core import core\n\n\n_thread_local = threading.local()\n\n_kind_score = {\n 'b': 0,\n 'u': 1,\n 'i': 1,\n 'f': 2,\n 'c': 3,\n}\n\n_dtype_to_ctype = {\n numpy.dtype('float64'): 'double',\n numpy.dtype('float32'): 'float',\n numpy.dtype('float16'): 'float16',\n numpy.dtype('complex128'): 'complex<double>',\n numpy.dtype('complex64'): 'complex<float>',\n numpy.dtype('int64'): 'long long',\n numpy.dtype('int32'): 'int',\n numpy.dtype('int16'): 'short',\n numpy.dtype('int8'): 'signed char',\n numpy.dtype('uint64'): 'unsigned long long',\n numpy.dtype('uint32'): 'unsigned int',\n numpy.dtype('uint16'): 'unsigned short',\n numpy.dtype('uint8'): 'unsigned char',\n numpy.dtype('bool'): 'bool',\n}\n\n_dtype_list = [numpy.dtype(_) for _ in '?bhilqBHILQefdFD']\n\n\nclass Submodule(object):\n \"\"\"Ufunc or elementwise kernel with types.\n\n Attributes:\n name (str): The name of submodule\n in_params (list of tuples of dtype and str):\n The tuple of dtype and name of input parameters.\n out_params (list of tuples of dtype and str):\n The tuple of dtype and name of output parameters.\n op (str): The operation code.\n preamble (str): The preamble code.\n dtypes (list of dtypes): The list of dtypes of the parameters.\n \"\"\"\n\n def __init__(self, ufunc, in_params, out_params, op):\n self.name = ufunc.name\n self.in_params = in_params\n self.out_params = out_params\n self.op = op\n self.preamble = ufunc._preamble\n self.dtypes = [dtype for dtype, _ in self.in_params + self.out_params]\n\n def __repr__(self):\n return '<Submodule {}>'.format(self.name)\n\n def fcall(self, args):\n return self.name + '(' + ', '.join(args) + ');\\n'\n\n def key(self):\n return (self.name, tuple(self.dtypes))\n\n def code(self):\n params = ', '.join('{} &{}'.format(_dtype_to_ctype[t], s)\n for t, s in self.in_params + self.out_params)\n typedef = ''.join('typedef {} {}_type;\\n'.format(_dtype_to_ctype[t], s)\n for t, s in self.in_params + self.out_params)\n module_code = string.Template('''\n __device__ void ${name}(${parameters}) {\n ${typedef}\n ${operation};\n }\n ''').substitute(\n name=self.name,\n parameters=params,\n operation=self.op,\n typedef=typedef)\n return module_code + '\\n'\n\n\nclass _FusionVarCUDA(object):\n\n \"\"\"Local variable in CUDA program.\n\n Attributes:\n index (int): The name of the variable.\n dtype (dtype): The dtype of the variable.\n const (any of primitive types): The constant value (or None)\n \"\"\"\n\n def __init__(self, index, dtype, const=None):\n self.index = index\n self.dtype = dtype\n self.const = const\n self.mutable = False\n\n def __repr__(self):\n return 'v{}'.format(self.index)\n\n def mutate(self):\n self.mutable = True\n\n def declaration(self):\n c = self.const\n val = numpy.asscalar(c) if hasattr(c, 'dtype') else c\n ctype = _dtype_to_ctype[self.dtype]\n\n if self.const is None:\n return '{} v{};\\n'.format(ctype, self.index)\n\n if isinstance(val, bool):\n init = '= {}'.format(str(c).lower())\n elif isinstance(val, complex):\n init = '({}, {})'.format(c.real, c.imag)\n elif isinstance(val, six.integer_types + (float,)):\n init = '= {}'.format(c)\n else:\n raise TypeError('Invalid constant type: {}'.format(type(c)))\n return 'const {} v{} {};\\n'.format(ctype, self.index, init)\n\n def declaration_in_param(self):\n non_const = '_non_const ' if self.mutable else ''\n return '{}{} v{}'.format(non_const, self.dtype, self.index)\n\n def declaration_out_param(self):\n return '{} v{}'.format(self.dtype, self.index)\n\n\nclass FusionOp(object):\n\n \"\"\"Function call with arguments in CUDA program.\n\n Attributes:\n index (int): The index of this operation.\n submodule (submodule): The submodules called in this operation.\n args (list of _FusionVarCUDA): The arguments.\n types (list of dtype): The types of parameters.\n \"\"\"\n\n def __init__(self, index, submodule, args):\n self.index = index\n self.submodule = submodule\n self.args = args\n self.dtypes = submodule.dtypes\n\n def __repr__(self):\n return '<FusionOp #{}, {} types=[{}]>'.format(\n self.index, self.submodule.name, ', '.join(self.dtypes))\n\n def declaration_args(self):\n return ' '.join('{} v{}_{};'.format(_dtype_to_ctype[t], self.index, j)\n for j, t in enumerate(self.dtypes)) + '\\n'\n\n def code(self):\n args_sub = ['v{}_{}'.format(self.index, i)\n for i in six.moves.range(len(self.args))]\n ctypes = [_dtype_to_ctype[t] for t in self.dtypes]\n args_list = list(zip(self.args, args_sub, ctypes))\n code = '// op # {}\\n'.format(self.index)\n code += ''.join('{} = static_cast< {} >(v{});\\n'.format(s, t, v.index)\n for v, s, t in args_list)\n code += self.submodule.fcall(args_sub)\n code += ''.join('v{} = static_cast< {} >({});\\n'.format(\n v.index, _dtype_to_ctype[v.dtype], s)\n for v, s, _ in\n args_list[len(self.submodule.in_params):])\n return code\n\n\nclass FusionVarPython(object):\n\n \"\"\"The values of variables in target function of fusion.\n\n Args:\n var (_FusionVarCUDA)\n\n Attributes:\n dtype (dtype): The data type.\n \"\"\"\n\n def __init__(self, var, is_postmap):\n self._var = var\n self.dtype = var.dtype\n self._is_postmap = is_postmap\n\n def __repr__(self):\n return '<FusionVarPython, dtype={}>'.format(self.dtype)\n\n def __neg__(self):\n return cupy.negative(self)\n\n def __add__(self, other):\n return cupy.add(self, other)\n\n def __iadd__(self, other):\n return cupy.add(self, other, self)\n\n def __radd__(self, other):\n return cupy.add(other, self)\n\n def __sub__(self, other):\n return cupy.subtract(self, other)\n\n def __isub__(self, other):\n return cupy.subtract(self, other, self)\n\n def __rsub__(self, other):\n return cupy.subtract(other, self)\n\n def __mul__(self, other):\n return cupy.multiply(self, other)\n\n def __imul__(self, other):\n return cupy.multiply(self, other, self)\n\n def __rmul__(self, other):\n return cupy.multiply(other, self)\n\n def __div__(self, other):\n return cupy.divide(self, other)\n\n def __idiv__(self, other):\n return cupy.divide(self, other, self)\n\n def __rdiv__(self, other):\n return cupy.divide(other, self)\n\n def __truediv__(self, other):\n return cupy.true_divide(self, other)\n\n def __itruediv__(self, other):\n return cupy.true_divide(self, other, self)\n\n def __rtruediv__(self, other):\n return cupy.true_divide(other, self)\n\n def __floordiv__(self, other):\n return cupy.floor_divide(self, other)\n\n def __ifloordiv__(self, other):\n return cupy.floor_divide(self, other, self)\n\n def __rfloordiv__(self, other):\n return cupy.floor_divide(other, self)\n\n def __mod__(self, other):\n return cupy.remainder(self, other)\n\n def __imod__(self, other):\n return cupy.remainder(self, other, self)\n\n def __rmod__(self, other):\n return cupy.remainder(other, self)\n\n def __pow__(x, y):\n return cupy.power(x, y)\n\n def __ipow__(self, other):\n return cupy.power(self, other, self)\n\n def __lshift__(self, other):\n return cupy.left_shift(self, other)\n\n def __ilshift__(self, other):\n return cupy.left_shift(self, other, self)\n\n def __rlshift__(self, other):\n return cupy.left_shift(other, self)\n\n def __rshift__(self, other):\n return cupy.right_shift(self, other)\n\n def __irshift__(self, other):\n return cupy.right_shift(self, other, self)\n\n def __rrshift__(self, other):\n return cupy.right_shift(other, self)\n\n def __and__(self, other):\n return cupy.bitwise_and(self, other)\n\n def __iand__(self, other):\n return cupy.bitwise_and(self, other, self)\n\n def __rand__(self, other):\n return cupy.bitwise_and(other, self)\n\n def __or__(self, other):\n return cupy.bitwise_or(self, other)\n\n def __ior__(self, other):\n return cupy.bitwise_or(self, other, self)\n\n def __ror__(self, other):\n return cupy.bitwise_or(other, self)\n\n def __xor__(self, other):\n return cupy.bitwise_xor(self, other)\n\n def __ixor__(self, other):\n return cupy.bitwise_xor(self, other, self)\n\n def __rxor__(self, other):\n return cupy.bitwise_xor(other, self)\n\n def __invert__(self):\n return cupy.invert(self)\n\n def __lt__(self, other):\n return cupy.less(self, other)\n\n def __le__(self, other):\n return cupy.less_equal(self, other)\n\n def __eq__(self, other):\n return cupy.equal(self, other)\n\n def __ne__(self, other):\n return cupy.not_equal(self, other)\n\n def __gt__(self, other):\n return cupy.greater(self, other)\n\n def __ge__(self, other):\n return cupy.greater_equal(self, other)\n\n def __nonzero__(self):\n raise Exception('Can\\'t cast to bool')\n\n def __bool__(self):\n raise Exception('Can\\'t cast to bool')\n\n def __setitem__(self, slices, value):\n if slices is Ellipsis or (isinstance(slices, slice) and\n slices == slice(None)):\n cupy.copy(value, self)\n else:\n raise ValueError('The fusion supports `[...]` or `[:]`.')\n\n def copy(self):\n return cupy.copy(self)\n\n def astype(self, dtype, order=None, casting=None, subok=None, copy=True):\n dtype = get_dtype(dtype)\n if order is not None:\n raise TypeError('order is not supported yet')\n if casting is not None:\n raise TypeError('casting is not supported yet')\n if subok is not None:\n raise TypeError('subok is not supported yet')\n if not copy and self.dtype == dtype:\n return self\n return _dtype_to_astype(dtype)(self)\n\n\nclass _FusionHistory(object):\n\n \"\"\"History of operation exectuted in the target function of fusion.\n\n Attributes:\n preamble_set (set of str): The preambles of submodules.\n submodules (dict from str to submodule): The submodules.\n count (int): The number of variables in the fused function.\n\n op_list (list of FusionOp): The map operations.\n param_list (list of _FusionVarCUDA): The parameters\n local_list (list of _FusionVarCUDA): The local variables.\n\n Only when fusing the reduction, the following attributes are updated.\n\n reduce_op (tuple): One of the element of reduction.***._raws._ops.\n reduce_identity (any type): The identity value of the reduction.\n reduce_kwargs (dict or None): kwargs of the reduction.\n\n premap_ret (_FusionVarCUDA or None): The target of reduction\n postmap_param (_FusionVarCUDA or None): The result of reduction\n postmap_op_list (list of FuisonOp): The post-map operations.\n postmap_local_list (list of _FusionVarCUDA): The local variables which\n appears in the post-map operations\n \"\"\"\n\n def __init__(self):\n self.preamble_set = set()\n self.submodules = dict()\n self.count = 0\n\n self.op_list = []\n self.param_list = []\n self.local_list = []\n\n self.reduce_op = None\n self.reduce_identity = None\n self.reduce_kwargs = None\n\n self.postmap_op_list = []\n self.premap_ret = None\n self.postmap_param = None\n self.postmap_local_list = []\n\n def __repr__(self):\n return '<_FusionMem, op_list={}, var_list={}>'.format(\n self.op_list, self.var_list)\n\n def _has_reduction(self):\n return self.reduce_op is not None\n\n def _fresh_index(self):\n res = self.count\n self.count += 1\n return res\n\n def _fresh_premap_param(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.param_list.append(var)\n return var\n\n def _fresh_postmap_param(self, *args, **kwargs):\n assert self.postmap_param is None\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.postmap_param = var\n return var\n\n def _fresh_premap_local(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.local_list.append(var)\n return var\n\n def _fresh_postmap_local(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.postmap_local_list.append(var)\n return var\n\n def _fresh_local(self, *args, **kwargs):\n if self._has_reduction():\n return self._fresh_postmap_local(*args, **kwargs)\n else:\n return self._fresh_premap_local(*args, **kwargs)\n\n def _add_premap_op(self, *args, **kwargs):\n op = FusionOp(len(self.op_list), *args, **kwargs)\n subm = op.submodule\n self.submodules[subm.key()] = subm\n self.op_list.append(op)\n self._add_preamble(subm.preamble)\n return op\n\n def _add_postmap_op(self, *args, **kwargs):\n op = FusionOp(len(self.postmap_op_list), *args, **kwargs)\n subm = op.submodule\n self.submodules[subm.key()] = subm\n self.postmap_op_list.append(op)\n self._add_preamble(subm.preamble)\n return op\n\n def add_op(self, *args, **kwargs):\n if self._has_reduction():\n return self._add_postmap_op(*args, **kwargs)\n else:\n return self._add_premap_op(*args, **kwargs)\n\n def set_reduce_op(self, raw, arg, kwargs):\n assert self.reduce_op is None\n for op in raw._ops:\n (input_type,), (output_type,), _ = op\n if numpy.can_cast(arg.dtype.type, input_type):\n return_dtype = numpy.dtype(output_type)\n self.premap_ret = self._get_cuda_var(arg)\n self.reduce_op = op\n self.reduce_identity = raw.identity\n self.reduce_kwargs = kwargs\n self._add_preamble(raw._preamble)\n return self._fresh_postmap_param(return_dtype)\n raise TypeError('Type is mismatched. {}(...), {}'.format(\n self.raw._ops.name, arg.dtype.type))\n\n def _add_preamble(self, preamble):\n self.preamble_set.add(preamble)\n\n def _get_cuda_var(self, arg):\n \"\"\"This converts `arg` to _FusionVarCUDA data.\n\n Args:\n arg (FusionVarPython or a primitive type)\n\n Return value: _FusionVarCUDA\n \"\"\"\n if isinstance(arg, FusionVarPython):\n if arg._is_postmap == self._has_reduction():\n return arg._var\n else:\n # Map operation between pre-map variable and post-map variable\n raise Exception('Shape mismatch')\n is_scalar = isinstance(arg, six.integer_types + (float, bool, complex))\n is_ndarray = hasattr(arg, 'dtype') and arg.dtype in _dtype_list\n if is_scalar or is_ndarray:\n return self._fresh_local(numpy.dtype(type(arg)), const=arg)\n raise Exception('Unsupported type {}'.format(type(type)))\n\n def call_ufunc(self, ufunc, args, kwargs):\n nin = ufunc.nin\n nout = ufunc.nout\n\n # Corresponds to _check_should_use_min_scalar in elementwise.pxi\n # This function decides which typecast rule to use.\n def _should_use_min_scalar(in_args):\n max_array_kind = -2\n max_scalar_kind = -1\n for arg in in_args:\n kind = _kind_score[arg.dtype.kind]\n if arg.const is None:\n max_array_kind = max(max_array_kind, kind)\n else:\n max_scalar_kind = max(max_scalar_kind, kind)\n return (max_scalar_kind != -1 and\n max_array_kind >= max_scalar_kind)\n\n def can_cast1(args, in_dtypes):\n for i in six.moves.range(nin):\n if args[i].const is None:\n if not numpy.can_cast(args[i].dtype, in_dtypes[i]):\n return False\n else:\n if not numpy.can_cast(args[i].const, in_dtypes[i]):\n return False\n return True\n\n def can_cast2(args, in_dtypes):\n for i in six.moves.range(nin):\n if not numpy.can_cast(args[i].dtype, in_dtypes[i]):\n return False\n return True\n\n var_list = [self._get_cuda_var(_) for _ in args]\n if 'out' in kwargs:\n var_list.append(self._get_cuda_var(kwargs.pop('out')))\n if kwargs:\n raise TypeError('Wrong arguments {}'.format(kwargs))\n assert nin <= len(var_list) <= nin + nout\n in_vars = var_list[:nin]\n out_vars = var_list[nin:]\n can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2\n for in_dtypes, out_dtypes, op in ufunc._ops:\n in_dtypes = [numpy.dtype(_) for _ in in_dtypes]\n out_dtypes = [numpy.dtype(_) for _ in out_dtypes]\n if can_cast(in_vars, in_dtypes):\n ret = []\n for i in six.moves.range(nout):\n if i >= len(out_vars):\n v = self._fresh_local(out_dtypes[i])\n out_vars.append(v)\n elif numpy.can_cast(out_dtypes[i], out_vars[i].dtype,\n 'same_kind'):\n v = out_vars[i]\n else:\n raise TypeError(\n 'output (typecode \\'{}\\') could not be coerced '\n 'to provided output parameter (typecode \\'{}\\') '\n 'according to the casting rule '\n '\"same_kind\"'.format(\n out_dtypes[i].char, out_vars[i].dtype.char))\n v.mutate()\n ret.append(FusionVarPython(v, self._has_reduction()))\n in_params = [(in_dtypes[i], 'in{}'.format(i))\n for i, t in enumerate(in_vars)]\n out_params = [(out_dtypes[i], 'out{}'.format(i))\n for i, t in enumerate(out_vars)]\n subm = Submodule(ufunc, in_params, out_params, op)\n self.add_op(subm, in_vars + out_vars)\n return ret[0] if len(ret) == 1 else tuple(ret)\n in_dtypes = [_.dtype for _ in in_vars]\n out_dtypes = [_.dtype for _ in out_vars]\n raise TypeError('Invalid type cast in \\'{}\\': {} -> {}'.format(\n ufunc.name, in_dtypes, out_dtypes))\n\n def call_elementwise(self, f, args, kwargs):\n raise NotImplementedError(\n 'Fusion for elementwise-kernel is not implemented yet')\n\n def _emit_submodules_code(self):\n res = ''.join(self.preamble_set)\n res += '\\n'.join([_.code() for _ in self.submodules.values()])\n return res\n\n def _emit_operation_code(self):\n res = '// {} operations\\n'.format(len(self.op_list))\n res += ''.join(v.declaration() for v in self.local_list)\n res += ''.join(op.declaration_args() for op in self.op_list)\n res += ''.join(op.code() for op in self.op_list)\n return res\n\n def _emit_premap_code(self, in_params, operation):\n return_var = self.premap_ret\n module_code = string.Template('''\n __device__ ${return_ctype} _pre_map(${in_params}) {\n ${operation};\n return ${return_var};\n }\n ''').substitute(\n return_ctype=_dtype_to_ctype[return_var.dtype],\n in_params=', '.join('{} v{}'.format(_dtype_to_ctype[v.dtype],\n v.index)\n for v in in_params),\n operation=operation,\n return_var=return_var)\n return module_code\n\n def _emit_postmap_code(self, out_params, operation):\n in_param = self.postmap_param\n in_ctype = _dtype_to_ctype[in_param.dtype]\n module_code = string.Template('''\n __device__ void _post_map(${in_ctype} in, ${out_params}) {\n ${in_param} = in;\n ${operation};\n }\n ''').substitute(\n in_ctype=in_ctype,\n in_param='{} v{}'.format(in_ctype, in_param.index),\n out_params=', '.join('{} &v{}'.format(_dtype_to_ctype[v.dtype],\n v.index)\n for v in out_params),\n operation=operation)\n return module_code\n\n def _emit_postmap_cast_code(self, reduce_ctype, postmap_dtype, operation):\n module_code = string.Template('''\n __device__ ${postmap_ctype} _postmap_cast(${reduce_ctype} a) {\n ${postmap_ctype} out0;\n ${operation};\n return out0;\n }\n ''').substitute(\n reduce_ctype=reduce_ctype,\n postmap_ctype=_dtype_to_ctype[postmap_dtype],\n operation=operation)\n return module_code\n\n def get_fusion(self, func, in_dtypes, name):\n \"\"\"This generates CUDA kernel from the given function and dtypes.\n\n This function generates ElementwiseKernel or ReductioKernel from the\n given function and the list of dtypes of parameters.\n\n Args:\n func (function): The function to be fused.\n in_types (list of dtypes): The list of dtypes of input parameters.\n name (str): The name of the kernel.\n\n Return value (tuple of ElementwiseKernel/ReductionKernel and dict):\n The second element of return values is kwargs that will give into\n the elementwise kernel or reduction kernel.\n \"\"\"\n in_params = [self._fresh_premap_param(t) for t in in_dtypes]\n in_pvars = [FusionVarPython(_, False) for _ in in_params]\n return_value = func(*in_pvars)\n\n if isinstance(return_value, tuple):\n return_tuple = True\n no_return = False\n out_pvars = return_value\n elif isinstance(return_value, FusionVarPython):\n return_tuple = False\n no_return = False\n out_pvars = [return_value]\n elif return_value is None:\n return_tuple = False\n no_return = True\n out_pvars = []\n else:\n raise TypeError(\n 'Fusion function can\\'t return {}'.format(type(return_value)))\n\n out_pvars = [_ for _ in out_pvars if _ is not None]\n out_cvars = [self._get_cuda_var(_) for _ in out_pvars]\n\n out_dtypes = [_.dtype for _ in out_pvars]\n out_params = [self._fresh_premap_param(t) for t in out_dtypes]\n\n in_params_code = ', '.join(var.declaration_in_param()\n for var in in_params)\n out_params_code = ', '.join(var.declaration_out_param()\n for var in out_params)\n\n operation = self._emit_operation_code()\n submodule_code = self._emit_submodules_code()\n\n if self.reduce_op is None:\n operation += ' '.join('{} = {};'.format(t, s)\n for s, t in zip(out_cvars, out_params))\n kernel = core.ElementwiseKernel(\n in_params_code, out_params_code, operation,\n preamble=submodule_code,\n return_tuple=return_tuple,\n no_return=no_return,\n name=name)\n return kernel, {}\n else:\n _, (postmap_type,), (_, reduce_code, postmap_cast_code,\n reduce_ctype) = self.reduce_op\n if reduce_ctype is None:\n reduce_ctype = 'type_in0_raw'\n\n postmap_dtype = numpy.dtype(postmap_type)\n postmap_ctype = _dtype_to_ctype[postmap_dtype]\n\n postmap_code = '// {} operations\\n'.format(\n len(self.postmap_op_list))\n postmap_code += ''.join(v.declaration()\n for v in self.postmap_local_list)\n postmap_code += ''.join(op.declaration_args()\n for op in self.postmap_op_list)\n postmap_code += ''.join(op.code() for op in self.postmap_op_list)\n postmap_code += ' '.join('{} = {};'.format(t, s)\n for s, t in zip(out_cvars, out_params))\n\n submodule_code += self._emit_premap_code(in_params, operation)\n submodule_code += 'typedef {} type_in0_raw;\\n'.format(\n postmap_ctype)\n submodule_code += 'typedef {} type_out0_raw;\\n'.format(\n postmap_ctype)\n submodule_code += self._emit_postmap_cast_code(\n reduce_ctype, postmap_dtype, postmap_cast_code)\n submodule_code += self._emit_postmap_code(out_params, postmap_code)\n\n kernel = core.ReductionKernel(\n in_params_code,\n out_params_code,\n '_pre_map({})'.format(', '.join([repr(p) for p in in_params])),\n reduce_code,\n '_post_map(_postmap_cast(a), {})'.format(\n ', '.join([repr(p) for p in out_params])),\n self.reduce_identity,\n name=name,\n reduce_type=reduce_ctype,\n preamble=submodule_code)\n return kernel, self.reduce_kwargs\n\n\nclass Fusion(object):\n\n \"\"\"Function class.\n\n This class can be get by using `fuse` function and\n works like `ElementwiseKernel` or `ReductionKernel`.\n\n Attributes:\n func (function): The function before fusing.\n name (str): The name of the function.\n \"\"\"\n\n def __init__(self, func, name=None):\n self.func = func\n self.name = name or func.__name__\n self._memo = {}\n\n def __repr__(self):\n return '<Fusion \\'{}\\'>'.format(self.name)\n\n def __call__(self, *args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n func, kw = self._compile(*args, **kwargs)\n kwargs = dict(kwargs, **kw)\n return func(*args, **kwargs)\n else:\n return self.func(*args, **kwargs)\n\n def _compile_from_dtypes(self, *dtypes):\n assert not hasattr(_thread_local, 'history')\n _thread_local.history = _FusionHistory()\n try:\n key = tuple(dtypes)\n if key not in self._memo:\n self._memo[key] = _thread_local.history.get_fusion(\n self.func, dtypes, self.name)\n return self._memo[key]\n finally:\n del _thread_local.history\n\n def _compile(self, *args, **kwargs):\n if builtins.any(\n not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))\n for _ in args):\n raise TypeError('Invalid argument type for \\'{}\\': ({})'.format(\n self.name,\n ', '.join(repr(type(_)) for _ in args)))\n\n def is_cupy_data(a):\n return isinstance(a, (core.ndarray, numpy.generic))\n if builtins.all(is_cupy_data(_) for _ in args):\n dtypes = [_.dtype for _ in args]\n return self._compile_from_dtypes(*dtypes)\n else:\n if builtins.any(type(_) is core.ndarray for _ in args):\n types_str = '.'.join(repr(type(_)) for _ in args)\n message = 'Can\\'t fuse \\n {}({})'.format(self.name, types_str)\n warnings.warn(message)\n else:\n return self.func, {}\n\n def clear_cache(self):\n self._memo = {}\n\n\ndef fuse(*args, **kwargs):\n \"\"\"Function fusing decorator.\n\n This decorator can be used to define an elementwise or reduction kernel\n more easily than `ElementwiseKernel` class or `ReductionKernel` class.\n\n This decorator makes `Fusion` class from the given function.\n\n Args:\n kernel_name (str): Name of the fused kernel function.\n If omitted, the name of the decorated function is used.\n\n .. note::\n This API is currently experimental and the interface may be changed in\n the future version.\n\n \"\"\"\n\n def wrapper(f, kernel_name=None):\n return Fusion(f, kernel_name)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return functools.update_wrapper(wrapper(args[0]), args[0])\n else:\n return lambda f: functools.update_wrapper(\n wrapper(f, *args, **kwargs), f)\n\n\ndef _ufunc_wrapper(fusion_op):\n def func(f):\n def call(*args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n return f(*args, **kwargs)\n return _thread_local.history.call_ufunc(fusion_op, args, kwargs)\n return functools.update_wrapper(call, f)\n return func\n\n\ndef _reduction_wrapper(fusion_op):\n def func(f):\n def call(*args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n return f(*args, **kwargs)\n arg = args[0]\n if arg._is_postmap:\n # Multiple reduction\n raise NotImplementedError(\n 'Multiple reduction is not implemented yet')\n if len(args) != 1:\n mes = '{}() takes 1 positional argument but {} were given'\n raise TypeError(mes.format(fusion_op._ops.name, len(args)))\n return FusionVarPython(\n _thread_local.history.set_reduce_op(fusion_op, arg, kwargs),\n True)\n return functools.update_wrapper(call, f)\n return func\n\n\ndef _create_astype_ufunc(dtype):\n name = 'astype_{}'.format(dtype)\n rules = tuple(['{}->{}'.format(cast_from.char, dtype.char)\n for cast_from in _dtype_list])\n command = 'out0 = static_cast<{}>(in0)'.format(_dtype_to_ctype[dtype])\n return core.create_ufunc(name, rules, command)\n\n\n_dtype_to_astype_dict = None\n\n\ndef _dtype_to_astype(dtype):\n global _dtype_to_astype_dict\n if _dtype_to_astype_dict is None:\n _dtype_to_astype_dict = dict([\n (dt, _create_astype_ufunc(dt))\n for dt in _dtype_list])\n return _dtype_to_astype_dict[dtype]\n", "path": "cupy/core/fusion.py" } ]
[ { "content": "import functools\nimport six\nfrom six.moves import builtins\nimport string\nimport threading\nimport warnings\n\nimport numpy\n\nimport cupy\nfrom cupy.core._dtype import get_dtype\nfrom cupy.core import core\n\n\n_thread_local = threading.local()\n\n_kind_score = {\n 'b': 0,\n 'u': 1,\n 'i': 1,\n 'f': 2,\n 'c': 2,\n}\n\n_dtype_to_ctype = {\n numpy.dtype('float64'): 'double',\n numpy.dtype('float32'): 'float',\n numpy.dtype('float16'): 'float16',\n numpy.dtype('complex128'): 'complex<double>',\n numpy.dtype('complex64'): 'complex<float>',\n numpy.dtype('int64'): 'long long',\n numpy.dtype('int32'): 'int',\n numpy.dtype('int16'): 'short',\n numpy.dtype('int8'): 'signed char',\n numpy.dtype('uint64'): 'unsigned long long',\n numpy.dtype('uint32'): 'unsigned int',\n numpy.dtype('uint16'): 'unsigned short',\n numpy.dtype('uint8'): 'unsigned char',\n numpy.dtype('bool'): 'bool',\n}\n\n_dtype_list = [numpy.dtype(_) for _ in '?bhilqBHILQefdFD']\n\n\nclass Submodule(object):\n \"\"\"Ufunc or elementwise kernel with types.\n\n Attributes:\n name (str): The name of submodule\n in_params (list of tuples of dtype and str):\n The tuple of dtype and name of input parameters.\n out_params (list of tuples of dtype and str):\n The tuple of dtype and name of output parameters.\n op (str): The operation code.\n preamble (str): The preamble code.\n dtypes (list of dtypes): The list of dtypes of the parameters.\n \"\"\"\n\n def __init__(self, ufunc, in_params, out_params, op):\n self.name = ufunc.name\n self.in_params = in_params\n self.out_params = out_params\n self.op = op\n self.preamble = ufunc._preamble\n self.dtypes = [dtype for dtype, _ in self.in_params + self.out_params]\n\n def __repr__(self):\n return '<Submodule {}>'.format(self.name)\n\n def fcall(self, args):\n return self.name + '(' + ', '.join(args) + ');\\n'\n\n def key(self):\n return (self.name, tuple(self.dtypes))\n\n def code(self):\n params = ', '.join('{} &{}'.format(_dtype_to_ctype[t], s)\n for t, s in self.in_params + self.out_params)\n typedef = ''.join('typedef {} {}_type;\\n'.format(_dtype_to_ctype[t], s)\n for t, s in self.in_params + self.out_params)\n module_code = string.Template('''\n __device__ void ${name}(${parameters}) {\n ${typedef}\n ${operation};\n }\n ''').substitute(\n name=self.name,\n parameters=params,\n operation=self.op,\n typedef=typedef)\n return module_code + '\\n'\n\n\nclass _FusionVarCUDA(object):\n\n \"\"\"Local variable in CUDA program.\n\n Attributes:\n index (int): The name of the variable.\n dtype (dtype): The dtype of the variable.\n const (any of primitive types): The constant value (or None)\n \"\"\"\n\n def __init__(self, index, dtype, const=None):\n self.index = index\n self.dtype = dtype\n self.const = const\n self.mutable = False\n\n def __repr__(self):\n return 'v{}'.format(self.index)\n\n def mutate(self):\n self.mutable = True\n\n def declaration(self):\n c = self.const\n val = numpy.asscalar(c) if hasattr(c, 'dtype') else c\n ctype = _dtype_to_ctype[self.dtype]\n\n if self.const is None:\n return '{} v{};\\n'.format(ctype, self.index)\n\n if isinstance(val, bool):\n init = '= {}'.format(str(c).lower())\n elif isinstance(val, complex):\n init = '({}, {})'.format(c.real, c.imag)\n elif isinstance(val, six.integer_types + (float,)):\n init = '= {}'.format(c)\n else:\n raise TypeError('Invalid constant type: {}'.format(type(c)))\n return 'const {} v{} {};\\n'.format(ctype, self.index, init)\n\n def declaration_in_param(self):\n non_const = '_non_const ' if self.mutable else ''\n return '{}{} v{}'.format(non_const, self.dtype, self.index)\n\n def declaration_out_param(self):\n return '{} v{}'.format(self.dtype, self.index)\n\n\nclass FusionOp(object):\n\n \"\"\"Function call with arguments in CUDA program.\n\n Attributes:\n index (int): The index of this operation.\n submodule (submodule): The submodules called in this operation.\n args (list of _FusionVarCUDA): The arguments.\n types (list of dtype): The types of parameters.\n \"\"\"\n\n def __init__(self, index, submodule, args):\n self.index = index\n self.submodule = submodule\n self.args = args\n self.dtypes = submodule.dtypes\n\n def __repr__(self):\n return '<FusionOp #{}, {} types=[{}]>'.format(\n self.index, self.submodule.name, ', '.join(self.dtypes))\n\n def declaration_args(self):\n return ' '.join('{} v{}_{};'.format(_dtype_to_ctype[t], self.index, j)\n for j, t in enumerate(self.dtypes)) + '\\n'\n\n def code(self):\n args_sub = ['v{}_{}'.format(self.index, i)\n for i in six.moves.range(len(self.args))]\n ctypes = [_dtype_to_ctype[t] for t in self.dtypes]\n args_list = list(zip(self.args, args_sub, ctypes))\n code = '// op # {}\\n'.format(self.index)\n code += ''.join('{} = static_cast< {} >(v{});\\n'.format(s, t, v.index)\n for v, s, t in args_list)\n code += self.submodule.fcall(args_sub)\n code += ''.join('v{} = static_cast< {} >({});\\n'.format(\n v.index, _dtype_to_ctype[v.dtype], s)\n for v, s, _ in\n args_list[len(self.submodule.in_params):])\n return code\n\n\nclass FusionVarPython(object):\n\n \"\"\"The values of variables in target function of fusion.\n\n Args:\n var (_FusionVarCUDA)\n\n Attributes:\n dtype (dtype): The data type.\n \"\"\"\n\n def __init__(self, var, is_postmap):\n self._var = var\n self.dtype = var.dtype\n self._is_postmap = is_postmap\n\n def __repr__(self):\n return '<FusionVarPython, dtype={}>'.format(self.dtype)\n\n def __neg__(self):\n return cupy.negative(self)\n\n def __add__(self, other):\n return cupy.add(self, other)\n\n def __iadd__(self, other):\n return cupy.add(self, other, self)\n\n def __radd__(self, other):\n return cupy.add(other, self)\n\n def __sub__(self, other):\n return cupy.subtract(self, other)\n\n def __isub__(self, other):\n return cupy.subtract(self, other, self)\n\n def __rsub__(self, other):\n return cupy.subtract(other, self)\n\n def __mul__(self, other):\n return cupy.multiply(self, other)\n\n def __imul__(self, other):\n return cupy.multiply(self, other, self)\n\n def __rmul__(self, other):\n return cupy.multiply(other, self)\n\n def __div__(self, other):\n return cupy.divide(self, other)\n\n def __idiv__(self, other):\n return cupy.divide(self, other, self)\n\n def __rdiv__(self, other):\n return cupy.divide(other, self)\n\n def __truediv__(self, other):\n return cupy.true_divide(self, other)\n\n def __itruediv__(self, other):\n return cupy.true_divide(self, other, self)\n\n def __rtruediv__(self, other):\n return cupy.true_divide(other, self)\n\n def __floordiv__(self, other):\n return cupy.floor_divide(self, other)\n\n def __ifloordiv__(self, other):\n return cupy.floor_divide(self, other, self)\n\n def __rfloordiv__(self, other):\n return cupy.floor_divide(other, self)\n\n def __mod__(self, other):\n return cupy.remainder(self, other)\n\n def __imod__(self, other):\n return cupy.remainder(self, other, self)\n\n def __rmod__(self, other):\n return cupy.remainder(other, self)\n\n def __pow__(x, y):\n return cupy.power(x, y)\n\n def __ipow__(self, other):\n return cupy.power(self, other, self)\n\n def __lshift__(self, other):\n return cupy.left_shift(self, other)\n\n def __ilshift__(self, other):\n return cupy.left_shift(self, other, self)\n\n def __rlshift__(self, other):\n return cupy.left_shift(other, self)\n\n def __rshift__(self, other):\n return cupy.right_shift(self, other)\n\n def __irshift__(self, other):\n return cupy.right_shift(self, other, self)\n\n def __rrshift__(self, other):\n return cupy.right_shift(other, self)\n\n def __and__(self, other):\n return cupy.bitwise_and(self, other)\n\n def __iand__(self, other):\n return cupy.bitwise_and(self, other, self)\n\n def __rand__(self, other):\n return cupy.bitwise_and(other, self)\n\n def __or__(self, other):\n return cupy.bitwise_or(self, other)\n\n def __ior__(self, other):\n return cupy.bitwise_or(self, other, self)\n\n def __ror__(self, other):\n return cupy.bitwise_or(other, self)\n\n def __xor__(self, other):\n return cupy.bitwise_xor(self, other)\n\n def __ixor__(self, other):\n return cupy.bitwise_xor(self, other, self)\n\n def __rxor__(self, other):\n return cupy.bitwise_xor(other, self)\n\n def __invert__(self):\n return cupy.invert(self)\n\n def __lt__(self, other):\n return cupy.less(self, other)\n\n def __le__(self, other):\n return cupy.less_equal(self, other)\n\n def __eq__(self, other):\n return cupy.equal(self, other)\n\n def __ne__(self, other):\n return cupy.not_equal(self, other)\n\n def __gt__(self, other):\n return cupy.greater(self, other)\n\n def __ge__(self, other):\n return cupy.greater_equal(self, other)\n\n def __nonzero__(self):\n raise Exception('Can\\'t cast to bool')\n\n def __bool__(self):\n raise Exception('Can\\'t cast to bool')\n\n def __setitem__(self, slices, value):\n if slices is Ellipsis or (isinstance(slices, slice) and\n slices == slice(None)):\n cupy.copy(value, self)\n else:\n raise ValueError('The fusion supports `[...]` or `[:]`.')\n\n def copy(self):\n return cupy.copy(self)\n\n def astype(self, dtype, order=None, casting=None, subok=None, copy=True):\n dtype = get_dtype(dtype)\n if order is not None:\n raise TypeError('order is not supported yet')\n if casting is not None:\n raise TypeError('casting is not supported yet')\n if subok is not None:\n raise TypeError('subok is not supported yet')\n if not copy and self.dtype == dtype:\n return self\n return _dtype_to_astype(dtype)(self)\n\n\nclass _FusionHistory(object):\n\n \"\"\"History of operation exectuted in the target function of fusion.\n\n Attributes:\n preamble_set (set of str): The preambles of submodules.\n submodules (dict from str to submodule): The submodules.\n count (int): The number of variables in the fused function.\n\n op_list (list of FusionOp): The map operations.\n param_list (list of _FusionVarCUDA): The parameters\n local_list (list of _FusionVarCUDA): The local variables.\n\n Only when fusing the reduction, the following attributes are updated.\n\n reduce_op (tuple): One of the element of reduction.***._raws._ops.\n reduce_identity (any type): The identity value of the reduction.\n reduce_kwargs (dict or None): kwargs of the reduction.\n\n premap_ret (_FusionVarCUDA or None): The target of reduction\n postmap_param (_FusionVarCUDA or None): The result of reduction\n postmap_op_list (list of FuisonOp): The post-map operations.\n postmap_local_list (list of _FusionVarCUDA): The local variables which\n appears in the post-map operations\n \"\"\"\n\n def __init__(self):\n self.preamble_set = set()\n self.submodules = dict()\n self.count = 0\n\n self.op_list = []\n self.param_list = []\n self.local_list = []\n\n self.reduce_op = None\n self.reduce_identity = None\n self.reduce_kwargs = None\n\n self.postmap_op_list = []\n self.premap_ret = None\n self.postmap_param = None\n self.postmap_local_list = []\n\n def __repr__(self):\n return '<_FusionMem, op_list={}, var_list={}>'.format(\n self.op_list, self.var_list)\n\n def _has_reduction(self):\n return self.reduce_op is not None\n\n def _fresh_index(self):\n res = self.count\n self.count += 1\n return res\n\n def _fresh_premap_param(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.param_list.append(var)\n return var\n\n def _fresh_postmap_param(self, *args, **kwargs):\n assert self.postmap_param is None\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.postmap_param = var\n return var\n\n def _fresh_premap_local(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.local_list.append(var)\n return var\n\n def _fresh_postmap_local(self, *args, **kwargs):\n index = self._fresh_index()\n var = _FusionVarCUDA(index, *args, **kwargs)\n self.postmap_local_list.append(var)\n return var\n\n def _fresh_local(self, *args, **kwargs):\n if self._has_reduction():\n return self._fresh_postmap_local(*args, **kwargs)\n else:\n return self._fresh_premap_local(*args, **kwargs)\n\n def _add_premap_op(self, *args, **kwargs):\n op = FusionOp(len(self.op_list), *args, **kwargs)\n subm = op.submodule\n self.submodules[subm.key()] = subm\n self.op_list.append(op)\n self._add_preamble(subm.preamble)\n return op\n\n def _add_postmap_op(self, *args, **kwargs):\n op = FusionOp(len(self.postmap_op_list), *args, **kwargs)\n subm = op.submodule\n self.submodules[subm.key()] = subm\n self.postmap_op_list.append(op)\n self._add_preamble(subm.preamble)\n return op\n\n def add_op(self, *args, **kwargs):\n if self._has_reduction():\n return self._add_postmap_op(*args, **kwargs)\n else:\n return self._add_premap_op(*args, **kwargs)\n\n def set_reduce_op(self, raw, arg, kwargs):\n assert self.reduce_op is None\n for op in raw._ops:\n (input_type,), (output_type,), _ = op\n if numpy.can_cast(arg.dtype.type, input_type):\n return_dtype = numpy.dtype(output_type)\n self.premap_ret = self._get_cuda_var(arg)\n self.reduce_op = op\n self.reduce_identity = raw.identity\n self.reduce_kwargs = kwargs\n self._add_preamble(raw._preamble)\n return self._fresh_postmap_param(return_dtype)\n raise TypeError('Type is mismatched. {}(...), {}'.format(\n self.raw._ops.name, arg.dtype.type))\n\n def _add_preamble(self, preamble):\n self.preamble_set.add(preamble)\n\n def _get_cuda_var(self, arg):\n \"\"\"This converts `arg` to _FusionVarCUDA data.\n\n Args:\n arg (FusionVarPython or a primitive type)\n\n Return value: _FusionVarCUDA\n \"\"\"\n if isinstance(arg, FusionVarPython):\n if arg._is_postmap == self._has_reduction():\n return arg._var\n else:\n # Map operation between pre-map variable and post-map variable\n raise Exception('Shape mismatch')\n is_scalar = isinstance(arg, six.integer_types + (float, bool, complex))\n is_ndarray = hasattr(arg, 'dtype') and arg.dtype in _dtype_list\n if is_scalar or is_ndarray:\n return self._fresh_local(numpy.dtype(type(arg)), const=arg)\n raise Exception('Unsupported type {}'.format(type(type)))\n\n def call_ufunc(self, ufunc, args, kwargs):\n nin = ufunc.nin\n nout = ufunc.nout\n\n # Corresponds to _check_should_use_min_scalar in elementwise.pxi\n # This function decides which typecast rule to use.\n def _should_use_min_scalar(in_args):\n max_array_kind = -2\n max_scalar_kind = -1\n for arg in in_args:\n kind = _kind_score[arg.dtype.kind]\n if arg.const is None:\n max_array_kind = max(max_array_kind, kind)\n else:\n max_scalar_kind = max(max_scalar_kind, kind)\n return (max_scalar_kind != -1 and\n max_array_kind >= max_scalar_kind)\n\n def can_cast1(args, in_dtypes):\n for i in six.moves.range(nin):\n if args[i].const is None:\n if not numpy.can_cast(args[i].dtype, in_dtypes[i]):\n return False\n else:\n if not numpy.can_cast(args[i].const, in_dtypes[i]):\n return False\n return True\n\n def can_cast2(args, in_dtypes):\n for i in six.moves.range(nin):\n if not numpy.can_cast(args[i].dtype, in_dtypes[i]):\n return False\n return True\n\n var_list = [self._get_cuda_var(_) for _ in args]\n if 'out' in kwargs:\n var_list.append(self._get_cuda_var(kwargs.pop('out')))\n if kwargs:\n raise TypeError('Wrong arguments {}'.format(kwargs))\n assert nin <= len(var_list) <= nin + nout\n in_vars = var_list[:nin]\n out_vars = var_list[nin:]\n can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2\n for in_dtypes, out_dtypes, op in ufunc._ops:\n in_dtypes = [numpy.dtype(_) for _ in in_dtypes]\n out_dtypes = [numpy.dtype(_) for _ in out_dtypes]\n if can_cast(in_vars, in_dtypes):\n ret = []\n for i in six.moves.range(nout):\n if i >= len(out_vars):\n v = self._fresh_local(out_dtypes[i])\n out_vars.append(v)\n elif numpy.can_cast(out_dtypes[i], out_vars[i].dtype,\n 'same_kind'):\n v = out_vars[i]\n else:\n raise TypeError(\n 'output (typecode \\'{}\\') could not be coerced '\n 'to provided output parameter (typecode \\'{}\\') '\n 'according to the casting rule '\n '\"same_kind\"'.format(\n out_dtypes[i].char, out_vars[i].dtype.char))\n v.mutate()\n ret.append(FusionVarPython(v, self._has_reduction()))\n in_params = [(in_dtypes[i], 'in{}'.format(i))\n for i, t in enumerate(in_vars)]\n out_params = [(out_dtypes[i], 'out{}'.format(i))\n for i, t in enumerate(out_vars)]\n subm = Submodule(ufunc, in_params, out_params, op)\n self.add_op(subm, in_vars + out_vars)\n return ret[0] if len(ret) == 1 else tuple(ret)\n in_dtypes = [_.dtype for _ in in_vars]\n out_dtypes = [_.dtype for _ in out_vars]\n raise TypeError('Invalid type cast in \\'{}\\': {} -> {}'.format(\n ufunc.name, in_dtypes, out_dtypes))\n\n def call_elementwise(self, f, args, kwargs):\n raise NotImplementedError(\n 'Fusion for elementwise-kernel is not implemented yet')\n\n def _emit_submodules_code(self):\n res = ''.join(self.preamble_set)\n res += '\\n'.join([_.code() for _ in self.submodules.values()])\n return res\n\n def _emit_operation_code(self):\n res = '// {} operations\\n'.format(len(self.op_list))\n res += ''.join(v.declaration() for v in self.local_list)\n res += ''.join(op.declaration_args() for op in self.op_list)\n res += ''.join(op.code() for op in self.op_list)\n return res\n\n def _emit_premap_code(self, in_params, operation):\n return_var = self.premap_ret\n module_code = string.Template('''\n __device__ ${return_ctype} _pre_map(${in_params}) {\n ${operation};\n return ${return_var};\n }\n ''').substitute(\n return_ctype=_dtype_to_ctype[return_var.dtype],\n in_params=', '.join('{} v{}'.format(_dtype_to_ctype[v.dtype],\n v.index)\n for v in in_params),\n operation=operation,\n return_var=return_var)\n return module_code\n\n def _emit_postmap_code(self, out_params, operation):\n in_param = self.postmap_param\n in_ctype = _dtype_to_ctype[in_param.dtype]\n module_code = string.Template('''\n __device__ void _post_map(${in_ctype} in, ${out_params}) {\n ${in_param} = in;\n ${operation};\n }\n ''').substitute(\n in_ctype=in_ctype,\n in_param='{} v{}'.format(in_ctype, in_param.index),\n out_params=', '.join('{} &v{}'.format(_dtype_to_ctype[v.dtype],\n v.index)\n for v in out_params),\n operation=operation)\n return module_code\n\n def _emit_postmap_cast_code(self, reduce_ctype, postmap_dtype, operation):\n module_code = string.Template('''\n __device__ ${postmap_ctype} _postmap_cast(${reduce_ctype} a) {\n ${postmap_ctype} out0;\n ${operation};\n return out0;\n }\n ''').substitute(\n reduce_ctype=reduce_ctype,\n postmap_ctype=_dtype_to_ctype[postmap_dtype],\n operation=operation)\n return module_code\n\n def get_fusion(self, func, in_dtypes, name):\n \"\"\"This generates CUDA kernel from the given function and dtypes.\n\n This function generates ElementwiseKernel or ReductioKernel from the\n given function and the list of dtypes of parameters.\n\n Args:\n func (function): The function to be fused.\n in_types (list of dtypes): The list of dtypes of input parameters.\n name (str): The name of the kernel.\n\n Return value (tuple of ElementwiseKernel/ReductionKernel and dict):\n The second element of return values is kwargs that will give into\n the elementwise kernel or reduction kernel.\n \"\"\"\n in_params = [self._fresh_premap_param(t) for t in in_dtypes]\n in_pvars = [FusionVarPython(_, False) for _ in in_params]\n return_value = func(*in_pvars)\n\n if isinstance(return_value, tuple):\n return_tuple = True\n no_return = False\n out_pvars = return_value\n elif isinstance(return_value, FusionVarPython):\n return_tuple = False\n no_return = False\n out_pvars = [return_value]\n elif return_value is None:\n return_tuple = False\n no_return = True\n out_pvars = []\n else:\n raise TypeError(\n 'Fusion function can\\'t return {}'.format(type(return_value)))\n\n out_pvars = [_ for _ in out_pvars if _ is not None]\n out_cvars = [self._get_cuda_var(_) for _ in out_pvars]\n\n out_dtypes = [_.dtype for _ in out_pvars]\n out_params = [self._fresh_premap_param(t) for t in out_dtypes]\n\n in_params_code = ', '.join(var.declaration_in_param()\n for var in in_params)\n out_params_code = ', '.join(var.declaration_out_param()\n for var in out_params)\n\n operation = self._emit_operation_code()\n submodule_code = self._emit_submodules_code()\n\n if self.reduce_op is None:\n operation += ' '.join('{} = {};'.format(t, s)\n for s, t in zip(out_cvars, out_params))\n kernel = core.ElementwiseKernel(\n in_params_code, out_params_code, operation,\n preamble=submodule_code,\n return_tuple=return_tuple,\n no_return=no_return,\n name=name)\n return kernel, {}\n else:\n _, (postmap_type,), (_, reduce_code, postmap_cast_code,\n reduce_ctype) = self.reduce_op\n if reduce_ctype is None:\n reduce_ctype = 'type_in0_raw'\n\n postmap_dtype = numpy.dtype(postmap_type)\n postmap_ctype = _dtype_to_ctype[postmap_dtype]\n\n postmap_code = '// {} operations\\n'.format(\n len(self.postmap_op_list))\n postmap_code += ''.join(v.declaration()\n for v in self.postmap_local_list)\n postmap_code += ''.join(op.declaration_args()\n for op in self.postmap_op_list)\n postmap_code += ''.join(op.code() for op in self.postmap_op_list)\n postmap_code += ' '.join('{} = {};'.format(t, s)\n for s, t in zip(out_cvars, out_params))\n\n submodule_code += self._emit_premap_code(in_params, operation)\n submodule_code += 'typedef {} type_in0_raw;\\n'.format(\n postmap_ctype)\n submodule_code += 'typedef {} type_out0_raw;\\n'.format(\n postmap_ctype)\n submodule_code += self._emit_postmap_cast_code(\n reduce_ctype, postmap_dtype, postmap_cast_code)\n submodule_code += self._emit_postmap_code(out_params, postmap_code)\n\n kernel = core.ReductionKernel(\n in_params_code,\n out_params_code,\n '_pre_map({})'.format(', '.join([repr(p) for p in in_params])),\n reduce_code,\n '_post_map(_postmap_cast(a), {})'.format(\n ', '.join([repr(p) for p in out_params])),\n self.reduce_identity,\n name=name,\n reduce_type=reduce_ctype,\n preamble=submodule_code)\n return kernel, self.reduce_kwargs\n\n\nclass Fusion(object):\n\n \"\"\"Function class.\n\n This class can be get by using `fuse` function and\n works like `ElementwiseKernel` or `ReductionKernel`.\n\n Attributes:\n func (function): The function before fusing.\n name (str): The name of the function.\n \"\"\"\n\n def __init__(self, func, name=None):\n self.func = func\n self.name = name or func.__name__\n self._memo = {}\n\n def __repr__(self):\n return '<Fusion \\'{}\\'>'.format(self.name)\n\n def __call__(self, *args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n func, kw = self._compile(*args, **kwargs)\n kwargs = dict(kwargs, **kw)\n return func(*args, **kwargs)\n else:\n return self.func(*args, **kwargs)\n\n def _compile_from_dtypes(self, *dtypes):\n assert not hasattr(_thread_local, 'history')\n _thread_local.history = _FusionHistory()\n try:\n key = tuple(dtypes)\n if key not in self._memo:\n self._memo[key] = _thread_local.history.get_fusion(\n self.func, dtypes, self.name)\n return self._memo[key]\n finally:\n del _thread_local.history\n\n def _compile(self, *args, **kwargs):\n if builtins.any(\n not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))\n for _ in args):\n raise TypeError('Invalid argument type for \\'{}\\': ({})'.format(\n self.name,\n ', '.join(repr(type(_)) for _ in args)))\n\n def is_cupy_data(a):\n return isinstance(a, (core.ndarray, numpy.generic))\n if builtins.all(is_cupy_data(_) for _ in args):\n dtypes = [_.dtype for _ in args]\n return self._compile_from_dtypes(*dtypes)\n else:\n if builtins.any(type(_) is core.ndarray for _ in args):\n types_str = '.'.join(repr(type(_)) for _ in args)\n message = 'Can\\'t fuse \\n {}({})'.format(self.name, types_str)\n warnings.warn(message)\n else:\n return self.func, {}\n\n def clear_cache(self):\n self._memo = {}\n\n\ndef fuse(*args, **kwargs):\n \"\"\"Function fusing decorator.\n\n This decorator can be used to define an elementwise or reduction kernel\n more easily than `ElementwiseKernel` class or `ReductionKernel` class.\n\n This decorator makes `Fusion` class from the given function.\n\n Args:\n kernel_name (str): Name of the fused kernel function.\n If omitted, the name of the decorated function is used.\n\n .. note::\n This API is currently experimental and the interface may be changed in\n the future version.\n\n \"\"\"\n\n def wrapper(f, kernel_name=None):\n return Fusion(f, kernel_name)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return functools.update_wrapper(wrapper(args[0]), args[0])\n else:\n return lambda f: functools.update_wrapper(\n wrapper(f, *args, **kwargs), f)\n\n\ndef _ufunc_wrapper(fusion_op):\n def func(f):\n def call(*args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n return f(*args, **kwargs)\n return _thread_local.history.call_ufunc(fusion_op, args, kwargs)\n return functools.update_wrapper(call, f)\n return func\n\n\ndef _reduction_wrapper(fusion_op):\n def func(f):\n def call(*args, **kwargs):\n if not hasattr(_thread_local, 'history'):\n return f(*args, **kwargs)\n arg = args[0]\n if arg._is_postmap:\n # Multiple reduction\n raise NotImplementedError(\n 'Multiple reduction is not implemented yet')\n if len(args) != 1:\n mes = '{}() takes 1 positional argument but {} were given'\n raise TypeError(mes.format(fusion_op._ops.name, len(args)))\n return FusionVarPython(\n _thread_local.history.set_reduce_op(fusion_op, arg, kwargs),\n True)\n return functools.update_wrapper(call, f)\n return func\n\n\ndef _create_astype_ufunc(dtype):\n name = 'astype_{}'.format(dtype)\n rules = tuple(['{}->{}'.format(cast_from.char, dtype.char)\n for cast_from in _dtype_list])\n command = 'out0 = static_cast<{}>(in0)'.format(_dtype_to_ctype[dtype])\n return core.create_ufunc(name, rules, command)\n\n\n_dtype_to_astype_dict = None\n\n\ndef _dtype_to_astype(dtype):\n global _dtype_to_astype_dict\n if _dtype_to_astype_dict is None:\n _dtype_to_astype_dict = dict([\n (dt, _create_astype_ufunc(dt))\n for dt in _dtype_list])\n return _dtype_to_astype_dict[dtype]\n", "path": "cupy/core/fusion.py" } ]
diff --git a/cupy/core/_kernel.pyx b/cupy/core/_kernel.pyx index 643c8ca6b6a..5965a9db5f8 100644 --- a/cupy/core/_kernel.pyx +++ b/cupy/core/_kernel.pyx @@ -57,7 +57,7 @@ cdef dict _kind_score = { 'u': 1, 'i': 1, 'f': 2, - 'c': 3, + 'c': 2, } diff --git a/cupy/core/fusion.py b/cupy/core/fusion.py index 466b780d554..60e805d4ef8 100644 --- a/cupy/core/fusion.py +++ b/cupy/core/fusion.py @@ -19,7 +19,7 @@ 'u': 1, 'i': 1, 'f': 2, - 'c': 3, + 'c': 2, } _dtype_to_ctype = { diff --git a/tests/cupy_tests/core_tests/test_fusion.py b/tests/cupy_tests/core_tests/test_fusion.py index 9b9216d42c8..d3e0dc34cfe 100644 --- a/tests/cupy_tests/core_tests/test_fusion.py +++ b/tests/cupy_tests/core_tests/test_fusion.py @@ -1435,23 +1435,23 @@ def func_a1(x, y, z): @testing.gpu class TestFusionPythonConstant(unittest.TestCase): - @testing.for_all_dtypes() + @testing.for_all_dtypes_combination(names=['dtype1', 'dtype2']) @testing.numpy_cupy_array_equal() - def test_python_scalar(self, xp, dtype): + def test_python_scalar(self, xp, dtype1, dtype2): @cupy.fuse() def f(x): - return x * numpy.asscalar(dtype(1)) - return f(testing.shaped_arange((1,), xp, dtype)) + return x * numpy.asscalar(dtype2(1)) + return f(testing.shaped_arange((1,), xp, dtype1)) - @testing.for_all_dtypes() + @testing.for_all_dtypes_combination(names=['dtype1', 'dtype2']) @testing.numpy_cupy_array_equal() - def test_numpy_scalar(self, xp, dtype): + def test_numpy_scalar(self, xp, dtype1, dtype2): @cupy.fuse() def f(x): - return x * dtype(1) - return f(testing.shaped_arange((1,), xp, dtype)) + return x * dtype2(1) + return f(testing.shaped_arange((1,), xp, dtype1)) @testing.gpu diff --git a/tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py b/tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py index d1ae47adc3f..3ed93673e17 100644 --- a/tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py +++ b/tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py @@ -11,8 +11,7 @@ @testing.gpu class TestArrayElementwiseOp(unittest.TestCase): - @testing.for_all_dtypes_combination(names=['x_type', 'y_type'], - no_complex=True) + @testing.for_all_dtypes_combination(names=['x_type', 'y_type']) @testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError) def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False, no_bool=False, no_complex=False):
holoviz__panel-5919
Plotting styling guides are gone As discussed on Discourse the styling gallery notebooks for Plotly, Altair, Seaborn and Matplotlib have gone missing with Panel 1.0 release. Please re-add. Thx.
[ { "content": "\"\"\"\nDefines custom bokeh model to render ECharts plots.\n\"\"\"\nfrom bokeh.core.properties import (\n Any, Dict, Enum, List, Nullable, String,\n)\nfrom bokeh.events import ModelEvent\nfrom bokeh.models import LayoutDOM\n\nfrom ..config import config\nfrom ..io.resources import bundled_files\nfrom ..util import classproperty\n\n\nclass EChartsEvent(ModelEvent):\n\n event_name = 'echarts_event'\n\n def __init__(self, model, type=None, data=None, query=None):\n self.type = type\n self.data = data\n self.query = query\n super().__init__(model=model)\n\n\nclass ECharts(LayoutDOM):\n \"\"\"\n A Bokeh model that wraps around an ECharts plot and renders it\n inside a Bokeh.\n \"\"\"\n\n __javascript_raw__ = [\n f\"{config.npm_cdn}/[email protected]/dist/echarts.min.js\",\n f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js\"\n ]\n\n @classproperty\n def __javascript__(cls):\n return bundled_files(cls)\n\n @classproperty\n def __js_skip__(cls):\n return {\n 'echarts': cls.__javascript__[:1]\n }\n\n __js_require__ = {\n 'paths': {\n \"echarts\": f\"{config.npm_cdn}/[email protected]/dist/echarts.min\",\n \"echarts-gl\": f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min\"\n },\n 'exports': {}\n }\n\n data = Nullable(Dict(String, Any))\n\n options = Nullable(Dict(String, Any))\n\n event_config = Dict(String, Any)\n\n js_events = Dict(String, List(Any))\n\n renderer = Enum(\"canvas\", \"svg\")\n\n theme = Enum(\"default\", \"light\", \"dark\")\n", "path": "panel/models/echarts.py" } ]
[ { "content": "\"\"\"\nDefines custom bokeh model to render ECharts plots.\n\"\"\"\nfrom bokeh.core.properties import (\n Any, Dict, Enum, List, Nullable, String,\n)\nfrom bokeh.events import ModelEvent\nfrom bokeh.models import LayoutDOM\n\nfrom ..config import config\nfrom ..io.resources import bundled_files\nfrom ..util import classproperty\n\n\nclass EChartsEvent(ModelEvent):\n\n event_name = 'echarts_event'\n\n def __init__(self, model, type=None, data=None, query=None):\n self.type = type\n self.data = data\n self.query = query\n super().__init__(model=model)\n\n\nclass ECharts(LayoutDOM):\n \"\"\"\n A Bokeh model that wraps around an ECharts plot and renders it\n inside a Bokeh.\n \"\"\"\n\n __javascript_raw__ = [\n f\"{config.npm_cdn}/[email protected]/dist/echarts.min.js\",\n f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js\"\n ]\n\n @classproperty\n def __javascript__(cls):\n return bundled_files(cls)\n\n @classproperty\n def __js_skip__(cls):\n return {\n 'echarts': cls.__javascript__[:1]\n }\n\n __js_require__ = {\n 'paths': {\n \"echarts\": f\"{config.npm_cdn}/[email protected]/dist/echarts.min\",\n \"echarts-gl\": f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min\"\n },\n 'exports': {}\n }\n\n data = Nullable(Dict(String, Any))\n\n options = Nullable(Dict(String, Any))\n\n event_config = Dict(String, Any)\n\n js_events = Dict(String, List(Any))\n\n renderer = Enum(\"canvas\", \"svg\")\n\n theme = String(\"default\")\n", "path": "panel/models/echarts.py" } ]
diff --git a/doc/_static/logos/altair-logo.png b/doc/_static/logos/altair-logo.png new file mode 100644 index 0000000000..8dd96d6b23 Binary files /dev/null and b/doc/_static/logos/altair-logo.png differ diff --git a/doc/_static/logos/echarts-logo.png b/doc/_static/logos/echarts-logo.png new file mode 100644 index 0000000000..4b3b847ff4 Binary files /dev/null and b/doc/_static/logos/echarts-logo.png differ diff --git a/doc/_static/logos/matplotlib-logo.png b/doc/_static/logos/matplotlib-logo.png new file mode 100644 index 0000000000..deac9e28e6 Binary files /dev/null and b/doc/_static/logos/matplotlib-logo.png differ diff --git a/doc/_static/logos/plotly-logo.png b/doc/_static/logos/plotly-logo.png new file mode 100644 index 0000000000..7128c2f059 Binary files /dev/null and b/doc/_static/logos/plotly-logo.png differ diff --git a/doc/_static/logos/vegalite-logo.png b/doc/_static/logos/vegalite-logo.png new file mode 100644 index 0000000000..c312f8a319 Binary files /dev/null and b/doc/_static/logos/vegalite-logo.png differ diff --git a/doc/how_to/styling/altair.md b/doc/how_to/styling/altair.md new file mode 100644 index 0000000000..3f67c6957f --- /dev/null +++ b/doc/how_to/styling/altair.md @@ -0,0 +1,57 @@ +# Style Altair Plots + +This guide addresses how to style Altair plots displayed using the [Vega pane](../../../examples/reference/panes/Vega). + +You can select the theme of Altair plots using [`altair.themes.enable`](https://altair-viz.github.io/user_guide/customization.html#changing-the-theme) and an accent color using the `configure_mark` method. The list of themes is available via `altair.themes.names()`. + +The gif below displays an example of what can be achieved with a little styling of the Altair plot and the `FastListTemplate`. + +![VegaAltairStyle.gif](https://assets.holoviews.org/panel/thumbnails/gallery/styles/vega-styles.gif) + +## An Altair plot with custom theme and accent color + +In this example we will give the Altair plot a custom theme and accent color. + +```{pyodide} +import altair as alt +import panel as pn + +from vega_datasets import data + +pn.extension("vega") + +def plot(theme, color): + alt.themes.enable(theme) + + return ( + alt.Chart(data.cars()) + .mark_circle(size=200) + .encode( + x='Horsepower:Q', + y='Miles_per_Gallon:Q', + tooltip=["Name", "Origin", "Horsepower", "Miles_per_Gallon"], + ) + .configure_mark( + color=color + ) + .properties( + height=300, + width="container", + ) + .interactive() + ) + +themes = sorted(alt.themes.names()) +theme = pn.widgets.Select(value="dark", options=themes, name="Theme") +color = pn.widgets.ColorPicker(value="#F08080", name="Color") + +pn.Column( + pn.Row(theme, color), + pn.pane.Vega(pn.bind(plot, theme=theme, color=color), height=350, sizing_mode="stretch_width"), + "**Altair Themes**: " + ", ".join(themes), + styles={"border": "1px solid lightgray"} +).servable() +``` + +Please note that the line `alt.themes.enable(theme)` will set the theme of all future generated plots +unless you specifically change it before usage in a `Vega` pane. diff --git a/doc/how_to/styling/echarts.md b/doc/how_to/styling/echarts.md new file mode 100644 index 0000000000..e92f180542 --- /dev/null +++ b/doc/how_to/styling/echarts.md @@ -0,0 +1,48 @@ +# Style Echarts Plots + +This guide addresses how to style ECharts plots displayed using the [ECharts pane](../../../examples/reference/panes/ECharts.ipynb). + +You can select the theme of ECharts plots using the `ECharts.theme` parameter. + +![ECharts Themes](https://assets.holoviz.org/panel/gifs/echarts-styles.gif) + +## An ECharts plot with a custom theme + +In this example we will extend the `themes` available to the `ECharts` pane to the themes listed in the [ECharts Themes Guide](https://echarts.apache.org/en/download-theme.html) and then use one of them. + +```{pyodide} +import panel as pn + +THEME = "shine" + +ECHARTS_THEMES = { + "infographic": "https://fastly.jsdelivr.net/npm/echarts/theme/infographic.js?_v_=20200710_1", + "macarons": "https://fastly.jsdelivr.net/npm/echarts/theme/macarons.js?_v_=20200710_1", + "roma": "https://fastly.jsdelivr.net/npm/echarts/theme/roma.js?_v_=20200710_1", + "shine": "https://fastly.jsdelivr.net/npm/echarts/theme/shine.js?_v_=20200710_1", + "vintage": "https://fastly.jsdelivr.net/npm/echarts/theme/vintage.js?_v_=20200710_1", +} + +pn.pane.ECharts.param.theme.objects = pn.pane.ECharts.param.theme.objects + list( + ECHARTS_THEMES +) + +pn.extension("echarts", js_files=ECHARTS_THEMES) + +echart_bar = { + "title": {"text": "ECharts Example"}, + "tooltip": {}, + "legend": {"data": ["Sales"]}, + "xAxis": {"data": ["shirt", "cardign", "chiffon shirt", "pants", "heels", "socks"]}, + "yAxis": {}, + "series": [{"name": "Sales", "type": "bar", "data": [5, 20, 36, 10, 10, 20]}], +} + +plot = pn.pane.ECharts( + echart_bar, + height=500, + sizing_mode="stretch_width", + theme=THEME, +) +pn.Column(plot.param.theme, plot, sizing_mode="stretch_width").servable() +``` diff --git a/doc/how_to/styling/index.md b/doc/how_to/styling/index.md index f0c698660b..ff2131f900 100644 --- a/doc/how_to/styling/index.md +++ b/doc/how_to/styling/index.md @@ -1,6 +1,7 @@ # Style Components -Panel provides a comprehensive system for applying designs, themes and custom styling for components. This section will take you through these concepts. +Panel provides a comprehensive system for applying designs, themes and custom styling for components. +This section will take you through these concepts. ::::{grid} 1 2 2 3 :gutter: 1 1 1 2 @@ -49,6 +50,78 @@ How to control the visibility of a component. :::: +This section will show you how to style the most common plotting libraries for use with Panel. + +::::{grid} 1 2 2 3 +:gutter: 1 1 1 2 + +:::{grid-item-card} Altair +:link: altair +:link-type: doc + +```{image} ../../_static/logos/altair-logo.png +:width: 125px +:align: center +:name: Altair +``` + +How to style an Altair plot +::: + +:::{grid-item-card} ECharts +:link: echarts +:link-type: doc + +```{image} ../../_static/logos/echarts-logo.png +:width: 125px +:align: center +:name: ECharts +``` + +How to style an ECharts plot +::: + +:::{grid-item-card} Matplotlib +:link: matplotlib +:link-type: doc + +```{image} ../../_static/logos/matplotlib-logo.png +:width: 125px +:align: center +:name: Matplotlib +``` + +How to style a Matplotlib plot +::: + +:::{grid-item-card} Plotly +:link: plotly +:link-type: doc + +```{image} ../../_static/logos/plotly-logo.png +:width: 125px +:align: center +:name: Plotly +``` + +How to style a Plotly plot +::: + +:::{grid-item-card} Vega Lite +:link: vega +:link-type: doc + +```{image} ../../_static/logos/vegalite-logo.png +:width: 125px +:align: center +:name: Vega +``` + +How to style a Vega Lite plot +::: + +:::: + ```{toctree} :titlesonly: :hidden: @@ -59,4 +132,9 @@ themes apply_css load_icon visibility +altair +echarts +matplotlib +plotly +vega ``` diff --git a/doc/how_to/styling/matplotlib.md b/doc/how_to/styling/matplotlib.md new file mode 100644 index 0000000000..7facd680e2 --- /dev/null +++ b/doc/how_to/styling/matplotlib.md @@ -0,0 +1,57 @@ +# Style Matplotlib Plots + +This guide addresses how to style Matplotlib plots displayed using the [Matplotlib pane](../../../examples/reference/panes/Matplotlib.ipynb). + +There are nearly 30 builtin styles to Matplotlib that can be activated with the `plt.style.use` function. The style names are listed in the `plt.style.available` array. + +For more info check out the [Matplotlib style sheets reference](https://matplotlib.org/stable/gallery/style_sheets/style_sheets_reference.html) and the alternative themes [dracula theme](https://draculatheme.com/matplotlib) and [gadfly](https://towardsdatascience.com/a-new-plot-theme-for-matplotlib-gadfly-2cffc745ff84). + +The gif below displays an example of what can be achieved with a little styling of the `Matplotlib` figure and the `FastListTemplate`. + +![Matplotlib + FastListTemlate Styling Example](https://assets.holoviews.org/panel/thumbnails/gallery/styles/matplotlib-styles.gif) + +## A Matplotlib plot with custom style and accent color + +In this example we will give the Matplotlib plot a custom style and accent color. + +```{pyodide} +import numpy as np + +from matplotlib.figure import Figure +import matplotlib.pyplot as plt +import panel as pn + +pn.extension() + + +def plot(style, color): + x = np.arange(-2, 8, 0.1) + y = 0.1 * x**3 - x**2 + 3 * x + 2 + + plt.style.use("default") # reset to not be affected by previous style changes + plt.style.use(style) # change to the specified style + + fig0 = Figure(figsize=(12, 6)) + ax0 = fig0.subplots() + ax0.plot(x, y, linewidth=10.0, color=color) + ax0.set_title(f"Matplotlib Style: {style}") + + plt.style.use("default") # reset to not affect style of other plots + + return fig0 + + +styles = sorted(plt.style.available) +style = pn.widgets.Select(value="dark_background", options=styles, name="Style") +color = pn.widgets.ColorPicker(value="#F08080", name="Color") + +pn.Column( + pn.Row(style, color), + pn.pane.Matplotlib( + pn.bind(plot, style=style, color=color), + height=400, + sizing_mode="fixed", + ), + "**Matplotlib Styles**: " + ", ".join(styles), +).servable() +``` diff --git a/doc/how_to/styling/plotly.md b/doc/how_to/styling/plotly.md new file mode 100644 index 0000000000..77b423c24d --- /dev/null +++ b/doc/how_to/styling/plotly.md @@ -0,0 +1,86 @@ +# Style Plotly Plots + +This guide addresses how to style Plotly plots displayed using the [Plotly pane](../../../examples/reference/panes/Plotly.ipynb). + +Plotly provides a list of built in templates in `plotly.io.templates`. See the [Plotly Templates Guide](https://plotly.com/python/templates/). + +The gif below displays an example of what can be achieved with a little styling of the Plotly plot and the `FastListTemplate`. + +![PlotlyStyle.gif](https://assets.holoviews.org/panel/thumbnails/gallery/styles/plotly-styles.gif) + +## A Plotly Express plot with a custom theme and accent color + +In this example we will give the Plotly Express plot a dark theme and a custom accent color. + +```{pyodide} +import pandas as pd +import plotly.express as px +import plotly.io as pio + +import panel as pn + +pn.extension("plotly") + +data = pd.DataFrame( + [ + ("Monday", 7), + ("Tuesday", 4), + ("Wednesday", 9), + ("Thursday", 4), + ("Friday", 4), + ("Saturday", 4), + ("Sunday", 4), + ], + columns=["Day", "Orders"], +) + +def plot(template, color): + fig = px.line( + data, + x="Day", + y="Orders", + template=template, + color_discrete_sequence=(color,), + title=f"Template: {template}", + ) + fig.update_traces(mode="lines+markers", marker=dict(size=10), line=dict(width=4)) + fig.layout.autosize = True + return fig + +templates = sorted(pio.templates) +template = pn.widgets.Select(value="plotly_dark", options=templates, name="Template") +color = pn.widgets.ColorPicker(value="#F08080", name="Color") + +pn.Column( + pn.Row(template, color), + pn.pane.Plotly(pn.bind(plot, template, color), sizing_mode="stretch_width"), + "**Plotly Templates**: " + ", ".join(templates), +).servable() +``` + +## A Plotly `go.Figure` plot with dark theme + +In this example we will give the Plotly `go.Figure` plot a dark theme. + +```{pyodide} +import pandas as pd +import plotly.graph_objects as go + +import panel as pn + +pn.extension("plotly") + +TEMPLATE = "plotly_dark" # "ggplot2", "seaborn", "simple_white", "plotly", "plotly_white", "plotly_dark", "presentation", "xgridoff", "ygridoff", "gridon", "none" + +z_data = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv") + +fig = go.Figure( + data=go.Surface(z=z_data.values), + layout=go.Layout( + title="Mt Bruno Elevation", + )) +fig.layout.autosize = True +fig.update_layout(template=TEMPLATE, title=f"Mt Bruno Elevation in a '{TEMPLATE}' template") + +pn.pane.Plotly(fig, height=500, sizing_mode="stretch_width").servable() +``` diff --git a/doc/how_to/styling/vega.md b/doc/how_to/styling/vega.md new file mode 100644 index 0000000000..24df70fd46 --- /dev/null +++ b/doc/how_to/styling/vega.md @@ -0,0 +1,57 @@ +# Style Vega Plots + +This guide addresses how to style Vega plots displayed using the [Vega pane](../../../examples/reference/panes/Vega.ipynb). + +The gif below displays an example of what can be achieved with a little styling of the Vega plot and the `FastListTemplate`. + +![VegaAltairStyle.gif](https://assets.holoviews.org/panel/thumbnails/gallery/styles/vega-styles.gif) + +## A Vega plot with dark theme and accent color + +In this example we will give the Vega Plot a dark theme and a custom accent color. + +```{pyodide} +import panel as pn + +from vega_datasets import data + +pn.extension("vega") + +VEGA_ACCENT_COLOR = "#F08080" +VEGA_THEME = { + "background": "#333", + "title": {"color": "#fff"}, + "style": {"guide-label": {"fill": "#fff"}, "guide-title": {"fill": "#fff"}}, + "axis": {"domainColor": "#fff", "gridColor": "#888", "tickColor": "#fff"}, +} + +vegalite = { + "$schema": "https://vega.github.io/schema/vega-lite/v5.json", + "description": "A simple bar chart with rounded corners at the end of the bar.", + "width": "container", + "height": 300, + "data": { + "values": [ + {"a": "A", "b": 28}, + {"a": "B", "b": 55}, + {"a": "C", "b": 43}, + {"a": "D", "b": 91}, + {"a": "E", "b": 81}, + {"a": "F", "b": 53}, + {"a": "G", "b": 19}, + {"a": "H", "b": 87}, + {"a": "I", "b": 52} + ] + }, + "mark": {"type": "bar", "cornerRadiusEnd": 4, "tooltip": True}, + "encoding": { + "x": {"field": "a", "type": "ordinal"}, + "y": {"field": "b", "type": "quantitative"}, + "color": {"value": VEGA_ACCENT_COLOR} + }, +} + +vegalite["config"] = VEGA_THEME + +pn.pane.Vega(vegalite, height=350, sizing_mode="stretch_width").servable() +``` diff --git a/panel/models/echarts.py b/panel/models/echarts.py index ac1de55f62..6186057f0a 100644 --- a/panel/models/echarts.py +++ b/panel/models/echarts.py @@ -62,4 +62,4 @@ def __js_skip__(cls): renderer = Enum("canvas", "svg") - theme = Enum("default", "light", "dark") + theme = String("default") diff --git a/panel/models/echarts.ts b/panel/models/echarts.ts index 49108951ea..b15a18d7f5 100644 --- a/panel/models/echarts.ts +++ b/panel/models/echarts.ts @@ -46,7 +46,10 @@ export class EChartsView extends HTMLBoxView { this.connect(this.model.properties.data.change, () => this._plot()) const {width, height, renderer, theme, event_config, js_events} = this.model.properties this.on_change([width, height], () => this._resize()) - this.on_change([theme, renderer], () => this.render()) + this.on_change([theme, renderer], () => { + this.render() + this._chart.resize() + }) this.on_change([event_config, js_events], () => this._subscribe()) }
edgedb__edgedb-2139
Better syntax errors for substitution tokens Currently as of 1.0-alpha.8+dev.5341.g66ec73494 it fails with InternalServerError: ``` edgedb> SELECT \(x); ERROR: InternalServerError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION') Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md Server traceback: Traceback (most recent call last): File "/work/edb/server/procpool/worker.py", line 75, in worker res = await meth(*args) File "/work/edb/server/compiler/compiler.py", line 1935, in compile return self._compile(ctx=ctx, source=source) File "/work/edb/server/compiler/compiler.py", line 1487, in _compile return self._try_compile(ctx=ctx, source=source) File "/work/edb/server/compiler/compiler.py", line 1519, in _try_compile statements = edgeql.parse_block(source) File "/work/edb/edgeql/parser/__init__.py", line 69, in parse_block return parser.parse(source) File "/work/edb/common/parsing.py", line 401, in parse token = self.process_lex_token(mod, tok) File "/work/edb/common/parsing.py", line 390, in process_lex_token return mod.TokenMeta.for_lex_token(tok.kind())( File "/work/edb/common/parsing.py", line 100, in for_lex_token return mcls.token_map[mcls, token] KeyError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION') ``` This bug appeared after #2131
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport types\n\nfrom edb.common import parsing\n\nfrom . import keywords\nfrom . import precedence\nfrom . import lexer\n\n\nclean_string = re.compile(r\"'(?:\\s|\\n)+'\")\nstring_quote = re.compile(lexer.re_dquote)\n\n\nclass TokenMeta(parsing.TokenMeta):\n pass\n\n\nclass Token(parsing.Token, metaclass=TokenMeta,\n precedence_class=precedence.PrecedenceMeta):\n pass\n\n\nclass T_DOT(Token, lextoken='.'):\n pass\n\n\nclass T_DOTBW(Token, lextoken='.<'):\n pass\n\n\nclass T_LBRACKET(Token, lextoken='['):\n pass\n\n\nclass T_RBRACKET(Token, lextoken=']'):\n pass\n\n\nclass T_LPAREN(Token, lextoken='('):\n pass\n\n\nclass T_RPAREN(Token, lextoken=')'):\n pass\n\n\nclass T_LBRACE(Token, lextoken='{'):\n pass\n\n\nclass T_RBRACE(Token, lextoken='}'):\n pass\n\n\nclass T_DOUBLECOLON(Token, lextoken='::'):\n pass\n\n\nclass T_DOUBLEQMARK(Token, lextoken='??'):\n pass\n\n\nclass T_COLON(Token, lextoken=':'):\n pass\n\n\nclass T_SEMICOLON(Token, lextoken=';'):\n pass\n\n\nclass T_COMMA(Token, lextoken=','):\n pass\n\n\nclass T_PLUS(Token, lextoken='+'):\n pass\n\n\nclass T_DOUBLEPLUS(Token, lextoken='++'):\n pass\n\n\nclass T_MINUS(Token, lextoken='-'):\n pass\n\n\nclass T_STAR(Token, lextoken='*'):\n pass\n\n\nclass T_SLASH(Token, lextoken='/'):\n pass\n\n\nclass T_DOUBLESLASH(Token, lextoken='//'):\n pass\n\n\nclass T_PERCENT(Token, lextoken='%'):\n pass\n\n\nclass T_CIRCUMFLEX(Token, lextoken='^'):\n pass\n\n\nclass T_AT(Token, lextoken='@'):\n pass\n\n\nclass T_ARGUMENT(Token):\n pass\n\n\nclass T_ASSIGN(Token):\n pass\n\n\nclass T_ADDASSIGN(Token):\n pass\n\n\nclass T_REMASSIGN(Token):\n pass\n\n\nclass T_ARROW(Token):\n pass\n\n\nclass T_LANGBRACKET(Token, lextoken='<'):\n pass\n\n\nclass T_RANGBRACKET(Token, lextoken='>'):\n pass\n\n\nclass T_EQUALS(Token, lextoken='='):\n pass\n\n\nclass T_AMPER(Token, lextoken='&'):\n pass\n\n\nclass T_PIPE(Token, lextoken='|'):\n pass\n\n\nclass T_NAMEDONLY(Token):\n pass\n\n\nclass T_SETANNOTATION(Token):\n pass\n\n\nclass T_SETTYPE(Token):\n pass\n\n\nclass T_ICONST(Token):\n pass\n\n\nclass T_NICONST(Token):\n pass\n\n\nclass T_FCONST(Token):\n pass\n\n\nclass T_NFCONST(Token):\n pass\n\n\nclass T_BCONST(Token):\n pass\n\n\nclass T_SCONST(Token):\n pass\n\n\nclass T_RSCONST(Token):\n pass\n\n\nclass T_IDENT(Token):\n pass\n\n\nclass T_OP(Token):\n pass\n\n\nclass T_EOF(Token):\n pass\n\n\ndef _gen_keyword_tokens():\n # Define keyword tokens\n\n mod = sys.modules[__name__]\n\n def clsexec(ns):\n ns['__module__'] = __name__\n return ns\n\n for token, _ in keywords.edgeql_keywords.values():\n clsname = 'T_{}'.format(token)\n clskwds = dict(metaclass=parsing.TokenMeta, token=token)\n cls = types.new_class(clsname, (Token,), clskwds, clsexec)\n setattr(mod, clsname, cls)\n\n\n_gen_keyword_tokens()\n", "path": "edb/edgeql/parser/grammar/tokens.py" } ]
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport types\n\nfrom edb.common import parsing\n\nfrom . import keywords\nfrom . import precedence\nfrom . import lexer\n\n\nclean_string = re.compile(r\"'(?:\\s|\\n)+'\")\nstring_quote = re.compile(lexer.re_dquote)\n\n\nclass TokenMeta(parsing.TokenMeta):\n pass\n\n\nclass Token(parsing.Token, metaclass=TokenMeta,\n precedence_class=precedence.PrecedenceMeta):\n pass\n\n\nclass T_DOT(Token, lextoken='.'):\n pass\n\n\nclass T_DOTBW(Token, lextoken='.<'):\n pass\n\n\nclass T_LBRACKET(Token, lextoken='['):\n pass\n\n\nclass T_RBRACKET(Token, lextoken=']'):\n pass\n\n\nclass T_LPAREN(Token, lextoken='('):\n pass\n\n\nclass T_RPAREN(Token, lextoken=')'):\n pass\n\n\nclass T_LBRACE(Token, lextoken='{'):\n pass\n\n\nclass T_RBRACE(Token, lextoken='}'):\n pass\n\n\nclass T_DOUBLECOLON(Token, lextoken='::'):\n pass\n\n\nclass T_DOUBLEQMARK(Token, lextoken='??'):\n pass\n\n\nclass T_COLON(Token, lextoken=':'):\n pass\n\n\nclass T_SEMICOLON(Token, lextoken=';'):\n pass\n\n\nclass T_COMMA(Token, lextoken=','):\n pass\n\n\nclass T_PLUS(Token, lextoken='+'):\n pass\n\n\nclass T_DOUBLEPLUS(Token, lextoken='++'):\n pass\n\n\nclass T_MINUS(Token, lextoken='-'):\n pass\n\n\nclass T_STAR(Token, lextoken='*'):\n pass\n\n\nclass T_SLASH(Token, lextoken='/'):\n pass\n\n\nclass T_DOUBLESLASH(Token, lextoken='//'):\n pass\n\n\nclass T_PERCENT(Token, lextoken='%'):\n pass\n\n\nclass T_CIRCUMFLEX(Token, lextoken='^'):\n pass\n\n\nclass T_AT(Token, lextoken='@'):\n pass\n\n\nclass T_ARGUMENT(Token):\n pass\n\n\nclass T_ASSIGN(Token):\n pass\n\n\nclass T_ADDASSIGN(Token):\n pass\n\n\nclass T_REMASSIGN(Token):\n pass\n\n\nclass T_ARROW(Token):\n pass\n\n\nclass T_LANGBRACKET(Token, lextoken='<'):\n pass\n\n\nclass T_RANGBRACKET(Token, lextoken='>'):\n pass\n\n\nclass T_EQUALS(Token, lextoken='='):\n pass\n\n\nclass T_AMPER(Token, lextoken='&'):\n pass\n\n\nclass T_PIPE(Token, lextoken='|'):\n pass\n\n\nclass T_NAMEDONLY(Token):\n pass\n\n\nclass T_SETANNOTATION(Token):\n pass\n\n\nclass T_SETTYPE(Token):\n pass\n\n\nclass T_ICONST(Token):\n pass\n\n\nclass T_NICONST(Token):\n pass\n\n\nclass T_FCONST(Token):\n pass\n\n\nclass T_NFCONST(Token):\n pass\n\n\nclass T_BCONST(Token):\n pass\n\n\nclass T_SCONST(Token):\n pass\n\n\nclass T_RSCONST(Token):\n pass\n\n\nclass T_IDENT(Token):\n pass\n\n\nclass T_OP(Token):\n pass\n\n\nclass T_SUBSTITUTION(Token):\n pass\n\n\nclass T_EOF(Token):\n pass\n\n\ndef _gen_keyword_tokens():\n # Define keyword tokens\n\n mod = sys.modules[__name__]\n\n def clsexec(ns):\n ns['__module__'] = __name__\n return ns\n\n for token, _ in keywords.edgeql_keywords.values():\n clsname = 'T_{}'.format(token)\n clskwds = dict(metaclass=parsing.TokenMeta, token=token)\n cls = types.new_class(clsname, (Token,), clskwds, clsexec)\n setattr(mod, clsname, cls)\n\n\n_gen_keyword_tokens()\n", "path": "edb/edgeql/parser/grammar/tokens.py" } ]
diff --git a/edb/edgeql/parser/grammar/tokens.py b/edb/edgeql/parser/grammar/tokens.py index e7dc0f4e248..69a24c24daa 100644 --- a/edb/edgeql/parser/grammar/tokens.py +++ b/edb/edgeql/parser/grammar/tokens.py @@ -219,6 +219,10 @@ class T_OP(Token): pass +class T_SUBSTITUTION(Token): + pass + + class T_EOF(Token): pass
strawberry-graphql__strawberry-378
Cannot create type with multiple Unions ```python from typing import Union import strawberry @strawberry.type class CoolType: @strawberry.type class UnionA1: value: int @strawberry.type class UnionA2: value: int @strawberry.type class UnionB1: value: int @strawberry.type class UnionB2: value: int field1: Union[UnionA1, UnionA2] field2: Union[UnionB1, UnionB2] schema = strawberry.Schema(query=CoolType) ``` ```.pytb Traceback (most recent call last): File "/home/ignormies/.config/JetBrains/PyCharm2020.1/scratches/scratch.py", line 28, in <module> schema = strawberry.Schema(query=CoolType) File "/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/strawberry/schema.py", line 25, in __init__ super().__init__( File "/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/graphql/type/schema.py", line 239, in __init__ raise TypeError( TypeError: Schema must contain uniquely named types but contains multiple types named '_resolver'. ``` Removing either `field1` or `field2` allows the schema to be created
[ { "content": "import copy\nimport dataclasses\nfrom functools import partial\nfrom typing import Optional\n\nfrom graphql import GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .field import field, strawberry_field\nfrom .type_registry import register_type\nfrom .utils.str_converters import to_camel_case\nfrom .utils.typing import get_actual_type, has_type_var, is_type_var\n\n\ndef _interface_resolve_type(result, info, return_type):\n \"\"\"Resolves the correct type for an interface\"\"\"\n return result.__class__.graphql_type\n\n\ndef _get_resolver(cls, field_name):\n class_field = getattr(cls, field_name, None)\n\n if class_field and getattr(class_field, \"resolver\", None):\n return class_field.resolver\n\n def _resolver(root, info):\n if not root:\n return None\n\n field_resolver = getattr(root, field_name, None)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(root, info)\n\n elif field_resolver.__class__ is strawberry_field:\n # TODO: support default values\n return None\n\n return field_resolver\n\n return _resolver\n\n\ndef _process_type(\n cls, *, name=None, is_input=False, is_interface=False, description=None\n):\n name = name or cls.__name__\n\n def _get_fields(wrapped, types_replacement_map=None):\n class_fields = dataclasses.fields(wrapped)\n\n fields = {}\n\n for class_field in class_fields:\n # we want to make a copy of the original field when dealing\n # with generic types and also get the actual type for the type var\n if is_type_var(class_field.type) or has_type_var(class_field.type):\n class_field = copy.copy(class_field)\n class_field.type = get_actual_type(\n class_field.type, types_replacement_map\n )\n # like args, a None default implies Optional\n if class_field.default is None:\n class_field.type = Optional[class_field.type]\n\n field_name = getattr(class_field, \"field_name\", None) or to_camel_case(\n class_field.name\n )\n description = getattr(class_field, \"field_description\", None)\n permission_classes = getattr(class_field, \"field_permission_classes\", None)\n resolver = getattr(class_field, \"field_resolver\", None) or _get_resolver(\n cls, class_field.name\n )\n resolver.__annotations__[\"return\"] = class_field.type\n\n fields[field_name] = field(\n resolver,\n is_input=is_input,\n description=description,\n permission_classes=permission_classes,\n ).graphql_type\n # supply a graphql default_value if the type annotation has a default\n if class_field.default not in (dataclasses.MISSING, None):\n fields[field_name].default_value = class_field.default\n\n strawberry_fields = {}\n\n for base in [cls, *cls.__bases__]:\n strawberry_fields.update(\n {\n key: value\n for key, value in base.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n for key, value in strawberry_fields.items():\n name = getattr(value, \"field_name\", None) or to_camel_case(key)\n\n fields[name] = value.graphql_type\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n wrapped = dataclasses.dataclass(cls)\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n\n # TODO: in future we might want to be able to override this\n # for example to map a class (like a django model) to one\n # type of the interface\n extra_kwargs[\"resolve_type\"] = _interface_resolve_type\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.graphql_type\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n graphql_type = TypeClass(\n name,\n lambda types_replacement_map=None: _get_fields(wrapped, types_replacement_map),\n **extra_kwargs\n )\n register_type(cls, graphql_type)\n\n return wrapped\n\n\ndef type(cls=None, *, name=None, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/type.py" } ]
[ { "content": "import copy\nimport dataclasses\nfrom functools import partial\nfrom typing import Optional\n\nfrom graphql import GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .field import field, strawberry_field\nfrom .type_registry import register_type\nfrom .utils.str_converters import to_camel_case\nfrom .utils.typing import get_actual_type, has_type_var, is_type_var\n\n\ndef _interface_resolve_type(result, info, return_type):\n \"\"\"Resolves the correct type for an interface\"\"\"\n return result.__class__.graphql_type\n\n\ndef _get_resolver(cls, field_name):\n class_field = getattr(cls, field_name, None)\n\n if class_field and getattr(class_field, \"resolver\", None):\n return class_field.resolver\n\n def _resolver(root, info):\n if not root:\n return None\n\n field_resolver = getattr(root, field_name, None)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(root, info)\n\n elif field_resolver.__class__ is strawberry_field:\n # TODO: support default values\n return None\n\n return field_resolver\n\n _resolver.__name__ = field_name\n\n return _resolver\n\n\ndef _process_type(\n cls, *, name=None, is_input=False, is_interface=False, description=None\n):\n name = name or cls.__name__\n\n def _get_fields(wrapped, types_replacement_map=None):\n class_fields = dataclasses.fields(wrapped)\n\n fields = {}\n\n for class_field in class_fields:\n # we want to make a copy of the original field when dealing\n # with generic types and also get the actual type for the type var\n if is_type_var(class_field.type) or has_type_var(class_field.type):\n class_field = copy.copy(class_field)\n class_field.type = get_actual_type(\n class_field.type, types_replacement_map\n )\n # like args, a None default implies Optional\n if class_field.default is None:\n class_field.type = Optional[class_field.type]\n\n field_name = getattr(class_field, \"field_name\", None) or to_camel_case(\n class_field.name\n )\n description = getattr(class_field, \"field_description\", None)\n permission_classes = getattr(class_field, \"field_permission_classes\", None)\n resolver = getattr(class_field, \"field_resolver\", None) or _get_resolver(\n cls, class_field.name\n )\n resolver.__annotations__[\"return\"] = class_field.type\n\n fields[field_name] = field(\n resolver,\n is_input=is_input,\n description=description,\n permission_classes=permission_classes,\n ).graphql_type\n # supply a graphql default_value if the type annotation has a default\n if class_field.default not in (dataclasses.MISSING, None):\n fields[field_name].default_value = class_field.default\n\n strawberry_fields = {}\n\n for base in [cls, *cls.__bases__]:\n strawberry_fields.update(\n {\n key: value\n for key, value in base.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n for key, value in strawberry_fields.items():\n name = getattr(value, \"field_name\", None) or to_camel_case(key)\n\n fields[name] = value.graphql_type\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n wrapped = dataclasses.dataclass(cls)\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n\n # TODO: in future we might want to be able to override this\n # for example to map a class (like a django model) to one\n # type of the interface\n extra_kwargs[\"resolve_type\"] = _interface_resolve_type\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.graphql_type\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n graphql_type = TypeClass(\n name,\n lambda types_replacement_map=None: _get_fields(wrapped, types_replacement_map),\n **extra_kwargs\n )\n register_type(cls, graphql_type)\n\n return wrapped\n\n\ndef type(cls=None, *, name=None, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/type.py" } ]
diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..45888264a9 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,3 @@ +Release type: patch + +This PR fixes a bug when declaring multiple non-named union types diff --git a/strawberry/type.py b/strawberry/type.py index deccac2815..9871b5fb02 100644 --- a/strawberry/type.py +++ b/strawberry/type.py @@ -38,6 +38,8 @@ def _resolver(root, info): return field_resolver + _resolver.__name__ = field_name + return _resolver diff --git a/tests/test_union.py b/tests/test_union.py index a0f7705537..4377af518e 100644 --- a/tests/test_union.py +++ b/tests/test_union.py @@ -284,3 +284,49 @@ class B: with pytest.raises(ValueError, match=r"Cannot use union type directly"): Result() + + +def test_multiple_unions(): + @strawberry.type + class CoolType: + @strawberry.type + class UnionA1: + value: int + + @strawberry.type + class UnionA2: + value: int + + @strawberry.type + class UnionB1: + value: int + + @strawberry.type + class UnionB2: + value: int + + field1: Union[UnionA1, UnionA2] + field2: Union[UnionB1, UnionB2] + + schema = strawberry.Schema(query=CoolType) + + query = """ + { + __type(name:"CoolType") { + name + description + fields { + name + } + } + } + """ + + result = graphql_sync(schema, query) + + assert not result.errors + assert result.data["__type"] == { + "description": None, + "fields": [{"name": "field1"}, {"name": "field2"}], + "name": "CoolType", + }
cython__cython-5647
[BUG] Unable to run `Cythonize.py` ### Describe the bug I tried to run `Cythonize.py` but it failed because of relative imports in file: ``` Traceback (most recent call last): File "/tmp/cython/Cython/Build/Cythonize.py", line 10, in <module> from .Dependencies import cythonize, extended_iglob ImportError: attempted relative import with no known parent package ``` ### Code to reproduce the behaviour: ```bash # download repo cd /tmp/ git clone https://github.com/cython/cython.git # make file runable cd /tmp/cython/Cython/Build/ chmod +x Cythonize.py # run file ./Cythonize.py ``` ### Expected behaviour `Cythonize.py` has the shebang so I expected it to be executable. ### OS Linux ### Python version 3.10.12 ### Cython version main branch from git ### Additional context _No response_
[ { "content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\ndef cython_compile(path_pattern, options):\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n _cython_compile_files(all_paths, options)\n\ndef _cython_compile_files(all_paths, options):\n pool = None\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n epilog=\"\"\"\\\nEnvironment variables:\n CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n of modification times and changes.\n Environment variables accepted by setuptools are supported to configure the C compiler and build:\n https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n )\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n all_paths = []\n for path in paths:\n expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]\n if not expanded_path:\n import sys\n print(\"{}: No such file or directory: '{}'\".format(sys.argv[0], path), file=sys.stderr)\n sys.exit(1)\n all_paths.extend(expanded_path)\n _cython_compile_files(all_paths, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py" } ]
[ { "content": "from __future__ import absolute_import, print_function\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\ndef cython_compile(path_pattern, options):\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n _cython_compile_files(all_paths, options)\n\ndef _cython_compile_files(all_paths, options):\n pool = None\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n epilog=\"\"\"\\\nEnvironment variables:\n CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n of modification times and changes.\n Environment variables accepted by setuptools are supported to configure the C compiler and build:\n https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n )\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n all_paths = []\n for path in paths:\n expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]\n if not expanded_path:\n import sys\n print(\"{}: No such file or directory: '{}'\".format(sys.argv[0], path), file=sys.stderr)\n sys.exit(1)\n all_paths.extend(expanded_path)\n _cython_compile_files(all_paths, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py" } ]
diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py index 179c0406025..4acc7f05aac 100644 --- a/Cython/Build/Cythonize.py +++ b/Cython/Build/Cythonize.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from __future__ import absolute_import, print_function import os
optuna__optuna-3342
Mention each tutorial page in API pages at least once <!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. --> [Optuna's tutorial](https://optuna.readthedocs.io/en/stable/tutorial/index.html) is a good source to understand Optuna's functionality with concrete examples. However, some tutorial pages might not be mentioned in corresponding Optuna's API pages. ## Description Please add a `note section` or `see also section` to mention the following tutorial pages. - ~[ ] [Lightweight, versatile, and platform agnostic architecture](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/001_first.html)~ - ~this is is a little bit conceptual page, so it might not be necessary to be linked.~ - [x] [Pythonic Search Space](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html) - [x] [Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/003_efficient_optimization_algorithms.html) - [x] [Easy Parallelization](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/004_distributed.html) - [x] [Quick Visualization for Hyperparameter Optimization Analysis](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html) - linked from https://optuna.readthedocs.io/en/stable/reference/visualization/index.html - [x] [Saving/Resuming Study with RDB Backend](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/001_rdb.html) - linked from https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html#optuna.study.create_study - [x] [Multi-objective Optimization with Optuna] (https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/002_multi_objective.html) https://github.com/optuna/optuna/pull/3339 - [x] [User Attributes](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/003_attributes.html) - [x] [Command-Line Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/004_cli.html) - linked from https://optuna.readthedocs.io/en/stable/reference/index.html - [x] [User-Defined Sampler](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/005_user_defined_sampler.html) - linked from https://optuna.readthedocs.io/en/stable/reference/samplers.html - [x] [User-Defined Pruner](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/006_user_defined_pruner.html) - linked from https://optuna.readthedocs.io/en/stable/reference/pruners.html - [x] [Callback for Study.optimize](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/007_optuna_callback.html) - linked from `callbacks`'s section in https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize - [ ] [Specify Hyperparameters Manually](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html) - [x] [the first section](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) can be linked from `Study.enqueue_trial` - [ ] [the second section](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html#second-scenario-have-optuna-utilize-already-evaluated-hyperparameters) can be linked from `Study.add_trial` (see https://github.com/optuna/optuna/pull/3346) - [x] [Ask-and-Tell Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/009_ask_and_tell.html) - [x] [Re-use the best values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) https://github.com/optuna/optuna/pull/3396
[ { "content": "\"\"\"\n.. _sampler:\n\nUser-Defined Sampler\n====================\n\nThanks to user-defined samplers, you can:\n\n- experiment your own sampling algorithms,\n- implement task-specific algorithms to refine the optimization performance, or\n- wrap other optimization libraries to integrate them into Optuna pipelines (e.g., :class:`~optuna.integration.SkoptSampler`).\n\nThis section describes the internal behavior of sampler classes and shows an example of implementing a user-defined sampler.\n\n\nOverview of Sampler\n-------------------\n\nA sampler has the responsibility to determine the parameter values to be evaluated in a trial.\nWhen a `suggest` API (e.g., :func:`~optuna.trial.Trial.suggest_float`) is called inside an objective function, the corresponding distribution object (e.g., :class:`~optuna.distributions.UniformDistribution`) is created internally. A sampler samples a parameter value from the distribution. The sampled value is returned to the caller of the `suggest` API and evaluated in the objective function.\n\nTo create a new sampler, you need to define a class that inherits :class:`~optuna.samplers.BaseSampler`.\nThe base class has three abstract methods;\n:meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`,\n:meth:`~optuna.samplers.BaseSampler.sample_relative`, and\n:meth:`~optuna.samplers.BaseSampler.sample_independent`.\n\nAs the method names imply, Optuna supports two types of sampling: one is **relative sampling** that can consider the correlation of the parameters in a trial, and the other is **independent sampling** that samples each parameter independently.\n\nAt the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` is called to provide the relative search space for the trial. Then, :meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample relative parameters from the search space. During the execution of the objective function, :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample parameters that don't belong to the relative search space.\n\n.. note::\n Please refer to the document of :class:`~optuna.samplers.BaseSampler` for further details.\n\n\nAn Example: Implementing SimulatedAnnealingSampler\n--------------------------------------------------\n\nFor example, the following code defines a sampler based on\n`Simulated Annealing (SA) <https://en.wikipedia.org/wiki/Simulated_annealing>`_:\n\"\"\"\n\nimport numpy as np\nimport optuna\n\n\nclass SimulatedAnnealingSampler(optuna.samplers.BaseSampler):\n def __init__(self, temperature=100):\n self._rng = np.random.RandomState()\n self._temperature = temperature # Current temperature.\n self._current_trial = None # Current state.\n\n def sample_relative(self, study, trial, search_space):\n if search_space == {}:\n return {}\n\n # Simulated Annealing algorithm.\n # 1. Calculate transition probability.\n prev_trial = study.trials[-2]\n if self._current_trial is None or prev_trial.value <= self._current_trial.value:\n probability = 1.0\n else:\n probability = np.exp(\n (self._current_trial.value - prev_trial.value) / self._temperature\n )\n self._temperature *= 0.9 # Decrease temperature.\n\n # 2. Transit the current state if the previous result is accepted.\n if self._rng.uniform(0, 1) < probability:\n self._current_trial = prev_trial\n\n # 3. Sample parameters from the neighborhood of the current point.\n # The sampled parameters will be used during the next execution of\n # the objective function passed to the study.\n params = {}\n for param_name, param_distribution in search_space.items():\n if not isinstance(param_distribution, optuna.distributions.UniformDistribution):\n raise NotImplementedError(\"Only suggest_float() is supported\")\n\n current_value = self._current_trial.params[param_name]\n width = (param_distribution.high - param_distribution.low) * 0.1\n neighbor_low = max(current_value - width, param_distribution.low)\n neighbor_high = min(current_value + width, param_distribution.high)\n params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)\n\n return params\n\n # The rest are unrelated to SA algorithm: boilerplate\n def infer_relative_search_space(self, study, trial):\n return optuna.samplers.intersection_search_space(study)\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n independent_sampler = optuna.samplers.RandomSampler()\n return independent_sampler.sample_independent(study, trial, param_name, param_distribution)\n\n\n###################################################################################################\n# .. note::\n# In favor of code simplicity, the above implementation doesn't support some features (e.g., maximization).\n# If you're interested in how to support those features, please see\n# `examples/samplers/simulated_annealing.py\n# <https://github.com/optuna/optuna-examples/blob/main/samplers/simulated_annealing_sampler.py>`_.\n#\n#\n# You can use ``SimulatedAnnealingSampler`` in the same way as built-in samplers as follows:\n\n\ndef objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n y = trial.suggest_float(\"y\", -5, 5)\n return x**2 + y\n\n\nsampler = SimulatedAnnealingSampler()\nstudy = optuna.create_study(sampler=sampler)\nstudy.optimize(objective, n_trials=100)\n\nbest_trial = study.best_trial\nprint(\"Best value: \", best_trial.value)\nprint(\"Parameters that achieve the best value: \", best_trial.params)\n\n\n###################################################################################################\n# In this optimization, the values of ``x`` and ``y`` parameters are sampled by using\n# ``SimulatedAnnealingSampler.sample_relative`` method.\n#\n# .. note::\n# Strictly speaking, in the first trial,\n# ``SimulatedAnnealingSampler.sample_independent`` method is used to sample parameter values.\n# Because :func:`~optuna.samplers.intersection_search_space` used in\n# ``SimulatedAnnealingSampler.infer_relative_search_space`` cannot infer the search space\n# if there are no complete trials.\n", "path": "tutorial/20_recipes/005_user_defined_sampler.py" } ]
[ { "content": "\"\"\"\n.. _user_defined_sampler:\n\nUser-Defined Sampler\n====================\n\nThanks to user-defined samplers, you can:\n\n- experiment your own sampling algorithms,\n- implement task-specific algorithms to refine the optimization performance, or\n- wrap other optimization libraries to integrate them into Optuna pipelines (e.g., :class:`~optuna.integration.SkoptSampler`).\n\nThis section describes the internal behavior of sampler classes and shows an example of implementing a user-defined sampler.\n\n\nOverview of Sampler\n-------------------\n\nA sampler has the responsibility to determine the parameter values to be evaluated in a trial.\nWhen a `suggest` API (e.g., :func:`~optuna.trial.Trial.suggest_float`) is called inside an objective function, the corresponding distribution object (e.g., :class:`~optuna.distributions.UniformDistribution`) is created internally. A sampler samples a parameter value from the distribution. The sampled value is returned to the caller of the `suggest` API and evaluated in the objective function.\n\nTo create a new sampler, you need to define a class that inherits :class:`~optuna.samplers.BaseSampler`.\nThe base class has three abstract methods;\n:meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`,\n:meth:`~optuna.samplers.BaseSampler.sample_relative`, and\n:meth:`~optuna.samplers.BaseSampler.sample_independent`.\n\nAs the method names imply, Optuna supports two types of sampling: one is **relative sampling** that can consider the correlation of the parameters in a trial, and the other is **independent sampling** that samples each parameter independently.\n\nAt the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` is called to provide the relative search space for the trial. Then, :meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample relative parameters from the search space. During the execution of the objective function, :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample parameters that don't belong to the relative search space.\n\n.. note::\n Please refer to the document of :class:`~optuna.samplers.BaseSampler` for further details.\n\n\nAn Example: Implementing SimulatedAnnealingSampler\n--------------------------------------------------\n\nFor example, the following code defines a sampler based on\n`Simulated Annealing (SA) <https://en.wikipedia.org/wiki/Simulated_annealing>`_:\n\"\"\"\n\nimport numpy as np\nimport optuna\n\n\nclass SimulatedAnnealingSampler(optuna.samplers.BaseSampler):\n def __init__(self, temperature=100):\n self._rng = np.random.RandomState()\n self._temperature = temperature # Current temperature.\n self._current_trial = None # Current state.\n\n def sample_relative(self, study, trial, search_space):\n if search_space == {}:\n return {}\n\n # Simulated Annealing algorithm.\n # 1. Calculate transition probability.\n prev_trial = study.trials[-2]\n if self._current_trial is None or prev_trial.value <= self._current_trial.value:\n probability = 1.0\n else:\n probability = np.exp(\n (self._current_trial.value - prev_trial.value) / self._temperature\n )\n self._temperature *= 0.9 # Decrease temperature.\n\n # 2. Transit the current state if the previous result is accepted.\n if self._rng.uniform(0, 1) < probability:\n self._current_trial = prev_trial\n\n # 3. Sample parameters from the neighborhood of the current point.\n # The sampled parameters will be used during the next execution of\n # the objective function passed to the study.\n params = {}\n for param_name, param_distribution in search_space.items():\n if not isinstance(param_distribution, optuna.distributions.UniformDistribution):\n raise NotImplementedError(\"Only suggest_float() is supported\")\n\n current_value = self._current_trial.params[param_name]\n width = (param_distribution.high - param_distribution.low) * 0.1\n neighbor_low = max(current_value - width, param_distribution.low)\n neighbor_high = min(current_value + width, param_distribution.high)\n params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)\n\n return params\n\n # The rest are unrelated to SA algorithm: boilerplate\n def infer_relative_search_space(self, study, trial):\n return optuna.samplers.intersection_search_space(study)\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n independent_sampler = optuna.samplers.RandomSampler()\n return independent_sampler.sample_independent(study, trial, param_name, param_distribution)\n\n\n###################################################################################################\n# .. note::\n# In favor of code simplicity, the above implementation doesn't support some features (e.g., maximization).\n# If you're interested in how to support those features, please see\n# `examples/samplers/simulated_annealing.py\n# <https://github.com/optuna/optuna-examples/blob/main/samplers/simulated_annealing_sampler.py>`_.\n#\n#\n# You can use ``SimulatedAnnealingSampler`` in the same way as built-in samplers as follows:\n\n\ndef objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n y = trial.suggest_float(\"y\", -5, 5)\n return x**2 + y\n\n\nsampler = SimulatedAnnealingSampler()\nstudy = optuna.create_study(sampler=sampler)\nstudy.optimize(objective, n_trials=100)\n\nbest_trial = study.best_trial\nprint(\"Best value: \", best_trial.value)\nprint(\"Parameters that achieve the best value: \", best_trial.params)\n\n\n###################################################################################################\n# In this optimization, the values of ``x`` and ``y`` parameters are sampled by using\n# ``SimulatedAnnealingSampler.sample_relative`` method.\n#\n# .. note::\n# Strictly speaking, in the first trial,\n# ``SimulatedAnnealingSampler.sample_independent`` method is used to sample parameter values.\n# Because :func:`~optuna.samplers.intersection_search_space` used in\n# ``SimulatedAnnealingSampler.infer_relative_search_space`` cannot infer the search space\n# if there are no complete trials.\n", "path": "tutorial/20_recipes/005_user_defined_sampler.py" } ]
diff --git a/docs/source/reference/pruners.rst b/docs/source/reference/pruners.rst index 70ad0f58c2..225c41d6c9 100644 --- a/docs/source/reference/pruners.rst +++ b/docs/source/reference/pruners.rst @@ -8,6 +8,9 @@ The :mod:`~optuna.pruners` module defines a :class:`~optuna.pruners.BasePruner` .. seealso:: :ref:`pruning` tutorial explains the concept of the pruner classes and a minimal example. +.. seealso:: + :ref:`user_defined_pruner` tutorial could be helpful if you want to implement your own pruner classes. + .. autosummary:: :toctree: generated/ :nosignatures: diff --git a/docs/source/reference/samplers/index.rst b/docs/source/reference/samplers/index.rst index e6350342d1..d890f16d97 100644 --- a/docs/source/reference/samplers/index.rst +++ b/docs/source/reference/samplers/index.rst @@ -8,6 +8,9 @@ The :mod:`~optuna.samplers` module defines a base class for parameter sampling a .. seealso:: :ref:`pruning` tutorial explains the overview of the sampler classes. +.. seealso:: + :ref:`user_defined_sampler` tutorial could be helpful if you want to implement your own sampler classes. + .. autosummary:: :toctree: generated/ :nosignatures: diff --git a/tutorial/20_recipes/005_user_defined_sampler.py b/tutorial/20_recipes/005_user_defined_sampler.py index 63b84c6bfa..59dd752834 100644 --- a/tutorial/20_recipes/005_user_defined_sampler.py +++ b/tutorial/20_recipes/005_user_defined_sampler.py @@ -1,5 +1,5 @@ """ -.. _sampler: +.. _user_defined_sampler: User-Defined Sampler ====================
streamlit__streamlit-2342
Clicking on a text field in the sidebar (on mobile) causes the sidebar to close. # Summary When the window is too narrow, clicking on a text input in the sidebar causes the sidebar to disappear, making it impossible to type in text. [[video example](https://drive.google.com/file/d/1KetCBECPsg3UAiESZCRdm6W9M_zIrNkc/view?usp=sharing)] # Steps to reproduce 1. Put a text input in the sidebar. 2. Make the Streamlit app window narrow. 3. Click the text input in the sidebar. ## Expected behavior: Ideally, the sidebar would stay open and the text input box would have focus and you coudl type something in. In fact, this _does_ happen when the window is a bit wider. [[video example](https://drive.google.com/file/d/1fObxQWIjkL_5VBJY_niltG489Ki8tRB8/view?usp=sharing)] ## Actual behavior: Clicking on a text input in the sidebar causes the sidebar to disappear, making it impossible to type in text. [[video example](https://drive.google.com/file/d/1KetCBECPsg3UAiESZCRdm6W9M_zIrNkc/view?usp=sharing)] ## Is this a regression? Unkown # Debug info - Streamlit version: `Streamlit, version 0.68.0` - Python version: `Python 3.8.5` - Using Conda? PipEnv? PyEnv? Pex? `pipenv, version 2020.8.13` - OS version: ``` Distributor ID: Ubuntu Description: Ubuntu 20.04.1 LTS Release: 20.04 Codename: focal ``` - Browser version: `Safari on iPadOS 14`
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom datetime import datetime\nfrom datetime import date\n\nw1 = st.sidebar.date_input(\"Label 1\", date(1970, 1, 1))\nst.write(\"Value 1:\", w1)\n\nw2 = st.sidebar.date_input(\"Label 2\", datetime(2019, 7, 6, 21, 15))\nst.write(\"Value 2:\", w2)\n\nx = st.sidebar.text(\"overwrite me\")\nx.text(\"overwritten\")\n", "path": "e2e/scripts/st_sidebar.py" } ]
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom datetime import datetime\nfrom datetime import date\n\nw1 = st.sidebar.date_input(\"Label 1\", date(1970, 1, 1))\nst.write(\"Value 1:\", w1)\n\nw2 = st.sidebar.date_input(\"Label 2\", datetime(2019, 7, 6, 21, 15))\nst.write(\"Value 2:\", w2)\n\nx = st.sidebar.text(\"overwrite me\")\nx.text(\"overwritten\")\n\ny = st.sidebar.text_input(\"type here\")\n", "path": "e2e/scripts/st_sidebar.py" } ]
diff --git a/e2e/scripts/st_sidebar.py b/e2e/scripts/st_sidebar.py index 42e1dc2ab3c8..9b44c2a1865f 100644 --- a/e2e/scripts/st_sidebar.py +++ b/e2e/scripts/st_sidebar.py @@ -24,3 +24,5 @@ x = st.sidebar.text("overwrite me") x.text("overwritten") + +y = st.sidebar.text_input("type here") diff --git a/e2e/specs/st_sidebar.spec.js b/e2e/specs/st_sidebar.spec.js index 41cfc474422f..fb0ce07a1768 100644 --- a/e2e/specs/st_sidebar.spec.js +++ b/e2e/specs/st_sidebar.spec.js @@ -41,4 +41,36 @@ describe("st.sidebar", () => { it("handles overwriting elements", () => { cy.get("[data-testid='stSidebar'] .stText").contains("overwritten"); }); + + it("collapses the sidebar on mobile resize", () => { + cy.viewport(800, 400); + cy.get("[data-testid='stSidebar']").should( + "have.attr", + "aria-expanded", + "true" + ); + + cy.viewport(400, 800); + cy.get("[data-testid='stSidebar']").should( + "have.attr", + "aria-expanded", + "false" + ); + }); + + it("does not collapse on text input on mobile", () => { + cy.viewport(400, 800); + // Expand the sidebar on mobile, with a manual click + cy.get("[data-testid='stSidebar'] button") + .eq(1) + .click(); + + cy.get("[data-testid='stSidebar'] .stTextInput input").click(); + + cy.get("[data-testid='stSidebar']").should( + "have.attr", + "aria-expanded", + "true" + ); + }); }); diff --git a/frontend/src/components/core/Sidebar/Sidebar.tsx b/frontend/src/components/core/Sidebar/Sidebar.tsx index 50957b8eb299..9bf26f0c00dc 100644 --- a/frontend/src/components/core/Sidebar/Sidebar.tsx +++ b/frontend/src/components/core/Sidebar/Sidebar.tsx @@ -37,6 +37,7 @@ export interface SidebarProps { interface State { collapsedSidebar: boolean + lastInnerWidth: number } class Sidebar extends PureComponent<SidebarProps, State> { @@ -60,6 +61,7 @@ class Sidebar extends PureComponent<SidebarProps, State> { ) this.state = { collapsedSidebar: Sidebar.shouldCollapse(props, this.mediumBreakpointPx), + lastInnerWidth: window ? window.innerWidth : Infinity, } } @@ -126,8 +128,14 @@ class Sidebar extends PureComponent<SidebarProps, State> { const { innerWidth } = window - if (innerWidth <= this.mediumBreakpointPx) + // Collapse the sidebar if the window was narrowed and is now mobile-sized + if ( + innerWidth < this.state.lastInnerWidth && + innerWidth <= this.mediumBreakpointPx + ) { this.setState({ collapsedSidebar: true }) + } + this.setState({ lastInnerWidth: innerWidth }) return true }
pytorch__text-87
Length of iterator fails in Python 2 The division `len(dataset) / batch_size` will be cast to int in python2, so that `math.ceil` doesn't really work when `len(dataset)` is not a multiple of batch size.
[ { "content": "import math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py" } ]
[ { "content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py" } ]
diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py index 0970ac1335..08a99e21a4 100644 --- a/torchtext/data/iterator.py +++ b/torchtext/data/iterator.py @@ -1,3 +1,5 @@ +from __future__ import division + import math import random from contextlib import contextmanager
pandas-dev__pandas-28734
Error while installing dependencies While running: ``` python3 -m venv pandas-venv source pandas-venv/bin/activate cd pandas-MyUserName python -m pip install -r requirements-dev.txt ``` I get an error on line 20: > ERROR: Could not find a version that satisfies the requirement dask-core (from -r requirements-dev.txt (line 20)) (from versions: none) ERROR: No matching distribution found for dask-core (from -r requirements-dev.txt (line 20)) the output of ```uname -a``` is > Linux null 5.3.1-arch1-1-ARCH #1 SMP PREEMPT Sat Sep 21 11:33:49 UTC 2019 x86_64 GNU/Linux Error output: ![Error](https://user-images.githubusercontent.com/50263213/65962811-275f5d80-e462-11e9-8054-9b241e8811b6.png) ``` (pandas-venv) [bummy@null pandas-MomIsBestFriend]$ python -m pip install -r requirements-dev.txt Collecting numpy>=1.15 (from -r requirements-dev.txt (line 1)) Downloading https://files.pythonhosted.org/packages/ba/e0/46e2f0540370f2661b044647fa447fef2ecbcc8f7cdb4329ca2feb03fb23/numpy-1.17.2-cp37-cp37m-manylinux1_x86_64.whl (20.3MB) |████████████████████████████████| 20.3MB 4.1MB/s Collecting python-dateutil>=2.6.1 (from -r requirements-dev.txt (line 2)) Downloading https://files.pythonhosted.org/packages/41/17/c62faccbfbd163c7f57f3844689e3a78bae1f403648a6afb1d0866d87fbb/python_dateutil-2.8.0-py2.py3-none-any.whl (226kB) |████████████████████████████████| 235kB 3.9MB/s Collecting pytz (from -r requirements-dev.txt (line 3)) Downloading https://files.pythonhosted.org/packages/87/76/46d697698a143e05f77bec5a526bf4e56a0be61d63425b68f4ba553b51f2/pytz-2019.2-py2.py3-none-any.whl (508kB) |████████████████████████████████| 512kB 4.1MB/s Collecting asv (from -r requirements-dev.txt (line 4)) Downloading https://files.pythonhosted.org/packages/6e/94/4521cc0183a5656de9470452ddd2b6170a2d04ba9b18b84c597db09b8b0d/asv-0.4.1.tar.gz (470kB) |████████████████████████████████| 471kB 2.0MB/s Collecting cython>=0.29.13 (from -r requirements-dev.txt (line 5)) Downloading https://files.pythonhosted.org/packages/f1/d3/03a01bcf424eb86d3e9d818e2082ced2d512001af89183fca6f550c32bc2/Cython-0.29.13-cp37-cp37m-manylinux1_x86_64.whl (2.1MB) |████████████████████████████████| 2.1MB 4.4MB/s Collecting black (from -r requirements-dev.txt (line 6)) Downloading https://files.pythonhosted.org/packages/30/62/cf549544a5fe990bbaeca21e9c419501b2de7a701ab0afb377bc81676600/black-19.3b0-py36-none-any.whl (89kB) |████████████████████████████████| 92kB 6.2MB/s Collecting cpplint (from -r requirements-dev.txt (line 7)) Downloading https://files.pythonhosted.org/packages/30/9f/a44a503d457ebdb78298a1cb4dad99c1e506b901b300829fc28f3b3ddd6a/cpplint-1.4.4-py3-none-any.whl (73kB) |████████████████████████████████| 81kB 5.2MB/s Collecting flake8 (from -r requirements-dev.txt (line 8)) Downloading https://files.pythonhosted.org/packages/26/de/3f815a99d86eb10464ea7bd6059c0172c7ca97d4bdcfca41051b388a653b/flake8-3.7.8-py2.py3-none-any.whl (70kB) |████████████████████████████████| 71kB 6.5MB/s Collecting flake8-comprehensions (from -r requirements-dev.txt (line 9)) Downloading https://files.pythonhosted.org/packages/1e/5d/8e71c58199e70ee5e102212e4a6e8cd9ac6da004b03c1461c883cdbc3f83/flake8_comprehensions-2.2.0-py3-none-any.whl Collecting flake8-rst<=0.7.0,>=0.6.0 (from -r requirements-dev.txt (line 10)) Downloading https://files.pythonhosted.org/packages/81/50/96c9207354feae7ff27dcbd5e27585bac55a1a7b539f6b15e1fced1904a8/flake8_rst-0.7.0-py3-none-any.whl Collecting isort (from -r requirements-dev.txt (line 11)) Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB) |████████████████████████████████| 51kB 10.0MB/s Collecting mypy (from -r requirements-dev.txt (line 12)) Downloading https://files.pythonhosted.org/packages/12/b0/b89484a61af650b731284aa20a3d0c268645ab28ffdeed41beab6a7ed640/mypy-0.730-cp37-cp37m-manylinux1_x86_64.whl (22.8MB) |████████████████████████████████| 22.8MB 2.1MB/s Collecting pycodestyle (from -r requirements-dev.txt (line 13)) Downloading https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl (51kB) |████████████████████████████████| 51kB 3.6MB/s Collecting gitpython (from -r requirements-dev.txt (line 14)) Downloading https://files.pythonhosted.org/packages/8e/c7/70bd352e8a561a9b6d1cde9aa313b9d7c871b0c94c3821f44c01f3187e1d/GitPython-3.0.2-py3-none-any.whl (453kB) |████████████████████████████████| 460kB 5.3MB/s Collecting sphinx==1.8.5 (from -r requirements-dev.txt (line 15)) Downloading https://files.pythonhosted.org/packages/7d/66/a4af242b4348b729b9d46ce5db23943ce9bca7da9bbe2ece60dc27f26420/Sphinx-1.8.5-py2.py3-none-any.whl (3.1MB) |████████████████████████████████| 3.1MB 2.7MB/s Collecting numpydoc>=0.9.0 (from -r requirements-dev.txt (line 16)) Downloading https://files.pythonhosted.org/packages/6a/f3/7cfe4c616e4b9fe05540256cc9c6661c052c8a4cec2915732793b36e1843/numpydoc-0.9.1.tar.gz Collecting nbconvert>=5.4.1 (from -r requirements-dev.txt (line 17)) Downloading https://files.pythonhosted.org/packages/f9/df/4505c0a7fea624cac461d0f41051f33456ae656753f65cee8c2f43121cb2/nbconvert-5.6.0-py2.py3-none-any.whl (453kB) |████████████████████████████████| 460kB 5.5MB/s Collecting nbsphinx (from -r requirements-dev.txt (line 18)) Downloading https://files.pythonhosted.org/packages/39/1f/c912f2f95d53d818dc76867f950883ae8a92849b7bef12a783106143cf08/nbsphinx-0.4.3-py2.py3-none-any.whl Collecting pandoc (from -r requirements-dev.txt (line 19)) Downloading https://files.pythonhosted.org/packages/49/b1/d2d4b30ee81ea5cb7aee5ba3591752a637fdc49d0a42fa9683874b60b9fb/pandoc-1.0.2.tar.gz (488kB) |████████████████████████████████| 491kB 4.4MB/s Collecting dask-core (from -r requirements-dev.txt (line 20)) ERROR: Could not find a version that satisfies the requirement dask-core (from -r requirements-dev.txt (line 20)) (from versions: none) ERROR: No matching distribution found for dask-core (from -r requirements-dev.txt (line 20)) (pandas-venv) [bummy@null pandas-MomIsBestFriend]$ ```
[ { "content": "#!/usr/bin/env python\n\"\"\"\nConvert the conda environment.yml to the pip requirements-dev.txt,\nor check that they have the same packages (for the CI)\n\nUsage:\n\n Generate `requirements-dev.txt`\n $ ./conda_to_pip\n\n Compare and fail (exit status != 0) if `requirements-dev.txt` has not been\n generated with this script:\n $ ./conda_to_pip --compare\n\"\"\"\nimport argparse\nimport os\nimport re\nimport sys\n\nimport yaml\n\nEXCLUDE = {\"python=3\"}\nRENAME = {\"pytables\": \"tables\", \"pyqt\": \"pyqt5\", \"dask-core\": \"dask\"}\n\n\ndef conda_package_to_pip(package):\n \"\"\"\n Convert a conda package to its pip equivalent.\n\n In most cases they are the same, those are the exceptions:\n - Packages that should be excluded (in `EXCLUDE`)\n - Packages that should be renamed (in `RENAME`)\n - A package requiring a specific version, in conda is defined with a single\n equal (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)\n \"\"\"\n if package in EXCLUDE:\n return\n\n package = re.sub(\"(?<=[^<>])=\", \"==\", package).strip()\n for compare in (\"<=\", \">=\", \"==\"):\n if compare not in package:\n continue\n\n pkg, version = package.split(compare)\n\n if pkg in RENAME:\n return \"\".join((RENAME[pkg], compare, version))\n\n break\n\n return package\n\n\ndef main(conda_fname, pip_fname, compare=False):\n \"\"\"\n Generate the pip dependencies file from the conda file, or compare that\n they are synchronized (``compare=True``).\n\n Parameters\n ----------\n conda_fname : str\n Path to the conda file with dependencies (e.g. `environment.yml`).\n pip_fname : str\n Path to the pip file with dependencies (e.g. `requirements-dev.txt`).\n compare : bool, default False\n Whether to generate the pip file (``False``) or to compare if the\n pip file has been generated with this script and the last version\n of the conda file (``True``).\n\n Returns\n -------\n bool\n True if the comparison fails, False otherwise\n \"\"\"\n with open(conda_fname) as conda_fd:\n deps = yaml.safe_load(conda_fd)[\"dependencies\"]\n\n pip_deps = []\n for dep in deps:\n if isinstance(dep, str):\n conda_dep = conda_package_to_pip(dep)\n if conda_dep:\n pip_deps.append(conda_dep)\n elif isinstance(dep, dict) and len(dep) == 1 and \"pip\" in dep:\n pip_deps += dep[\"pip\"]\n else:\n raise ValueError(\"Unexpected dependency {}\".format(dep))\n\n pip_content = \"\\n\".join(pip_deps)\n\n if compare:\n with open(pip_fname) as pip_fd:\n return pip_content != pip_fd.read()\n else:\n with open(pip_fname, \"w\") as pip_fd:\n pip_fd.write(pip_content)\n return False\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(\n description=\"convert (or compare) conda file to pip\"\n )\n argparser.add_argument(\n \"--compare\",\n action=\"store_true\",\n help=\"compare whether the two files are equivalent\",\n )\n argparser.add_argument(\n \"--azure\", action=\"store_true\", help=\"show the output in azure-pipelines format\"\n )\n args = argparser.parse_args()\n\n repo_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\n res = main(\n os.path.join(repo_path, \"environment.yml\"),\n os.path.join(repo_path, \"requirements-dev.txt\"),\n compare=args.compare,\n )\n if res:\n msg = (\n \"`requirements-dev.txt` has to be generated with `{}` after \"\n \"`environment.yml` is modified.\\n\".format(sys.argv[0])\n )\n if args.azure:\n msg = (\n \"##vso[task.logissue type=error;\"\n \"sourcepath=requirements-dev.txt]{}\".format(msg)\n )\n sys.stderr.write(msg)\n sys.exit(res)\n", "path": "scripts/generate_pip_deps_from_conda.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"\nConvert the conda environment.yml to the pip requirements-dev.txt,\nor check that they have the same packages (for the CI)\n\nUsage:\n\n Generate `requirements-dev.txt`\n $ ./conda_to_pip\n\n Compare and fail (exit status != 0) if `requirements-dev.txt` has not been\n generated with this script:\n $ ./conda_to_pip --compare\n\"\"\"\nimport argparse\nimport os\nimport re\nimport sys\n\nimport yaml\n\nEXCLUDE = {\"python=3\"}\nRENAME = {\"pytables\": \"tables\", \"pyqt\": \"pyqt5\", \"dask-core\": \"dask\"}\n\n\ndef conda_package_to_pip(package):\n \"\"\"\n Convert a conda package to its pip equivalent.\n\n In most cases they are the same, those are the exceptions:\n - Packages that should be excluded (in `EXCLUDE`)\n - Packages that should be renamed (in `RENAME`)\n - A package requiring a specific version, in conda is defined with a single\n equal (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)\n \"\"\"\n if package in EXCLUDE:\n return\n\n package = re.sub(\"(?<=[^<>])=\", \"==\", package).strip()\n for compare in (\"<=\", \">=\", \"==\"):\n if compare not in package:\n continue\n\n pkg, version = package.split(compare)\n\n if pkg in RENAME:\n return \"\".join((RENAME[pkg], compare, version))\n\n break\n\n if package in RENAME:\n return RENAME[package]\n\n return package\n\n\ndef main(conda_fname, pip_fname, compare=False):\n \"\"\"\n Generate the pip dependencies file from the conda file, or compare that\n they are synchronized (``compare=True``).\n\n Parameters\n ----------\n conda_fname : str\n Path to the conda file with dependencies (e.g. `environment.yml`).\n pip_fname : str\n Path to the pip file with dependencies (e.g. `requirements-dev.txt`).\n compare : bool, default False\n Whether to generate the pip file (``False``) or to compare if the\n pip file has been generated with this script and the last version\n of the conda file (``True``).\n\n Returns\n -------\n bool\n True if the comparison fails, False otherwise\n \"\"\"\n with open(conda_fname) as conda_fd:\n deps = yaml.safe_load(conda_fd)[\"dependencies\"]\n\n pip_deps = []\n for dep in deps:\n if isinstance(dep, str):\n conda_dep = conda_package_to_pip(dep)\n if conda_dep:\n pip_deps.append(conda_dep)\n elif isinstance(dep, dict) and len(dep) == 1 and \"pip\" in dep:\n pip_deps += dep[\"pip\"]\n else:\n raise ValueError(\"Unexpected dependency {}\".format(dep))\n\n pip_content = \"\\n\".join(pip_deps)\n\n if compare:\n with open(pip_fname) as pip_fd:\n return pip_content != pip_fd.read()\n else:\n with open(pip_fname, \"w\") as pip_fd:\n pip_fd.write(pip_content)\n return False\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(\n description=\"convert (or compare) conda file to pip\"\n )\n argparser.add_argument(\n \"--compare\",\n action=\"store_true\",\n help=\"compare whether the two files are equivalent\",\n )\n argparser.add_argument(\n \"--azure\", action=\"store_true\", help=\"show the output in azure-pipelines format\"\n )\n args = argparser.parse_args()\n\n repo_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\n res = main(\n os.path.join(repo_path, \"environment.yml\"),\n os.path.join(repo_path, \"requirements-dev.txt\"),\n compare=args.compare,\n )\n if res:\n msg = (\n \"`requirements-dev.txt` has to be generated with `{}` after \"\n \"`environment.yml` is modified.\\n\".format(sys.argv[0])\n )\n if args.azure:\n msg = (\n \"##vso[task.logissue type=error;\"\n \"sourcepath=requirements-dev.txt]{}\".format(msg)\n )\n sys.stderr.write(msg)\n sys.exit(res)\n", "path": "scripts/generate_pip_deps_from_conda.py" } ]
diff --git a/requirements-dev.txt b/requirements-dev.txt index 698e4f3aea094..e677d835b56a5 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -17,7 +17,7 @@ numpydoc>=0.9.0 nbconvert>=5.4.1 nbsphinx pandoc -dask-core +dask toolz>=0.7.3 fsspec>=0.5.1 partd>=0.3.10 diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py index 44fe50b99560a..f1c7c3298fb26 100755 --- a/scripts/generate_pip_deps_from_conda.py +++ b/scripts/generate_pip_deps_from_conda.py @@ -48,6 +48,9 @@ def conda_package_to_pip(package): break + if package in RENAME: + return RENAME[package] + return package
d2l-ai__d2l-en-2256
ModuleNotFoundError when running the official pytorch colab notebook ![image](https://user-images.githubusercontent.com/33608782/184685789-951935c7-1725-4a23-944d-8d48b32bf76c.png) I can replicate the error at multiple official pytorch colab notebooks, e.g. https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index a5f464a9eb..931a2ce067 100644 --- a/setup.py +++ b/setup.py @@ -2,6 +2,7 @@ import d2l requirements = [ + 'ipython>=7.23', 'jupyter', 'numpy', 'matplotlib',
d2l-ai__d2l-en-2254
ModuleNotFoundError when running the official pytorch colab notebook ![image](https://user-images.githubusercontent.com/33608782/184685789-951935c7-1725-4a23-944d-8d48b32bf76c.png) I can replicate the error at multiple official pytorch colab notebooks, e.g. https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce ModuleNotFoundError when running the official pytorch colab notebook ![image](https://user-images.githubusercontent.com/33608782/184685789-951935c7-1725-4a23-944d-8d48b32bf76c.png) I can replicate the error at multiple official pytorch colab notebooks, e.g. https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 2e3dec1d41..6f14d25677 100644 --- a/setup.py +++ b/setup.py @@ -2,6 +2,7 @@ import d2l requirements = [ + 'ipython>=7.23', 'jupyter', 'numpy', 'matplotlib',
gratipay__gratipay.com-4365
Fix deploy.sh regression [Looks](https://github.com/gratipay/gratipay.com/pull/4345#issuecomment-284062013) like #4345 introduced a regression in `deploy.sh`: ``` [gratipay] $ ./deploy.sh Already up-to-date. Traceback (most recent call last): File "gratipay/wireup.py", line 11, in <module> import aspen File "/Users/whit537/personal/gratipay/gratipay.com/env/lib/python2.7/site-packages/aspen/__init__.py", line 65, in <module> import pkg_resources File "/Users/whit537/personal/gratipay/gratipay.com/env/lib/python2.7/site-packages/pkg_resources/__init__.py", line 37, in <module> import email.parser File "/Users/whit537/personal/gratipay/gratipay.com/gratipay/email.py", line 5, in <module> from aspen.simplates.pagination import parse_specline, split_and_escape File "/Users/whit537/personal/gratipay/gratipay.com/env/lib/python2.7/site-packages/aspen/simplates/__init__.py", line 15, in <module> from .. import log ImportError: cannot import name log [gratipay] $ ```
[ { "content": "\"\"\"Wireup\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport atexit\nimport fnmatch\nimport os\nimport urlparse\nfrom tempfile import mkstemp\n\nimport aspen\nfrom aspen.testing.client import Client\nfrom babel.core import Locale\nfrom babel.messages.pofile import read_po\nfrom babel.numbers import parse_pattern\nimport balanced\nimport boto3\nimport braintree\nimport gratipay\nimport gratipay.billing.payday\nimport raven\nfrom environment import Environment, is_yesish\nfrom gratipay.elsewhere import PlatformRegistry\nfrom gratipay.elsewhere.bitbucket import Bitbucket\nfrom gratipay.elsewhere.bountysource import Bountysource\nfrom gratipay.elsewhere.github import GitHub\nfrom gratipay.elsewhere.facebook import Facebook\nfrom gratipay.elsewhere.google import Google\nfrom gratipay.elsewhere.openstreetmap import OpenStreetMap\nfrom gratipay.elsewhere.twitter import Twitter\nfrom gratipay.elsewhere.venmo import Venmo\nfrom gratipay.email import compile_email_spt, ConsoleMailer\nfrom gratipay.models.account_elsewhere import AccountElsewhere\nfrom gratipay.models.community import Community\nfrom gratipay.models.country import Country\nfrom gratipay.models.exchange_route import ExchangeRoute\nfrom gratipay.models.participant import Participant, Identity\nfrom gratipay.models.team import Team\nfrom gratipay.models import GratipayDB\nfrom gratipay.security.crypto import EncryptingPacker\nfrom gratipay.utils.http_caching import asset_etag\nfrom gratipay.utils.i18n import (\n ALIASES, ALIASES_R, COUNTRIES, LANGUAGES_2, LOCALES,\n get_function_from_rule, make_sorted_dict\n)\n\ndef base_url(website, env):\n gratipay.base_url = website.base_url = env.base_url\n\ndef secure_cookies(env):\n gratipay.use_secure_cookies = env.base_url.startswith('https')\n\ndef db(env):\n dburl = env.database_url\n maxconn = env.database_maxconn\n db = GratipayDB(dburl, maxconn=maxconn)\n\n for model in (AccountElsewhere, Community, Country, ExchangeRoute, Participant, Team):\n db.register_model(model)\n gratipay.billing.payday.Payday.db = db\n\n return db\n\ndef crypto(env):\n keys = [k.encode('ASCII') for k in env.crypto_keys.split()]\n out = Identity.encrypting_packer = EncryptingPacker(*keys)\n return out\n\ndef mail(env, project_root='.'):\n if env.aws_ses_access_key_id and env.aws_ses_secret_access_key and env.aws_ses_default_region:\n aspen.log_dammit(\"AWS SES is configured! We'll send mail through SES.\")\n Participant._mailer = boto3.client( service_name='ses'\n , region_name=env.aws_ses_default_region\n , aws_access_key_id=env.aws_ses_access_key_id\n , aws_secret_access_key=env.aws_ses_secret_access_key\n )\n else:\n aspen.log_dammit(\"AWS SES is not configured! Mail will be dumped to the console here.\")\n Participant._mailer = ConsoleMailer()\n emails = {}\n emails_dir = project_root+'/emails/'\n i = len(emails_dir)\n for spt in find_files(emails_dir, '*.spt'):\n base_name = spt[i:-4]\n emails[base_name] = compile_email_spt(spt)\n Participant._emails = emails\n\ndef billing(env):\n balanced.configure(env.balanced_api_secret)\n\n if env.braintree_sandbox_mode:\n braintree_env = braintree.Environment.Sandbox\n else:\n braintree_env = braintree.Environment.Production\n\n braintree.Configuration.configure(\n braintree_env,\n env.braintree_merchant_id,\n env.braintree_public_key,\n env.braintree_private_key\n )\n\n\ndef team_review(env):\n Team.review_repo = env.team_review_repo\n Team.review_auth = (env.team_review_username, env.team_review_token)\n\n\ndef username_restrictions(website):\n gratipay.RESTRICTED_USERNAMES = os.listdir(website.www_root)\n\n\ndef make_sentry_teller(env, _noop=None):\n if not env.sentry_dsn:\n aspen.log_dammit(\"Won't log to Sentry (SENTRY_DSN is empty).\")\n noop = _noop or (lambda *a, **kw: None)\n Participant._tell_sentry = noop\n return noop\n\n sentry = raven.Client(env.sentry_dsn)\n\n def tell_sentry(exception, state):\n\n # Decide if we care.\n # ==================\n\n if isinstance(exception, aspen.Response):\n\n if exception.code < 500:\n\n # Only log server errors to Sentry. For responses < 500 we use\n # stream-/line-based access logging. See discussion on:\n\n # https://github.com/gratipay/gratipay.com/pull/1560.\n\n return\n\n\n # Find a user.\n # ============\n # | is disallowed in usernames, so we can use it here to indicate\n # situations in which we can't get a username.\n\n user = state.get('user')\n user_id = 'n/a'\n if user is None:\n username = '| no user'\n else:\n is_anon = getattr(user, 'ANON', None)\n if is_anon is None:\n username = '| no ANON'\n elif is_anon:\n username = '| anonymous'\n else:\n participant = getattr(user, 'participant', None)\n if participant is None:\n username = '| no participant'\n else:\n username = getattr(user.participant, 'username', None)\n if username is None:\n username = '| no username'\n else:\n user_id = user.participant.id\n username = username.encode('utf8')\n user = { 'id': user_id\n , 'is_admin': user.participant.is_admin\n , 'is_suspicious': user.participant.is_suspicious\n , 'claimed_time': user.participant.claimed_time.isoformat()\n , 'url': 'https://gratipay.com/{}/'.format(username)\n }\n\n\n # Fire off a Sentry call.\n # =======================\n\n dispatch_result = state.get('dispatch_result')\n request = state.get('request')\n tags = { 'username': username\n , 'user_id': user_id\n }\n extra = { 'filepath': getattr(dispatch_result, 'match', None)\n , 'request': str(request).splitlines()\n , 'user': user\n }\n result = sentry.captureException(tags=tags, extra=extra)\n\n\n # Emit a reference string to stdout.\n # ==================================\n\n ident = sentry.get_ident(result)\n aspen.log_dammit('Exception reference: ' + ident)\n\n Participant._tell_sentry = tell_sentry\n return tell_sentry\n\n\nclass BadEnvironment(SystemExit):\n pass\n\n\ndef accounts_elsewhere(website, env):\n\n twitter = Twitter(\n env.twitter_consumer_key,\n env.twitter_consumer_secret,\n env.twitter_callback,\n )\n facebook = Facebook(\n env.facebook_app_id,\n env.facebook_app_secret,\n env.facebook_callback,\n )\n github = GitHub(\n env.github_client_id,\n env.github_client_secret,\n env.github_callback,\n )\n google = Google(\n env.google_client_id,\n env.google_client_secret,\n env.google_callback,\n )\n bitbucket = Bitbucket(\n env.bitbucket_consumer_key,\n env.bitbucket_consumer_secret,\n env.bitbucket_callback,\n )\n openstreetmap = OpenStreetMap(\n env.openstreetmap_consumer_key,\n env.openstreetmap_consumer_secret,\n env.openstreetmap_callback,\n env.openstreetmap_api_url,\n env.openstreetmap_auth_url,\n )\n bountysource = Bountysource(\n None,\n env.bountysource_api_secret,\n env.bountysource_callback,\n env.bountysource_api_host,\n env.bountysource_www_host,\n )\n venmo = Venmo(\n env.venmo_client_id,\n env.venmo_client_secret,\n env.venmo_callback,\n )\n\n signin_platforms = [twitter, github, facebook, google, bitbucket, openstreetmap]\n website.signin_platforms = PlatformRegistry(signin_platforms)\n AccountElsewhere.signin_platforms_names = tuple(p.name for p in signin_platforms)\n\n # For displaying \"Connected Accounts\"\n website.social_profiles = [twitter, github, facebook, google, bitbucket, openstreetmap, bountysource]\n\n all_platforms = signin_platforms + [bountysource, venmo]\n website.platforms = AccountElsewhere.platforms = PlatformRegistry(all_platforms)\n\n friends_platforms = [p for p in website.platforms if getattr(p, 'api_friends_path', None)]\n website.friends_platforms = PlatformRegistry(friends_platforms)\n\n for platform in all_platforms:\n platform.icon = website.asset('platforms/%s.16.png' % platform.name)\n platform.logo = website.asset('platforms/%s.png' % platform.name)\n\n\ndef find_files(directory, pattern):\n for root, dirs, files in os.walk(directory):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(root, filename)\n\n\ndef compile_assets(website):\n client = Client(website.www_root, website.project_root)\n client._website = website\n for spt in find_files(website.www_root+'/assets/', '*.spt'):\n filepath = spt[:-4] # /path/to/www/assets/foo.css\n urlpath = spt[spt.rfind('/assets/'):-4] # /assets/foo.css\n if urlpath == '/assets/_well-known/acme-challenge/%token':\n # This *should* be dynamic.\n continue\n try:\n # Remove any existing compiled asset, so we can access the dynamic\n # one instead (Aspen prefers foo.css over foo.css.spt).\n os.unlink(filepath)\n except:\n pass\n headers = {}\n if website.base_url:\n url = urlparse.urlparse(website.base_url)\n headers[b'HTTP_X_FORWARDED_PROTO'] = str(url.scheme)\n headers[b'HTTP_HOST'] = str(url.netloc)\n content = client.GET(urlpath, **headers).body\n tmpfd, tmpfpath = mkstemp(dir='.')\n os.write(tmpfd, content)\n os.close(tmpfd)\n os.rename(tmpfpath, filepath)\n atexit.register(lambda: clean_assets(website.www_root))\n\n\ndef clean_assets(www_root):\n for spt in find_files(www_root+'/assets/', '*.spt'):\n try:\n os.unlink(spt[:-4])\n except:\n pass\n\n\ndef load_i18n(project_root, tell_sentry):\n # Load the locales\n localeDir = os.path.join(project_root, 'i18n', 'core')\n locales = LOCALES\n for file in os.listdir(localeDir):\n try:\n parts = file.split(\".\")\n if not (len(parts) == 2 and parts[1] == \"po\"):\n continue\n lang = parts[0]\n with open(os.path.join(localeDir, file)) as f:\n l = locales[lang.lower()] = Locale(lang)\n c = l.catalog = read_po(f)\n c.plural_func = get_function_from_rule(c.plural_expr)\n try:\n l.countries = make_sorted_dict(COUNTRIES, l.territories)\n except KeyError:\n l.countries = COUNTRIES\n try:\n l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)\n except KeyError:\n l.languages_2 = LANGUAGES_2\n except Exception as e:\n tell_sentry(e, {})\n\n # Add aliases\n for k, v in list(locales.items()):\n locales.setdefault(ALIASES.get(k, k), v)\n locales.setdefault(ALIASES_R.get(k, k), v)\n for k, v in list(locales.items()):\n locales.setdefault(k.split('_', 1)[0], v)\n\n # Patch the locales to look less formal\n locales['fr'].currency_formats[None] = parse_pattern('#,##0.00\\u202f\\xa4')\n locales['fr'].currency_symbols['USD'] = '$'\n\n\ndef other_stuff(website, env):\n website.cache_static = env.gratipay_cache_static\n website.compress_assets = env.gratipay_compress_assets\n\n if website.cache_static:\n def asset(path):\n fspath = website.www_root+'/assets/'+path\n etag = ''\n try:\n etag = asset_etag(fspath)\n except Exception as e:\n website.tell_sentry(e, {})\n return env.gratipay_asset_url+path+(etag and '?etag='+etag)\n website.asset = asset\n compile_assets(website)\n else:\n website.asset = lambda path: env.gratipay_asset_url+path\n clean_assets(website.www_root)\n\n website.optimizely_id = env.optimizely_id\n website.include_piwik = env.include_piwik\n\n website.log_metrics = env.log_metrics\n\n\ndef env():\n env = Environment(\n AWS_SES_ACCESS_KEY_ID = unicode,\n AWS_SES_SECRET_ACCESS_KEY = unicode,\n AWS_SES_DEFAULT_REGION = unicode,\n BASE_URL = unicode,\n DATABASE_URL = unicode,\n DATABASE_MAXCONN = int,\n CRYPTO_KEYS = unicode,\n GRATIPAY_ASSET_URL = unicode,\n GRATIPAY_CACHE_STATIC = is_yesish,\n GRATIPAY_COMPRESS_ASSETS = is_yesish,\n BALANCED_API_SECRET = unicode,\n BRAINTREE_SANDBOX_MODE = is_yesish,\n BRAINTREE_MERCHANT_ID = unicode,\n BRAINTREE_PUBLIC_KEY = unicode,\n BRAINTREE_PRIVATE_KEY = unicode,\n GITHUB_CLIENT_ID = unicode,\n GITHUB_CLIENT_SECRET = unicode,\n GITHUB_CALLBACK = unicode,\n BITBUCKET_CONSUMER_KEY = unicode,\n BITBUCKET_CONSUMER_SECRET = unicode,\n BITBUCKET_CALLBACK = unicode,\n TWITTER_CONSUMER_KEY = unicode,\n TWITTER_CONSUMER_SECRET = unicode,\n TWITTER_CALLBACK = unicode,\n FACEBOOK_APP_ID = unicode,\n FACEBOOK_APP_SECRET = unicode,\n FACEBOOK_CALLBACK = unicode,\n GOOGLE_CLIENT_ID = unicode,\n GOOGLE_CLIENT_SECRET = unicode,\n GOOGLE_CALLBACK = unicode,\n BOUNTYSOURCE_API_SECRET = unicode,\n BOUNTYSOURCE_CALLBACK = unicode,\n BOUNTYSOURCE_API_HOST = unicode,\n BOUNTYSOURCE_WWW_HOST = unicode,\n VENMO_CLIENT_ID = unicode,\n VENMO_CLIENT_SECRET = unicode,\n VENMO_CALLBACK = unicode,\n OPENSTREETMAP_CONSUMER_KEY = unicode,\n OPENSTREETMAP_CONSUMER_SECRET = unicode,\n OPENSTREETMAP_CALLBACK = unicode,\n OPENSTREETMAP_API_URL = unicode,\n OPENSTREETMAP_AUTH_URL = unicode,\n UPDATE_CTA_EVERY = int,\n CHECK_DB_EVERY = int,\n DEQUEUE_EMAILS_EVERY = int,\n OPTIMIZELY_ID = unicode,\n SENTRY_DSN = unicode,\n LOG_METRICS = is_yesish,\n INCLUDE_PIWIK = is_yesish,\n TEAM_REVIEW_REPO = unicode,\n TEAM_REVIEW_USERNAME = unicode,\n TEAM_REVIEW_TOKEN = unicode,\n RAISE_SIGNIN_NOTIFICATIONS = is_yesish,\n RESEND_VERIFICATION_THRESHOLD = unicode,\n REQUIRE_YAJL = is_yesish,\n GUNICORN_OPTS = unicode,\n )\n\n\n # Error Checking\n # ==============\n\n if env.malformed:\n these = len(env.malformed) != 1 and 'these' or 'this'\n plural = len(env.malformed) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gratipay.com couldn't understand %s \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key, err in env.malformed:\n aspen.log_dammit(\" {} ({})\".format(key, err))\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n keys = ', '.join([key for key in env.malformed])\n raise BadEnvironment(\"Malformed envvar{}: {}.\".format(plural, keys))\n\n if env.missing:\n these = len(env.missing) != 1 and 'these' or 'this'\n plural = len(env.missing) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gratipay.com needs %s missing \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key in env.missing:\n aspen.log_dammit(\" \" + key)\n aspen.log_dammit(\" \")\n aspen.log_dammit( \"(Sorry, we must've started looking for \"\n , \"%s since you last updated Gratipay!)\" % these\n )\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"Running Gratipay locally? Edit ./local.env.\")\n aspen.log_dammit(\"Running the test suite? Edit ./tests/env.\")\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n keys = ', '.join([key for key in env.missing])\n raise BadEnvironment(\"Missing envvar{}: {}.\".format(plural, keys))\n\n return env\n\n\nif __name__ == '__main__':\n env()\n", "path": "gratipay/wireup.py" } ]
[ { "content": "\"\"\"Wireup\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport atexit\nimport fnmatch\nimport os\nimport urlparse\nfrom tempfile import mkstemp\n\nimport aspen\nfrom aspen.testing.client import Client\nfrom babel.core import Locale\nfrom babel.messages.pofile import read_po\nfrom babel.numbers import parse_pattern\nimport balanced\nimport boto3\nimport braintree\nimport gratipay\nimport gratipay.billing.payday\nimport raven\nfrom environment import Environment, is_yesish\nfrom gratipay.elsewhere import PlatformRegistry\nfrom gratipay.elsewhere.bitbucket import Bitbucket\nfrom gratipay.elsewhere.bountysource import Bountysource\nfrom gratipay.elsewhere.github import GitHub\nfrom gratipay.elsewhere.facebook import Facebook\nfrom gratipay.elsewhere.google import Google\nfrom gratipay.elsewhere.openstreetmap import OpenStreetMap\nfrom gratipay.elsewhere.twitter import Twitter\nfrom gratipay.elsewhere.venmo import Venmo\nfrom gratipay.email import compile_email_spt, ConsoleMailer\nfrom gratipay.models.account_elsewhere import AccountElsewhere\nfrom gratipay.models.community import Community\nfrom gratipay.models.country import Country\nfrom gratipay.models.exchange_route import ExchangeRoute\nfrom gratipay.models.participant import Participant, Identity\nfrom gratipay.models.team import Team\nfrom gratipay.models import GratipayDB\nfrom gratipay.security.crypto import EncryptingPacker\nfrom gratipay.utils.http_caching import asset_etag\nfrom gratipay.utils.i18n import (\n ALIASES, ALIASES_R, COUNTRIES, LANGUAGES_2, LOCALES,\n get_function_from_rule, make_sorted_dict\n)\n\ndef base_url(website, env):\n gratipay.base_url = website.base_url = env.base_url\n\ndef secure_cookies(env):\n gratipay.use_secure_cookies = env.base_url.startswith('https')\n\ndef db(env):\n dburl = env.database_url\n maxconn = env.database_maxconn\n db = GratipayDB(dburl, maxconn=maxconn)\n\n for model in (AccountElsewhere, Community, Country, ExchangeRoute, Participant, Team):\n db.register_model(model)\n gratipay.billing.payday.Payday.db = db\n\n return db\n\ndef crypto(env):\n keys = [k.encode('ASCII') for k in env.crypto_keys.split()]\n out = Identity.encrypting_packer = EncryptingPacker(*keys)\n return out\n\ndef mail(env, project_root='.'):\n if env.aws_ses_access_key_id and env.aws_ses_secret_access_key and env.aws_ses_default_region:\n aspen.log_dammit(\"AWS SES is configured! We'll send mail through SES.\")\n Participant._mailer = boto3.client( service_name='ses'\n , region_name=env.aws_ses_default_region\n , aws_access_key_id=env.aws_ses_access_key_id\n , aws_secret_access_key=env.aws_ses_secret_access_key\n )\n else:\n aspen.log_dammit(\"AWS SES is not configured! Mail will be dumped to the console here.\")\n Participant._mailer = ConsoleMailer()\n emails = {}\n emails_dir = project_root+'/emails/'\n i = len(emails_dir)\n for spt in find_files(emails_dir, '*.spt'):\n base_name = spt[i:-4]\n emails[base_name] = compile_email_spt(spt)\n Participant._emails = emails\n\ndef billing(env):\n balanced.configure(env.balanced_api_secret)\n\n if env.braintree_sandbox_mode:\n braintree_env = braintree.Environment.Sandbox\n else:\n braintree_env = braintree.Environment.Production\n\n braintree.Configuration.configure(\n braintree_env,\n env.braintree_merchant_id,\n env.braintree_public_key,\n env.braintree_private_key\n )\n\n\ndef team_review(env):\n Team.review_repo = env.team_review_repo\n Team.review_auth = (env.team_review_username, env.team_review_token)\n\n\ndef username_restrictions(website):\n gratipay.RESTRICTED_USERNAMES = os.listdir(website.www_root)\n\n\ndef make_sentry_teller(env, _noop=None):\n if not env.sentry_dsn:\n aspen.log_dammit(\"Won't log to Sentry (SENTRY_DSN is empty).\")\n noop = _noop or (lambda *a, **kw: None)\n Participant._tell_sentry = noop\n return noop\n\n sentry = raven.Client(env.sentry_dsn)\n\n def tell_sentry(exception, state):\n\n # Decide if we care.\n # ==================\n\n if isinstance(exception, aspen.Response):\n\n if exception.code < 500:\n\n # Only log server errors to Sentry. For responses < 500 we use\n # stream-/line-based access logging. See discussion on:\n\n # https://github.com/gratipay/gratipay.com/pull/1560.\n\n return\n\n\n # Find a user.\n # ============\n # | is disallowed in usernames, so we can use it here to indicate\n # situations in which we can't get a username.\n\n user = state.get('user')\n user_id = 'n/a'\n if user is None:\n username = '| no user'\n else:\n is_anon = getattr(user, 'ANON', None)\n if is_anon is None:\n username = '| no ANON'\n elif is_anon:\n username = '| anonymous'\n else:\n participant = getattr(user, 'participant', None)\n if participant is None:\n username = '| no participant'\n else:\n username = getattr(user.participant, 'username', None)\n if username is None:\n username = '| no username'\n else:\n user_id = user.participant.id\n username = username.encode('utf8')\n user = { 'id': user_id\n , 'is_admin': user.participant.is_admin\n , 'is_suspicious': user.participant.is_suspicious\n , 'claimed_time': user.participant.claimed_time.isoformat()\n , 'url': 'https://gratipay.com/{}/'.format(username)\n }\n\n\n # Fire off a Sentry call.\n # =======================\n\n dispatch_result = state.get('dispatch_result')\n request = state.get('request')\n tags = { 'username': username\n , 'user_id': user_id\n }\n extra = { 'filepath': getattr(dispatch_result, 'match', None)\n , 'request': str(request).splitlines()\n , 'user': user\n }\n result = sentry.captureException(tags=tags, extra=extra)\n\n\n # Emit a reference string to stdout.\n # ==================================\n\n ident = sentry.get_ident(result)\n aspen.log_dammit('Exception reference: ' + ident)\n\n Participant._tell_sentry = tell_sentry\n return tell_sentry\n\n\nclass BadEnvironment(SystemExit):\n pass\n\n\ndef accounts_elsewhere(website, env):\n\n twitter = Twitter(\n env.twitter_consumer_key,\n env.twitter_consumer_secret,\n env.twitter_callback,\n )\n facebook = Facebook(\n env.facebook_app_id,\n env.facebook_app_secret,\n env.facebook_callback,\n )\n github = GitHub(\n env.github_client_id,\n env.github_client_secret,\n env.github_callback,\n )\n google = Google(\n env.google_client_id,\n env.google_client_secret,\n env.google_callback,\n )\n bitbucket = Bitbucket(\n env.bitbucket_consumer_key,\n env.bitbucket_consumer_secret,\n env.bitbucket_callback,\n )\n openstreetmap = OpenStreetMap(\n env.openstreetmap_consumer_key,\n env.openstreetmap_consumer_secret,\n env.openstreetmap_callback,\n env.openstreetmap_api_url,\n env.openstreetmap_auth_url,\n )\n bountysource = Bountysource(\n None,\n env.bountysource_api_secret,\n env.bountysource_callback,\n env.bountysource_api_host,\n env.bountysource_www_host,\n )\n venmo = Venmo(\n env.venmo_client_id,\n env.venmo_client_secret,\n env.venmo_callback,\n )\n\n signin_platforms = [twitter, github, facebook, google, bitbucket, openstreetmap]\n website.signin_platforms = PlatformRegistry(signin_platforms)\n AccountElsewhere.signin_platforms_names = tuple(p.name for p in signin_platforms)\n\n # For displaying \"Connected Accounts\"\n website.social_profiles = [twitter, github, facebook, google, bitbucket, openstreetmap, bountysource]\n\n all_platforms = signin_platforms + [bountysource, venmo]\n website.platforms = AccountElsewhere.platforms = PlatformRegistry(all_platforms)\n\n friends_platforms = [p for p in website.platforms if getattr(p, 'api_friends_path', None)]\n website.friends_platforms = PlatformRegistry(friends_platforms)\n\n for platform in all_platforms:\n platform.icon = website.asset('platforms/%s.16.png' % platform.name)\n platform.logo = website.asset('platforms/%s.png' % platform.name)\n\n\ndef find_files(directory, pattern):\n for root, dirs, files in os.walk(directory):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(root, filename)\n\n\ndef compile_assets(website):\n client = Client(website.www_root, website.project_root)\n client._website = website\n for spt in find_files(website.www_root+'/assets/', '*.spt'):\n filepath = spt[:-4] # /path/to/www/assets/foo.css\n urlpath = spt[spt.rfind('/assets/'):-4] # /assets/foo.css\n if urlpath == '/assets/_well-known/acme-challenge/%token':\n # This *should* be dynamic.\n continue\n try:\n # Remove any existing compiled asset, so we can access the dynamic\n # one instead (Aspen prefers foo.css over foo.css.spt).\n os.unlink(filepath)\n except:\n pass\n headers = {}\n if website.base_url:\n url = urlparse.urlparse(website.base_url)\n headers[b'HTTP_X_FORWARDED_PROTO'] = str(url.scheme)\n headers[b'HTTP_HOST'] = str(url.netloc)\n content = client.GET(urlpath, **headers).body\n tmpfd, tmpfpath = mkstemp(dir='.')\n os.write(tmpfd, content)\n os.close(tmpfd)\n os.rename(tmpfpath, filepath)\n atexit.register(lambda: clean_assets(website.www_root))\n\n\ndef clean_assets(www_root):\n for spt in find_files(www_root+'/assets/', '*.spt'):\n try:\n os.unlink(spt[:-4])\n except:\n pass\n\n\ndef load_i18n(project_root, tell_sentry):\n # Load the locales\n localeDir = os.path.join(project_root, 'i18n', 'core')\n locales = LOCALES\n for file in os.listdir(localeDir):\n try:\n parts = file.split(\".\")\n if not (len(parts) == 2 and parts[1] == \"po\"):\n continue\n lang = parts[0]\n with open(os.path.join(localeDir, file)) as f:\n l = locales[lang.lower()] = Locale(lang)\n c = l.catalog = read_po(f)\n c.plural_func = get_function_from_rule(c.plural_expr)\n try:\n l.countries = make_sorted_dict(COUNTRIES, l.territories)\n except KeyError:\n l.countries = COUNTRIES\n try:\n l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)\n except KeyError:\n l.languages_2 = LANGUAGES_2\n except Exception as e:\n tell_sentry(e, {})\n\n # Add aliases\n for k, v in list(locales.items()):\n locales.setdefault(ALIASES.get(k, k), v)\n locales.setdefault(ALIASES_R.get(k, k), v)\n for k, v in list(locales.items()):\n locales.setdefault(k.split('_', 1)[0], v)\n\n # Patch the locales to look less formal\n locales['fr'].currency_formats[None] = parse_pattern('#,##0.00\\u202f\\xa4')\n locales['fr'].currency_symbols['USD'] = '$'\n\n\ndef other_stuff(website, env):\n website.cache_static = env.gratipay_cache_static\n website.compress_assets = env.gratipay_compress_assets\n\n if website.cache_static:\n def asset(path):\n fspath = website.www_root+'/assets/'+path\n etag = ''\n try:\n etag = asset_etag(fspath)\n except Exception as e:\n website.tell_sentry(e, {})\n return env.gratipay_asset_url+path+(etag and '?etag='+etag)\n website.asset = asset\n compile_assets(website)\n else:\n website.asset = lambda path: env.gratipay_asset_url+path\n clean_assets(website.www_root)\n\n website.optimizely_id = env.optimizely_id\n website.include_piwik = env.include_piwik\n\n website.log_metrics = env.log_metrics\n\n\ndef env():\n env = Environment(\n AWS_SES_ACCESS_KEY_ID = unicode,\n AWS_SES_SECRET_ACCESS_KEY = unicode,\n AWS_SES_DEFAULT_REGION = unicode,\n BASE_URL = unicode,\n DATABASE_URL = unicode,\n DATABASE_MAXCONN = int,\n CRYPTO_KEYS = unicode,\n GRATIPAY_ASSET_URL = unicode,\n GRATIPAY_CACHE_STATIC = is_yesish,\n GRATIPAY_COMPRESS_ASSETS = is_yesish,\n BALANCED_API_SECRET = unicode,\n BRAINTREE_SANDBOX_MODE = is_yesish,\n BRAINTREE_MERCHANT_ID = unicode,\n BRAINTREE_PUBLIC_KEY = unicode,\n BRAINTREE_PRIVATE_KEY = unicode,\n GITHUB_CLIENT_ID = unicode,\n GITHUB_CLIENT_SECRET = unicode,\n GITHUB_CALLBACK = unicode,\n BITBUCKET_CONSUMER_KEY = unicode,\n BITBUCKET_CONSUMER_SECRET = unicode,\n BITBUCKET_CALLBACK = unicode,\n TWITTER_CONSUMER_KEY = unicode,\n TWITTER_CONSUMER_SECRET = unicode,\n TWITTER_CALLBACK = unicode,\n FACEBOOK_APP_ID = unicode,\n FACEBOOK_APP_SECRET = unicode,\n FACEBOOK_CALLBACK = unicode,\n GOOGLE_CLIENT_ID = unicode,\n GOOGLE_CLIENT_SECRET = unicode,\n GOOGLE_CALLBACK = unicode,\n BOUNTYSOURCE_API_SECRET = unicode,\n BOUNTYSOURCE_CALLBACK = unicode,\n BOUNTYSOURCE_API_HOST = unicode,\n BOUNTYSOURCE_WWW_HOST = unicode,\n VENMO_CLIENT_ID = unicode,\n VENMO_CLIENT_SECRET = unicode,\n VENMO_CALLBACK = unicode,\n OPENSTREETMAP_CONSUMER_KEY = unicode,\n OPENSTREETMAP_CONSUMER_SECRET = unicode,\n OPENSTREETMAP_CALLBACK = unicode,\n OPENSTREETMAP_API_URL = unicode,\n OPENSTREETMAP_AUTH_URL = unicode,\n UPDATE_CTA_EVERY = int,\n CHECK_DB_EVERY = int,\n DEQUEUE_EMAILS_EVERY = int,\n OPTIMIZELY_ID = unicode,\n SENTRY_DSN = unicode,\n LOG_METRICS = is_yesish,\n INCLUDE_PIWIK = is_yesish,\n TEAM_REVIEW_REPO = unicode,\n TEAM_REVIEW_USERNAME = unicode,\n TEAM_REVIEW_TOKEN = unicode,\n RAISE_SIGNIN_NOTIFICATIONS = is_yesish,\n RESEND_VERIFICATION_THRESHOLD = unicode,\n REQUIRE_YAJL = is_yesish,\n GUNICORN_OPTS = unicode,\n )\n\n\n # Error Checking\n # ==============\n\n if env.malformed:\n these = len(env.malformed) != 1 and 'these' or 'this'\n plural = len(env.malformed) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gratipay.com couldn't understand %s \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key, err in env.malformed:\n aspen.log_dammit(\" {} ({})\".format(key, err))\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n keys = ', '.join([key for key in env.malformed])\n raise BadEnvironment(\"Malformed envvar{}: {}.\".format(plural, keys))\n\n if env.missing:\n these = len(env.missing) != 1 and 'these' or 'this'\n plural = len(env.missing) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gratipay.com needs %s missing \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key in env.missing:\n aspen.log_dammit(\" \" + key)\n aspen.log_dammit(\" \")\n aspen.log_dammit( \"(Sorry, we must've started looking for \"\n , \"%s since you last updated Gratipay!)\" % these\n )\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"Running Gratipay locally? Edit ./local.env.\")\n aspen.log_dammit(\"Running the test suite? Edit ./tests/env.\")\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n keys = ', '.join([key for key in env.missing])\n raise BadEnvironment(\"Missing envvar{}: {}.\".format(plural, keys))\n\n return env\n\n\ndef __main__():\n # deploy.sh uses this to validate production env config\n env()\n", "path": "gratipay/wireup.py" } ]
diff --git a/deploy.sh b/deploy.sh index 45faedbb06..5696579a49 100755 --- a/deploy.sh +++ b/deploy.sh @@ -49,7 +49,7 @@ version="$((prev + 1))" # Check that the environment contains all required variables heroku config -s -a gratipay | ./env/bin/honcho run -e /dev/stdin \ - ./env/bin/python gratipay/wireup.py + ./env/bin/python -m gratipay.wireup # Sync the translations diff --git a/gratipay/wireup.py b/gratipay/wireup.py index 0c113c18bf..16a1b3d39d 100644 --- a/gratipay/wireup.py +++ b/gratipay/wireup.py @@ -476,5 +476,6 @@ def env(): return env -if __name__ == '__main__': +def __main__(): + # deploy.sh uses this to validate production env config env()
Kinto__kinto-7
PostgreSQL by default ? - put `cliquet[postgresql]` in requirements - put storage_url in config (default postgres:postgres@localhost/postgres)
[ { "content": "import os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\nREQUIREMENTS = [\n 'colander',\n 'cornice',\n 'six',\n 'waitress',\n 'cliquet'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ]}\n\nsetup(name='kinto',\n version='0.1.dev0',\n description='kinto',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py" } ]
[ { "content": "import os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\nREQUIREMENTS = [\n 'colander',\n 'cornice',\n 'six',\n 'waitress',\n 'cliquet[postgresql]'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ]}\n\nsetup(name='kinto',\n version='0.1.dev0',\n description='kinto',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py" } ]
diff --git a/.travis.yml b/.travis.yml index b185661c1..538bf2ae6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,13 +1,18 @@ language: python python: 2.7 -sudo: false +sudo: true services: redis-server +addons: + postgresql: "9.3" env: - TOX_ENV=py27 - TOX_ENV=py34 - TOX_ENV=flake8 - TOX_ENV=docs - - ACTION=loadtest +before_script: + - psql -c "CREATE DATABASE testdb ENCODING 'UTF8' TEMPLATE template0;" -U postgres +before_install: + - sudo apt-get install postgresql-contrib install: - if [[ $ACTION != loadtest ]]; then pip install tox; fi script: diff --git a/config/kinto.ini b/config/kinto.ini index ebac1b1f2..b3c0c4269 100644 --- a/config/kinto.ini +++ b/config/kinto.ini @@ -4,7 +4,8 @@ use = egg:kinto cliquet.project_name = cloud storage cliquet.project_docs = https://kinto.rtfd.org/ cliquet.session_backend = cliquet.session.redis -cliquet.storage_backend = cliquet.storage.redis +cliquet.storage_backend = cliquet.storage.postgresql +cliquet.storage_url = postgres://postgres:postgres@localhost/postgres cliquet.http_scheme = http cliquet.retry_after = 30 cliquet.eos = diff --git a/kinto/tests/support.py b/kinto/tests/support.py index 1f154b66f..8e97ff09f 100644 --- a/kinto/tests/support.py +++ b/kinto/tests/support.py @@ -20,11 +20,14 @@ def get_app_settings(self): 'cliquet.project_name': 'cloud storage', 'cliquet.project_docs': 'https://kinto.rtfd.org/', 'cliquet.basic_auth_enabled': 'true', - 'cliquet.storage_backend': 'cliquet.storage.redis', + 'cliquet.storage_backend': 'cliquet.storage.postgresql', + 'cliquet.storage_url': + 'postgres://postgres:postgres@localhost/testdb', 'cliquet.session_backend': 'cliquet.session.redis', 'fxa-oauth.client_id': '89513028159972bc', - 'fxa-oauth.client_secret': '9aced230585cc0aa2932e2eb871c9a3a7d6458' - 'e59ccf57eb610ea0a3467dd800', + 'fxa-oauth.client_secret': + '9aced230585cc0aa2932e2eb871c9a3a7d6458' + 'e59ccf57eb610ea0a3467dd800', 'fxa-oauth.oauth_uri': 'https://oauth-stable.dev.lcip.org', 'fxa-oauth.scope': 'profile' } diff --git a/setup.py b/setup.py index 68b58c244..7d63e82a9 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ 'cornice', 'six', 'waitress', - 'cliquet' + 'cliquet[postgresql]' ] ENTRY_POINTS = {
pypa__setuptools-1043
No module named 'six' Flask's Tox/Travis builds all started failing. Looks like a new version of setuptools was just released that has a problem with six. ~~~pytb Obtaining file:///home/david/Projects/flask Installing collected packages: Flask Running setup.py develop for Flask Complete output from command /home/david/Projects/flask/.tox/py/bin/python3 -c "import setuptools, tokenize;__file__='/home/david/Projects/flask/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" develop --no-deps: /home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/dist.py:336: UserWarning: Normalizing '0.13-dev' to '0.13.dev0' normalized_version, Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/david/Projects/flask/setup.py", line 109, in <module> ''' File "/usr/lib64/python3.6/distutils/core.py", line 134, in setup ok = dist.parse_command_line() File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/dist.py", line 363, in parse_command_line result = _Distribution.parse_command_line(self) File "/usr/lib64/python3.6/distutils/dist.py", line 472, in parse_command_line args = self._parse_command_opts(parser, args) File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/dist.py", line 674, in _parse_command_opts nargs = _Distribution._parse_command_opts(self, parser, args) File "/usr/lib64/python3.6/distutils/dist.py", line 528, in _parse_command_opts cmd_class = self.get_command_class(command) File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/dist.py", line 495, in get_command_class self.cmdclass[command] = cmdclass = ep.load() File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2303, in load return self.resolve() File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2309, in resolve module = __import__(self.module_name, fromlist=['__name__'], level=0) File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/command/develop.py", line 11, in <module> from setuptools.command.easy_install import easy_install File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/command/easy_install.py", line 49, in <module> from setuptools.py27compat import rmtree_safe File "/home/david/Projects/flask/.tox/py/lib/python3.6/site-packages/setuptools/py27compat.py", line 7, in <module> import six ModuleNotFoundError: No module named 'six' ~~~ Example failed build log: https://travis-ci.org/pallets/flask/jobs/238166427#L242
[ { "content": "\"\"\"\nCompatibility Support for Python 2.7 and earlier\n\"\"\"\n\nimport platform\n\nimport six\n\n\ndef get_all_headers(message, key):\n \"\"\"\n Given an HTTPMessage, return all headers matching a given key.\n \"\"\"\n return message.get_all(key)\n\n\nif six.PY2:\n def get_all_headers(message, key):\n return message.getheaders(key)\n\n\nlinux_py2_ascii = (\n platform.system() == 'Linux' and\n six.PY2\n)\n\nrmtree_safe = str if linux_py2_ascii else lambda x: x\n\"\"\"Workaround for http://bugs.python.org/issue24672\"\"\"\n", "path": "setuptools/py27compat.py" } ]
[ { "content": "\"\"\"\nCompatibility Support for Python 2.7 and earlier\n\"\"\"\n\nimport platform\n\nfrom setuptools.extern import six\n\n\ndef get_all_headers(message, key):\n \"\"\"\n Given an HTTPMessage, return all headers matching a given key.\n \"\"\"\n return message.get_all(key)\n\n\nif six.PY2:\n def get_all_headers(message, key):\n return message.getheaders(key)\n\n\nlinux_py2_ascii = (\n platform.system() == 'Linux' and\n six.PY2\n)\n\nrmtree_safe = str if linux_py2_ascii else lambda x: x\n\"\"\"Workaround for http://bugs.python.org/issue24672\"\"\"\n", "path": "setuptools/py27compat.py" } ]
diff --git a/setuptools/py27compat.py b/setuptools/py27compat.py index 701283c8c1..2985011b92 100644 --- a/setuptools/py27compat.py +++ b/setuptools/py27compat.py @@ -4,7 +4,7 @@ import platform -import six +from setuptools.extern import six def get_all_headers(message, key):
scalableminds__webknossos-libs-1048
Dataset.from_images should support NIFTI, recognize .nii `.nii` is an alternative file ending for nifti. The wkcuber CLI currrently prints `Converting Nifti dataset`, but then `Could not find any supported image data. The following suffixes are supported …` I think nifti used to be supported by the wkcuber
[ { "content": "import warnings\nfrom contextlib import contextmanager, nullcontext\nfrom itertools import chain\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import (\n ContextManager,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\nfrom urllib.error import HTTPError\n\nimport numpy as np\nfrom natsort import natsorted\nfrom numpy.typing import DTypeLike\n\nfrom webknossos.geometry.bounding_box import BoundingBox\nfrom webknossos.geometry.nd_bounding_box import NDBoundingBox\n\n# pylint: disable=unused-import\ntry:\n from .pims_czi_reader import PimsCziReader\nexcept ImportError:\n PimsCziReader = type(None) # type: ignore[misc,assignment]\n\ntry:\n from .pims_dm_readers import ( # noqa: F401 unused-import\n PimsDm3Reader,\n PimsDm4Reader,\n )\nexcept ImportError:\n pass\n\ntry:\n from .pims_imagej_tiff_reader import ( # noqa: F401 unused-import\n PimsImagejTiffReader,\n )\nexcept ImportError:\n pass\n\n\nfrom ...geometry.vec_int import VecInt\nfrom ..mag_view import MagView\n\ntry:\n import pims\nexcept ImportError as import_error:\n raise RuntimeError(\n \"Cannot import pims, please install it e.g. using 'webknossos[all]'\"\n ) from import_error\n\n\n# Fix ImageIOReader not handling channels correctly. This might get fixed via\n# https://github.com/soft-matter/pims/pull/430\npims.ImageIOReader.frame_shape = pims.FramesSequenceND.frame_shape\n\n\ndef _assume_color_channel(dim_size: int, dtype: np.dtype) -> bool:\n return dim_size == 1 or (dim_size == 3 and dtype == np.dtype(\"uint8\"))\n\n\nclass PimsImages:\n dtype: DTypeLike\n num_channels: int\n\n def __init__(\n self,\n images: Union[str, Path, \"pims.FramesSequence\", List[Union[str, PathLike]]],\n channel: Optional[int],\n timepoint: Optional[int],\n czi_channel: Optional[int],\n swap_xy: bool,\n flip_x: bool,\n flip_y: bool,\n flip_z: bool,\n use_bioformats: Optional[bool],\n is_segmentation: bool,\n ) -> None:\n \"\"\"\n During initialization the pims objects are examined and configured to produce\n ndarrays that follow the following form:\n (self._iter_axes, *self._bundle_axis)\n self._iter_axes can be a list of different axes or an empty list if the image is 2D.\n In the latter case, the inner 2D image is still wrapped in a single-element list\n by _open_images() to be consistent with 3D images.\n self._bundle_axis can consist of \"x\", \"y\" and \"c\", where \"c\" is optional and must be\n at the start or the end, so one of \"xy\", \"yx\", \"xyc\", \"yxc\", \"cxy\", \"cyx\".\n\n The part \"IDENTIFY AXIS ORDER\" figures out (self._iter_dim, *self._img_dims)\n from out-of-the-box pims images. Afterwards self._open_images() produces\n images consistent with those variables.\n\n The part \"IDENTIFY SHAPE & CHANNELS\" uses this information and the well-defined\n images to figure out the shape & num_channels.\n \"\"\"\n ## we use images as the name for the entered contextmanager,\n ## the `del` prevents any confusion with the passed argument.\n self._original_images = images\n del images\n\n ## arguments as inner attributes\n self._channel = channel\n self._timepoint = timepoint\n self._czi_channel = czi_channel\n self._swap_xy = swap_xy\n self._flip_x = flip_x\n self._flip_y = flip_y\n self._flip_z = flip_z\n self._use_bioformats = use_bioformats\n\n ## attributes that will be set in __init__()\n # _bundle_axes\n self._iter_axes = None\n self._iter_loop_size = None\n self._possible_layers = {}\n\n ## attributes only for pims.FramesSequenceND instances:\n # _default_coords\n # _init_c_axis\n\n ## attributes that will also be set in __init__()\n # dtype\n # num_channels\n # _first_n_channels\n\n #######################\n # IDENTIFY AXIS ORDER #\n #######################\n\n with self._open_images() as images:\n assert isinstance(\n images, pims.FramesSequence\n ), f\"{type(images)} does not inherit from pims.FramesSequence\"\n self.dtype = images.dtype\n\n if isinstance(images, pims.FramesSequenceND):\n self._default_coords = {}\n self._init_c_axis = False\n if isinstance(images, pims.imageio_reader.ImageIOReader):\n # bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. This might get fixed via\n # https://github.com/soft-matter/pims/pull/430\n if (\n len(images._shape) >= len(images.sizes)\n and \"c\" not in images.sizes\n ):\n images._init_axis(\"c\", images._shape[-1])\n self._init_c_axis = True\n\n if isinstance(images, PimsCziReader):\n available_czi_channels = images.available_czi_channels()\n if len(available_czi_channels) > 1:\n self._possible_layers[\"czi_channel\"] = available_czi_channels\n\n # An image slice should always consist of a 2D image. If there are multiple channels\n # the data of each channel is part of the image slices. Possible shapes of an image\n # slice are (#y_shape, #x_shape), (1, #y_shape, #x_shape) or (3, #y_shape, #x_shape).\n if images.sizes.get(\"c\", 1) > 1:\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n if \"c\" in images.axes:\n # When c-axis is not in _bundle_axes and _iter_axes its value at coordinate 0\n # should be returned\n self._default_coords[\"c\"] = 0\n self._bundle_axes = [\"y\", \"x\"]\n\n # All other axes are used to iterate over them. The last one is iterated the fastest.\n self._iter_axes = list(\n set(images.axes).difference({*self._bundle_axes, \"c\", \"z\"})\n )\n if \"z\" in images.axes:\n self._iter_axes.append(\"z\")\n\n if self._timepoint is not None:\n # if a timepoint is given, PimsImages should only generate image slices for that timepoint\n if \"t\" in self._iter_axes:\n self._iter_axes.remove(\"t\")\n self._default_coords[\"t\"] = self._timepoint\n\n if len(self._iter_axes) > 1:\n iter_size = 1\n self._iter_loop_size = dict()\n for axis, other_axis in zip(\n self._iter_axes[-1:0:-1], self._iter_axes[-2::-1]\n ):\n # Creates a dict that contains the size of the loop for each axis\n # the axes are identified by their index in the _iter_axes list\n # the last axis is the fastest iterating axis, therfore the size of the loop\n # for the last axis is 1. For all other axes it is the product of all previous axes sizes.\n # self._iter_axes[-1:0:-1] is a reversed copy of self._iter_axes without the last element\n # e.g. [1,2,3,4] -> [4,3,2]\n # self._iter_axes[-2::-1] is a reversed copy of self._iter_axes without the first element\n # e.g. [1,2,3,4] -> [3,2,1]\n self._iter_loop_size[other_axis] = (\n iter_size := iter_size * images.sizes[axis]\n )\n\n else:\n # Fallback for generic pims classes that do not name their\n # dimensions as pims.FramesSequenceND does:\n\n _allow_channels_first = not is_segmentation\n if isinstance(images, (pims.ImageSequence, pims.ReaderSequence)):\n _allow_channels_first = False\n\n if len(images.shape) == 2:\n # Assume yx\n self._bundle_axes = [\"y\", \"x\"]\n self._iter_axes = []\n elif len(images.shape) == 3:\n # Assume yxc, cyx or zyx\n if _assume_color_channel(images.shape[2], images.dtype):\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = []\n elif images.shape[0] == 1 or (\n _allow_channels_first\n and _assume_color_channel(images.shape[0], images.dtype)\n ):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n self._iter_axes = []\n else:\n self._bundle_axes = [\"y\", \"x\"]\n self._iter_axes = [\"z\"]\n elif len(images.shape) == 4:\n # Assume zcyx or zyxc\n if images.shape[1] == 1 or _assume_color_channel(\n images.shape[1], images.dtype\n ):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = [\"z\"]\n elif len(images.shape) == 5:\n # Assume tzcyx or tzyxc\n # t has to be constant for this reader to obtain 4D image\n # (only possible if not specified manually already, since\n # the timepoint would already be indexed here and the\n # 5th dimension would be something else)\n if timepoint is not None:\n raise RuntimeError(\n f\"Got {len(images.shape)} axes for the images after \"\n + \"removing time dimension, can only map to 3D+channels.\"\n + \"To import image with more dimensions use dataformat\"\n + \"Zarr3 and set use_bioformats=True.\"\n )\n\n if _assume_color_channel(images.shape[2], images.dtype):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = [\"z\"]\n self._timepoint = 0\n if images.shape[0] > 1:\n self._possible_layers[\"timepoint\"] = list(\n range(0, images.shape[0])\n )\n else:\n raise RuntimeError(\n f\"Got {len(images.shape)} axes for the images, \"\n + \"but don't have axes information. Try to open \"\n + \"an N-dimensional image file with use_bioformats=\"\n + \"True.\"\n )\n\n #########################\n # IDENTIFY NUM_CHANNELS #\n #########################\n\n with self._open_images() as images:\n try:\n c_index = self._bundle_axes.index(\"c\")\n if isinstance(images, list):\n images_shape = (len(images),) + cast(\n pims.FramesSequence, images[0]\n ).shape\n else:\n images_shape = images.shape # pylint: disable=no-member\n\n self.num_channels = images_shape[c_index + 1]\n\n except ValueError:\n self.num_channels = 1\n\n self._first_n_channels = None\n if self._channel is not None:\n assert (\n self._channel < self.num_channels\n ), f\"Selected channel {self._channel} (0-indexed), but only {self.num_channels} channels are available.\"\n self.num_channels = 1\n else:\n if self.num_channels == 2:\n self._possible_layers[\"channel\"] = [0, 1]\n self.num_channels = 1\n self._channel = 0\n elif self.num_channels > 3:\n self._possible_layers[\"channel\"] = list(range(0, self.num_channels))\n self.num_channels = 3\n self._first_n_channels = 3\n\n def _normalize_original_images(self) -> Union[str, List[str]]:\n original_images = self._original_images\n if isinstance(original_images, (str, Path)):\n original_images_path = Path(original_images)\n if original_images_path.is_dir():\n valid_suffixes = get_valid_pims_suffixes()\n if self._use_bioformats is not False:\n valid_suffixes.update(get_valid_bioformats_suffixes())\n original_images = natsorted(\n str(i)\n for i in original_images_path.glob(\"**/*\")\n if i.is_file() and i.suffix.lstrip(\".\") in valid_suffixes\n )\n if len(original_images) == 1:\n original_images = original_images[0]\n if isinstance(original_images, str):\n return original_images\n elif isinstance(original_images, Iterable):\n return [str(i) for i in original_images]\n else:\n return str(original_images)\n\n def _ensure_correct_bioformats_usage(\n self, images_context_manager: pims.FramesSequence\n ) -> None:\n if (\n isinstance(images_context_manager, pims.bioformats.BioformatsReader)\n and self._use_bioformats is False\n ): # None is allowed\n raise RuntimeError(\n \"Selected bioformats reader, but using bioformats is not allowed \"\n + \"(use_bioformats is False).\"\n )\n\n def _try_open_pims_images(\n self, original_images: Union[str, List[str]], exceptions: List[Exception]\n ) -> Optional[pims.FramesSequence]:\n if self._use_bioformats:\n return None\n\n open_kwargs = {}\n if self._czi_channel is not None:\n open_kwargs[\"czi_channel\"] = self._czi_channel\n\n # try normal pims.open\n def strategy_0() -> pims.FramesSequence:\n result = pims.open(original_images, **open_kwargs)\n self._ensure_correct_bioformats_usage(original_images)\n return result\n\n # try pims.ImageSequence, which uses skimage internally but works for multiple images\n strategy_1 = lambda: pims.ImageSequence(original_images) # noqa: E731 Do not assign a `lambda` expression, use a `def`\n\n # for image lists, try to guess the correct reader using only the first image,\n # and apply that for all images via pims.ReaderSequence\n def strategy_2() -> Optional[pims.FramesSequence]:\n if isinstance(original_images, list):\n # assuming the same reader works for all images:\n first_image_handler = pims.open(original_images[0], **open_kwargs)\n self._ensure_correct_bioformats_usage(first_image_handler)\n return pims.ReaderSequence(\n original_images, type(first_image_handler), **open_kwargs\n )\n else:\n return None\n\n for strategy in [strategy_0, strategy_1, strategy_2]:\n try:\n images_context_manager = strategy()\n except Exception as e: # noqa: PERF203 `try`-`except` within a loop incurs performance overhead\n exceptions.append(e)\n else:\n if images_context_manager is not None:\n return images_context_manager\n return None\n\n def _try_open_bioformats_images_raw(\n self,\n original_images: Union[str, List[str]],\n exceptions: List[Exception],\n ) -> Optional[pims.FramesSequence]:\n try:\n if self._use_bioformats is False: # None is allowed\n raise RuntimeError(\n \"Using bioformats is not allowed (use_bioformats is False).\"\n )\n\n # There is a wrong warning about jpype, supressing it here.\n # See issue https://github.com/soft-matter/pims/issues/384\n warnings.filterwarnings(\n \"ignore\",\n \"Due to an issue with JPype 0.6.0, reading is slower.*\",\n category=UserWarning,\n module=\"pims.bioformats\",\n )\n try:\n pims.bioformats._find_jar()\n except HTTPError:\n # We cannot use the newest bioformats version,\n # since it does not include the necessary loci_tools.jar.\n # Updates to support newer bioformats jars with pims are in PR\n # https://github.com/soft-matter/pims/pull/403\n\n # This is also part of the worker dockerfile to cache the\n # jar in the image, please update Dockerfile.worker in the\n # voxelytics repo accordingly when editing this.\n pims.bioformats.download_jar(version=\"6.7.0\")\n\n if \"*\" in str(original_images) or isinstance(original_images, list):\n return pims.ReaderSequence(\n original_images, pims.bioformats.BioformatsReader\n )\n else:\n return pims.bioformats.BioformatsReader(original_images)\n except Exception as e:\n exceptions.append(e)\n return None\n\n @contextmanager\n def _open_images(\n self,\n ) -> Iterator[Union[pims.FramesSequence, List[pims.FramesSequence]]]:\n \"\"\"\n This yields well-defined images of the form (self._iter_axes, *self._bundle_axes),\n after IDENTIFY AXIS ORDER of __init__() has run.\n For a 2D image this is achieved by wrapping it in a list.\n \"\"\"\n images_context_manager: Optional[ContextManager]\n with warnings.catch_warnings():\n if isinstance(self._original_images, pims.FramesSequenceND):\n images_context_manager = nullcontext(enter_result=self._original_images)\n else:\n exceptions: List[Exception] = []\n original_images = self._normalize_original_images()\n images_context_manager = None\n\n images_context_manager = self._try_open_pims_images(\n original_images, exceptions\n )\n\n if images_context_manager is None:\n images_context_manager = self._try_open_bioformats_images_raw(\n original_images, exceptions\n )\n\n if images_context_manager is None:\n if len(exceptions) == 1:\n raise exceptions[0]\n else:\n exceptions_str = \"\\n\".join(\n f\"{type(e).__name__}: {str(e)}\" for e in exceptions\n )\n raise ValueError(\n f\"Tried to open the images {self._original_images} with different methods, \"\n + f\"none succeded. The following errors were raised:\\n{exceptions_str}\"\n )\n\n with images_context_manager as images:\n if isinstance(images, pims.FramesSequenceND):\n if hasattr(self, \"_bundle_axes\"):\n # first part of __init__() has happened\n images.default_coords.update(self._default_coords)\n if self._init_c_axis and \"c\" not in images.sizes:\n # Bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. _init_c_axis is set in __init__().\n # This might get fixed via https://github.com/soft-matter/pims/pull/430\n images._init_axis(\"c\", images._shape[-1])\n for key in list(images._get_frame_dict.keys()):\n images._get_frame_dict[key + (\"c\",)] = (\n images._get_frame_dict.pop(key)\n )\n self._bundle_axes.remove(\"c\")\n self._bundle_axes.append(\"c\")\n images.bundle_axes = self._bundle_axes\n images.iter_axes = self._iter_axes\n else:\n if self._timepoint is not None:\n images = images[self._timepoint]\n if self._iter_axes and \"t\" in self._iter_axes:\n self._iter_axes.remove(\"t\")\n if self._iter_axes == []:\n # add outer list to wrap 2D images as 3D-like structure\n images = [images]\n yield images\n\n def copy_to_view(\n self,\n args: Union[BoundingBox, NDBoundingBox],\n mag_view: MagView,\n is_segmentation: bool,\n dtype: Optional[DTypeLike] = None,\n ) -> Tuple[Tuple[int, int], Optional[int]]:\n \"\"\"Copies the images according to the passed arguments to the given mag_view.\n args is expected to be a (ND)BoundingBox the start and end of the z-range, meant for usage with an executor.\n copy_to_view returns an iterable of image shapes and largest segment ids. When using this\n method a manual update of the bounding box and the largest segment id might be necessary.\n \"\"\"\n absolute_bbox = args\n relative_bbox = absolute_bbox.offset(-mag_view.bounding_box.topleft)\n\n assert all(\n size == 1\n for size, axis in zip(absolute_bbox.size, absolute_bbox.axes)\n if axis not in (\"x\", \"y\", \"z\")\n ), \"The delivered BoundingBox has to be flat except for x,y and z dimension.\"\n\n # z_start and z_end are relative to the bounding box of the mag_view\n # to access the correct data from the images\n z_start, z_end = relative_bbox.get_bounds(\"z\")\n shapes = []\n max_id: Optional[int]\n if is_segmentation:\n max_id = 0\n else:\n max_id = None\n\n with self._open_images() as images:\n if self._iter_axes is not None and self._iter_loop_size is not None:\n # select the range of images that represents one xyz combination in the mag_view\n lower_bounds = sum(\n self._iter_loop_size[axis_name]\n * relative_bbox.get_bounds(axis_name)[0]\n for axis_name in self._iter_axes[:-1]\n )\n upper_bounds = lower_bounds + mag_view.bounding_box.get_shape(\"z\")\n images = images[lower_bounds:upper_bounds]\n if self._flip_z:\n images = images[::-1] # pylint: disable=unsubscriptable-object\n\n with mag_view.get_buffered_slice_writer(\n # Previously only z_start and its end were important, now the slice writer needs to know\n # which axis is currently written.\n absolute_bounding_box=absolute_bbox,\n buffer_size=absolute_bbox.get_shape(\"z\"),\n # copy_to_view is typically used in a multiprocessing-context. Therefore the\n # buffered slice writer should not update the json file to avoid race conditions.\n json_update_allowed=False,\n ) as writer:\n for image_slice in images[z_start:z_end]:\n image_slice = np.array(image_slice)\n # place channels first\n if \"c\" in self._bundle_axes:\n if hasattr(self, \"_init_c_axis\") and self._init_c_axis:\n # Bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. _init_c_axis is set in __init__().\n # This might get fixed via\n image_slice = image_slice[0]\n image_slice = np.moveaxis(\n image_slice,\n source=self._bundle_axes.index(\"c\"),\n destination=0,\n )\n if self._channel is not None:\n image_slice = image_slice[self._channel : self._channel + 1]\n elif self._first_n_channels is not None:\n image_slice = image_slice[: self._first_n_channels]\n assert image_slice.shape[0] == self.num_channels, (\n f\"Image shape {image_slice.shape} does not fit to the number of channels \"\n + f\"{self.num_channels} which are expected in the first axis.\"\n )\n\n if self._flip_x:\n image_slice = np.flip(image_slice, -2)\n if self._flip_y:\n image_slice = np.flip(image_slice, -1)\n\n if dtype is not None:\n image_slice = image_slice.astype(dtype, order=\"F\")\n\n if max_id is not None:\n max_id = max(max_id, image_slice.max())\n\n if self._swap_xy is False:\n image_slice = np.moveaxis(image_slice, -1, -2)\n\n shapes.append(image_slice.shape[-2:])\n writer.send(image_slice)\n\n return dimwise_max(shapes), None if max_id is None else int(max_id)\n\n def get_possible_layers(self) -> Optional[Dict[\"str\", List[int]]]:\n if len(self._possible_layers) == 0:\n return None\n else:\n return self._possible_layers\n\n @property\n def expected_bbox(self) -> NDBoundingBox:\n # replaces the previous expected_shape to enable n-dimensional input files\n with self._open_images() as images:\n if isinstance(images, pims.FramesSequenceND):\n axes = images.axes\n images_shape = tuple(images.sizes[axis] for axis in axes)\n else:\n if isinstance(images, list):\n images_shape = (len(images),) + cast(\n pims.FramesSequence, images[0]\n ).shape\n\n else:\n images_shape = images.shape # pylint: disable=no-member\n if len(images_shape) == 3:\n axes = (\"z\", \"y\", \"x\")\n else:\n axes = (\"z\", \"c\", \"y\", \"x\")\n\n if self._iter_loop_size is None:\n # There is no or only one element in self._iter_axes, so a 3D bounding box is sufficient.\n x_index, y_index = (\n axes.index(\"x\"),\n axes.index(\"y\"),\n )\n if self._iter_axes:\n try:\n # In case the naming of the third axis is not \"z\",\n # it is still considered as the z-axis.\n z_index = axes.index(self._iter_axes[0])\n except ValueError:\n z_index = axes.index(\"z\")\n z_shape = images_shape[z_index]\n else:\n z_shape = 1\n if self._swap_xy:\n x_index, y_index = y_index, x_index\n return BoundingBox(\n (0, 0, 0),\n (images_shape[x_index], images_shape[y_index], z_shape),\n )\n else:\n if isinstance(images, pims.FramesSequenceND):\n axes_names = (self._iter_axes or []) + [\n axis for axis in self._bundle_axes if axis != \"c\"\n ]\n axes_sizes = [\n images.sizes[axis] # pylint: disable=no-member\n for axis in axes_names\n ]\n axes_index = list(range(1, len(axes_names) + 1))\n topleft = VecInt.zeros(tuple(axes_names))\n\n if self._swap_xy:\n x_index, y_index = axes_names.index(\"x\"), axes_names.index(\"y\")\n axes_sizes[x_index], axes_sizes[y_index] = (\n axes_sizes[y_index],\n axes_sizes[x_index],\n )\n\n return NDBoundingBox(\n topleft,\n VecInt(axes_sizes, axes=axes_names),\n axes_names,\n VecInt(axes_index, axes=axes_names),\n )\n\n raise ValueError(\n \"It seems as if you try to load an N-dimensional image from 2D images. This is currently not supported.\"\n )\n\n\nT = TypeVar(\"T\", bound=Tuple[int, ...])\n\n\ndef dimwise_max(vectors: Sequence[T]) -> T:\n if len(vectors) == 1:\n return vectors[0]\n else:\n return cast(T, tuple(map(max, *vectors)))\n\n\nC = TypeVar(\"C\", bound=Type)\n\n\ndef _recursive_subclasses(cls: C) -> List[C]:\n \"Return all subclasses (and their subclasses, etc.).\"\n # Source: http://stackoverflow.com/a/3862957/1221924\n return cls.__subclasses__() + [\n g for s in cls.__subclasses__() for g in _recursive_subclasses(s)\n ]\n\n\ndef _get_all_pims_handlers() -> (\n Iterable[Type[Union[pims.FramesSequence, pims.FramesSequenceND]]]\n):\n return chain(\n _recursive_subclasses(pims.FramesSequence),\n _recursive_subclasses(pims.FramesSequenceND),\n )\n\n\ndef get_valid_pims_suffixes() -> Set[str]:\n valid_suffixes = set()\n for pims_handler in _get_all_pims_handlers():\n valid_suffixes.update(pims_handler.class_exts())\n return valid_suffixes\n\n\ndef get_valid_bioformats_suffixes() -> Set[str]:\n # Added the most present suffixes that are implemented in bioformats\n return {\n \"dcm\",\n \"dicom\",\n \"ics\",\n \"ids\",\n \"lei\",\n \"tif\",\n \"lif\",\n \"stk\",\n \"nd\",\n \"nd2\",\n \"png\",\n \"tiff\",\n \"tf2\",\n \"tf8\",\n \"btf\",\n \"pic\",\n \"raw\",\n \"xml\",\n \"gif\",\n }\n\n\ndef has_image_z_dimension(\n filepath: Path,\n use_bioformats: Optional[bool],\n is_segmentation: bool,\n) -> bool:\n pims_images = PimsImages(\n filepath,\n use_bioformats=use_bioformats,\n is_segmentation=is_segmentation,\n # the following arguments shouldn't matter much for the Dataset.from_images method:\n channel=None,\n timepoint=None,\n czi_channel=None,\n swap_xy=False,\n flip_x=False,\n flip_y=False,\n flip_z=False,\n )\n\n return pims_images.expected_bbox.get_shape(\"z\") > 1\n", "path": "webknossos/webknossos/dataset/_utils/pims_images.py" } ]
[ { "content": "import warnings\nfrom contextlib import contextmanager, nullcontext\nfrom itertools import chain\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import (\n ContextManager,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\nfrom urllib.error import HTTPError\n\nimport numpy as np\nfrom natsort import natsorted\nfrom numpy.typing import DTypeLike\n\nfrom webknossos.geometry.bounding_box import BoundingBox\nfrom webknossos.geometry.nd_bounding_box import NDBoundingBox\n\n# pylint: disable=unused-import\ntry:\n from .pims_czi_reader import PimsCziReader\nexcept ImportError:\n PimsCziReader = type(None) # type: ignore[misc,assignment]\n\ntry:\n from .pims_dm_readers import ( # noqa: F401 unused-import\n PimsDm3Reader,\n PimsDm4Reader,\n )\nexcept ImportError:\n pass\n\ntry:\n from .pims_imagej_tiff_reader import ( # noqa: F401 unused-import\n PimsImagejTiffReader,\n )\nexcept ImportError:\n pass\n\n\nfrom ...geometry.vec_int import VecInt\nfrom ..mag_view import MagView\n\ntry:\n import pims\nexcept ImportError as import_error:\n raise RuntimeError(\n \"Cannot import pims, please install it e.g. using 'webknossos[all]'\"\n ) from import_error\n\n\n# Fix ImageIOReader not handling channels correctly. This might get fixed via\n# https://github.com/soft-matter/pims/pull/430\npims.ImageIOReader.frame_shape = pims.FramesSequenceND.frame_shape\n\n\ndef _assume_color_channel(dim_size: int, dtype: np.dtype) -> bool:\n return dim_size == 1 or (dim_size == 3 and dtype == np.dtype(\"uint8\"))\n\n\nclass PimsImages:\n dtype: DTypeLike\n num_channels: int\n\n def __init__(\n self,\n images: Union[str, Path, \"pims.FramesSequence\", List[Union[str, PathLike]]],\n channel: Optional[int],\n timepoint: Optional[int],\n czi_channel: Optional[int],\n swap_xy: bool,\n flip_x: bool,\n flip_y: bool,\n flip_z: bool,\n use_bioformats: Optional[bool],\n is_segmentation: bool,\n ) -> None:\n \"\"\"\n During initialization the pims objects are examined and configured to produce\n ndarrays that follow the following form:\n (self._iter_axes, *self._bundle_axis)\n self._iter_axes can be a list of different axes or an empty list if the image is 2D.\n In the latter case, the inner 2D image is still wrapped in a single-element list\n by _open_images() to be consistent with 3D images.\n self._bundle_axis can consist of \"x\", \"y\" and \"c\", where \"c\" is optional and must be\n at the start or the end, so one of \"xy\", \"yx\", \"xyc\", \"yxc\", \"cxy\", \"cyx\".\n\n The part \"IDENTIFY AXIS ORDER\" figures out (self._iter_dim, *self._img_dims)\n from out-of-the-box pims images. Afterwards self._open_images() produces\n images consistent with those variables.\n\n The part \"IDENTIFY SHAPE & CHANNELS\" uses this information and the well-defined\n images to figure out the shape & num_channels.\n \"\"\"\n ## we use images as the name for the entered contextmanager,\n ## the `del` prevents any confusion with the passed argument.\n self._original_images = images\n del images\n\n ## arguments as inner attributes\n self._channel = channel\n self._timepoint = timepoint\n self._czi_channel = czi_channel\n self._swap_xy = swap_xy\n self._flip_x = flip_x\n self._flip_y = flip_y\n self._flip_z = flip_z\n self._use_bioformats = use_bioformats\n\n ## attributes that will be set in __init__()\n # _bundle_axes\n self._iter_axes = None\n self._iter_loop_size = None\n self._possible_layers = {}\n\n ## attributes only for pims.FramesSequenceND instances:\n # _default_coords\n # _init_c_axis\n\n ## attributes that will also be set in __init__()\n # dtype\n # num_channels\n # _first_n_channels\n\n #######################\n # IDENTIFY AXIS ORDER #\n #######################\n\n with self._open_images() as images:\n assert isinstance(\n images, pims.FramesSequence\n ), f\"{type(images)} does not inherit from pims.FramesSequence\"\n self.dtype = images.dtype\n\n if isinstance(images, pims.FramesSequenceND):\n self._default_coords = {}\n self._init_c_axis = False\n if isinstance(images, pims.imageio_reader.ImageIOReader):\n # bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. This might get fixed via\n # https://github.com/soft-matter/pims/pull/430\n if (\n len(images._shape) >= len(images.sizes)\n and \"c\" not in images.sizes\n ):\n images._init_axis(\"c\", images._shape[-1])\n self._init_c_axis = True\n\n if isinstance(images, PimsCziReader):\n available_czi_channels = images.available_czi_channels()\n if len(available_czi_channels) > 1:\n self._possible_layers[\"czi_channel\"] = available_czi_channels\n\n # An image slice should always consist of a 2D image. If there are multiple channels\n # the data of each channel is part of the image slices. Possible shapes of an image\n # slice are (#y_shape, #x_shape), (1, #y_shape, #x_shape) or (3, #y_shape, #x_shape).\n if images.sizes.get(\"c\", 1) > 1:\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n if \"c\" in images.axes:\n # When c-axis is not in _bundle_axes and _iter_axes its value at coordinate 0\n # should be returned\n self._default_coords[\"c\"] = 0\n self._bundle_axes = [\"y\", \"x\"]\n\n # All other axes are used to iterate over them. The last one is iterated the fastest.\n self._iter_axes = list(\n set(images.axes).difference({*self._bundle_axes, \"c\", \"z\"})\n )\n if \"z\" in images.axes:\n self._iter_axes.append(\"z\")\n\n if self._timepoint is not None:\n # if a timepoint is given, PimsImages should only generate image slices for that timepoint\n if \"t\" in self._iter_axes:\n self._iter_axes.remove(\"t\")\n self._default_coords[\"t\"] = self._timepoint\n\n if len(self._iter_axes) > 1:\n iter_size = 1\n self._iter_loop_size = dict()\n for axis, other_axis in zip(\n self._iter_axes[-1:0:-1], self._iter_axes[-2::-1]\n ):\n # Creates a dict that contains the size of the loop for each axis\n # the axes are identified by their index in the _iter_axes list\n # the last axis is the fastest iterating axis, therfore the size of the loop\n # for the last axis is 1. For all other axes it is the product of all previous axes sizes.\n # self._iter_axes[-1:0:-1] is a reversed copy of self._iter_axes without the last element\n # e.g. [1,2,3,4] -> [4,3,2]\n # self._iter_axes[-2::-1] is a reversed copy of self._iter_axes without the first element\n # e.g. [1,2,3,4] -> [3,2,1]\n self._iter_loop_size[other_axis] = (\n iter_size := iter_size * images.sizes[axis]\n )\n\n else:\n # Fallback for generic pims classes that do not name their\n # dimensions as pims.FramesSequenceND does:\n\n _allow_channels_first = not is_segmentation\n if isinstance(images, (pims.ImageSequence, pims.ReaderSequence)):\n _allow_channels_first = False\n\n if len(images.shape) == 2:\n # Assume yx\n self._bundle_axes = [\"y\", \"x\"]\n self._iter_axes = []\n elif len(images.shape) == 3:\n # Assume yxc, cyx or zyx\n if _assume_color_channel(images.shape[2], images.dtype):\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = []\n elif images.shape[0] == 1 or (\n _allow_channels_first\n and _assume_color_channel(images.shape[0], images.dtype)\n ):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n self._iter_axes = []\n else:\n self._bundle_axes = [\"y\", \"x\"]\n self._iter_axes = [\"z\"]\n elif len(images.shape) == 4:\n # Assume zcyx or zyxc\n if images.shape[1] == 1 or _assume_color_channel(\n images.shape[1], images.dtype\n ):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = [\"z\"]\n elif len(images.shape) == 5:\n # Assume tzcyx or tzyxc\n # t has to be constant for this reader to obtain 4D image\n # (only possible if not specified manually already, since\n # the timepoint would already be indexed here and the\n # 5th dimension would be something else)\n if timepoint is not None:\n raise RuntimeError(\n f\"Got {len(images.shape)} axes for the images after \"\n + \"removing time dimension, can only map to 3D+channels.\"\n + \"To import image with more dimensions use dataformat\"\n + \"Zarr3 and set use_bioformats=True.\"\n )\n\n if _assume_color_channel(images.shape[2], images.dtype):\n self._bundle_axes = [\"c\", \"y\", \"x\"]\n else:\n self._bundle_axes = [\"y\", \"x\", \"c\"]\n self._iter_axes = [\"z\"]\n self._timepoint = 0\n if images.shape[0] > 1:\n self._possible_layers[\"timepoint\"] = list(\n range(0, images.shape[0])\n )\n else:\n raise RuntimeError(\n f\"Got {len(images.shape)} axes for the images, \"\n + \"but don't have axes information. Try to open \"\n + \"an N-dimensional image file with use_bioformats=\"\n + \"True.\"\n )\n\n #########################\n # IDENTIFY NUM_CHANNELS #\n #########################\n\n with self._open_images() as images:\n try:\n c_index = self._bundle_axes.index(\"c\")\n if isinstance(images, list):\n images_shape = (len(images),) + cast(\n pims.FramesSequence, images[0]\n ).shape\n else:\n images_shape = images.shape # pylint: disable=no-member\n\n self.num_channels = images_shape[c_index + 1]\n\n except ValueError:\n self.num_channels = 1\n\n self._first_n_channels = None\n if self._channel is not None:\n assert (\n self._channel < self.num_channels\n ), f\"Selected channel {self._channel} (0-indexed), but only {self.num_channels} channels are available.\"\n self.num_channels = 1\n else:\n if self.num_channels == 2:\n self._possible_layers[\"channel\"] = [0, 1]\n self.num_channels = 1\n self._channel = 0\n elif self.num_channels > 3:\n self._possible_layers[\"channel\"] = list(range(0, self.num_channels))\n self.num_channels = 3\n self._first_n_channels = 3\n\n def _normalize_original_images(self) -> Union[str, List[str]]:\n original_images = self._original_images\n if isinstance(original_images, (str, Path)):\n original_images_path = Path(original_images)\n if original_images_path.is_dir():\n valid_suffixes = get_valid_pims_suffixes()\n if self._use_bioformats is not False:\n valid_suffixes.update(get_valid_bioformats_suffixes())\n original_images = natsorted(\n str(i)\n for i in original_images_path.glob(\"**/*\")\n if i.is_file() and i.suffix.lstrip(\".\") in valid_suffixes\n )\n if len(original_images) == 1:\n original_images = original_images[0]\n if isinstance(original_images, str):\n return original_images\n elif isinstance(original_images, Iterable):\n return [str(i) for i in original_images]\n else:\n return str(original_images)\n\n def _ensure_correct_bioformats_usage(\n self, images_context_manager: pims.FramesSequence\n ) -> None:\n if (\n isinstance(images_context_manager, pims.bioformats.BioformatsReader)\n and self._use_bioformats is False\n ): # None is allowed\n raise RuntimeError(\n \"Selected bioformats reader, but using bioformats is not allowed \"\n + \"(use_bioformats is False).\"\n )\n\n def _try_open_pims_images(\n self, original_images: Union[str, List[str]], exceptions: List[Exception]\n ) -> Optional[pims.FramesSequence]:\n if self._use_bioformats:\n return None\n\n open_kwargs = {}\n if self._czi_channel is not None:\n open_kwargs[\"czi_channel\"] = self._czi_channel\n\n # try normal pims.open\n def strategy_0() -> pims.FramesSequence:\n result = pims.open(original_images, **open_kwargs)\n self._ensure_correct_bioformats_usage(original_images)\n return result\n\n # try pims.ImageSequence, which uses skimage internally but works for multiple images\n strategy_1 = lambda: pims.ImageSequence(original_images) # noqa: E731 Do not assign a `lambda` expression, use a `def`\n\n # for image lists, try to guess the correct reader using only the first image,\n # and apply that for all images via pims.ReaderSequence\n def strategy_2() -> Optional[pims.FramesSequence]:\n if isinstance(original_images, list):\n # assuming the same reader works for all images:\n first_image_handler = pims.open(original_images[0], **open_kwargs)\n self._ensure_correct_bioformats_usage(first_image_handler)\n return pims.ReaderSequence(\n original_images, type(first_image_handler), **open_kwargs\n )\n else:\n return None\n\n for strategy in [strategy_0, strategy_1, strategy_2]:\n try:\n images_context_manager = strategy()\n except Exception as e: # noqa: PERF203 `try`-`except` within a loop incurs performance overhead\n exceptions.append(e)\n else:\n if images_context_manager is not None:\n return images_context_manager\n return None\n\n def _try_open_bioformats_images_raw(\n self,\n original_images: Union[str, List[str]],\n exceptions: List[Exception],\n ) -> Optional[pims.FramesSequence]:\n try:\n if self._use_bioformats is False: # None is allowed\n raise RuntimeError(\n \"Using bioformats is not allowed (use_bioformats is False).\"\n )\n\n # There is a wrong warning about jpype, supressing it here.\n # See issue https://github.com/soft-matter/pims/issues/384\n warnings.filterwarnings(\n \"ignore\",\n \"Due to an issue with JPype 0.6.0, reading is slower.*\",\n category=UserWarning,\n module=\"pims.bioformats\",\n )\n try:\n pims.bioformats._find_jar()\n except HTTPError:\n # We cannot use the newest bioformats version,\n # since it does not include the necessary loci_tools.jar.\n # Updates to support newer bioformats jars with pims are in PR\n # https://github.com/soft-matter/pims/pull/403\n\n # This is also part of the worker dockerfile to cache the\n # jar in the image, please update Dockerfile.worker in the\n # voxelytics repo accordingly when editing this.\n pims.bioformats.download_jar(version=\"6.7.0\")\n\n if \"*\" in str(original_images) or isinstance(original_images, list):\n return pims.ReaderSequence(\n original_images, pims.bioformats.BioformatsReader\n )\n else:\n return pims.bioformats.BioformatsReader(original_images)\n except Exception as e:\n exceptions.append(e)\n return None\n\n @contextmanager\n def _open_images(\n self,\n ) -> Iterator[Union[pims.FramesSequence, List[pims.FramesSequence]]]:\n \"\"\"\n This yields well-defined images of the form (self._iter_axes, *self._bundle_axes),\n after IDENTIFY AXIS ORDER of __init__() has run.\n For a 2D image this is achieved by wrapping it in a list.\n \"\"\"\n images_context_manager: Optional[ContextManager]\n with warnings.catch_warnings():\n if isinstance(self._original_images, pims.FramesSequenceND):\n images_context_manager = nullcontext(enter_result=self._original_images)\n else:\n exceptions: List[Exception] = []\n original_images = self._normalize_original_images()\n images_context_manager = None\n\n images_context_manager = self._try_open_pims_images(\n original_images, exceptions\n )\n\n if images_context_manager is None:\n images_context_manager = self._try_open_bioformats_images_raw(\n original_images, exceptions\n )\n\n if images_context_manager is None:\n if len(exceptions) == 1:\n raise exceptions[0]\n else:\n exceptions_str = \"\\n\".join(\n f\"{type(e).__name__}: {str(e)}\" for e in exceptions\n )\n raise ValueError(\n f\"Tried to open the images {self._original_images} with different methods, \"\n + f\"none succeded. The following errors were raised:\\n{exceptions_str}\"\n )\n\n with images_context_manager as images:\n if isinstance(images, pims.FramesSequenceND):\n if hasattr(self, \"_bundle_axes\"):\n # first part of __init__() has happened\n images.default_coords.update(self._default_coords)\n if self._init_c_axis and \"c\" not in images.sizes:\n # Bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. _init_c_axis is set in __init__().\n # This might get fixed via https://github.com/soft-matter/pims/pull/430\n images._init_axis(\"c\", images._shape[-1])\n for key in list(images._get_frame_dict.keys()):\n images._get_frame_dict[key + (\"c\",)] = (\n images._get_frame_dict.pop(key)\n )\n self._bundle_axes.remove(\"c\")\n self._bundle_axes.append(\"c\")\n images.bundle_axes = self._bundle_axes\n images.iter_axes = self._iter_axes\n else:\n if self._timepoint is not None:\n images = images[self._timepoint]\n if self._iter_axes and \"t\" in self._iter_axes:\n self._iter_axes.remove(\"t\")\n if self._iter_axes == []:\n # add outer list to wrap 2D images as 3D-like structure\n images = [images]\n yield images\n\n def copy_to_view(\n self,\n args: Union[BoundingBox, NDBoundingBox],\n mag_view: MagView,\n is_segmentation: bool,\n dtype: Optional[DTypeLike] = None,\n ) -> Tuple[Tuple[int, int], Optional[int]]:\n \"\"\"Copies the images according to the passed arguments to the given mag_view.\n args is expected to be a (ND)BoundingBox the start and end of the z-range, meant for usage with an executor.\n copy_to_view returns an iterable of image shapes and largest segment ids. When using this\n method a manual update of the bounding box and the largest segment id might be necessary.\n \"\"\"\n absolute_bbox = args\n relative_bbox = absolute_bbox.offset(-mag_view.bounding_box.topleft)\n\n assert all(\n size == 1\n for size, axis in zip(absolute_bbox.size, absolute_bbox.axes)\n if axis not in (\"x\", \"y\", \"z\")\n ), \"The delivered BoundingBox has to be flat except for x,y and z dimension.\"\n\n # z_start and z_end are relative to the bounding box of the mag_view\n # to access the correct data from the images\n z_start, z_end = relative_bbox.get_bounds(\"z\")\n shapes = []\n max_id: Optional[int]\n if is_segmentation:\n max_id = 0\n else:\n max_id = None\n\n with self._open_images() as images:\n if self._iter_axes is not None and self._iter_loop_size is not None:\n # select the range of images that represents one xyz combination in the mag_view\n lower_bounds = sum(\n self._iter_loop_size[axis_name]\n * relative_bbox.get_bounds(axis_name)[0]\n for axis_name in self._iter_axes[:-1]\n )\n upper_bounds = lower_bounds + mag_view.bounding_box.get_shape(\"z\")\n images = images[lower_bounds:upper_bounds]\n if self._flip_z:\n images = images[::-1] # pylint: disable=unsubscriptable-object\n\n with mag_view.get_buffered_slice_writer(\n # Previously only z_start and its end were important, now the slice writer needs to know\n # which axis is currently written.\n absolute_bounding_box=absolute_bbox,\n buffer_size=absolute_bbox.get_shape(\"z\"),\n # copy_to_view is typically used in a multiprocessing-context. Therefore the\n # buffered slice writer should not update the json file to avoid race conditions.\n json_update_allowed=False,\n ) as writer:\n for image_slice in images[z_start:z_end]:\n image_slice = np.array(image_slice)\n # place channels first\n if \"c\" in self._bundle_axes:\n if hasattr(self, \"_init_c_axis\") and self._init_c_axis:\n # Bugfix for ImageIOReader which misses channel axis sometimes,\n # assuming channels come last. _init_c_axis is set in __init__().\n # This might get fixed via\n image_slice = image_slice[0]\n image_slice = np.moveaxis(\n image_slice,\n source=self._bundle_axes.index(\"c\"),\n destination=0,\n )\n if self._channel is not None:\n image_slice = image_slice[self._channel : self._channel + 1]\n elif self._first_n_channels is not None:\n image_slice = image_slice[: self._first_n_channels]\n assert image_slice.shape[0] == self.num_channels, (\n f\"Image shape {image_slice.shape} does not fit to the number of channels \"\n + f\"{self.num_channels} which are expected in the first axis.\"\n )\n\n if self._flip_x:\n image_slice = np.flip(image_slice, -2)\n if self._flip_y:\n image_slice = np.flip(image_slice, -1)\n\n if dtype is not None:\n image_slice = image_slice.astype(dtype, order=\"F\")\n\n if max_id is not None:\n max_id = max(max_id, image_slice.max())\n\n if self._swap_xy is False:\n image_slice = np.moveaxis(image_slice, -1, -2)\n\n shapes.append(image_slice.shape[-2:])\n writer.send(image_slice)\n\n return dimwise_max(shapes), None if max_id is None else int(max_id)\n\n def get_possible_layers(self) -> Optional[Dict[\"str\", List[int]]]:\n if len(self._possible_layers) == 0:\n return None\n else:\n return self._possible_layers\n\n @property\n def expected_bbox(self) -> NDBoundingBox:\n # replaces the previous expected_shape to enable n-dimensional input files\n with self._open_images() as images:\n if isinstance(images, pims.FramesSequenceND):\n axes = images.axes\n images_shape = tuple(images.sizes[axis] for axis in axes)\n else:\n if isinstance(images, list):\n images_shape = (len(images),) + cast(\n pims.FramesSequence, images[0]\n ).shape\n\n else:\n images_shape = images.shape # pylint: disable=no-member\n if len(images_shape) == 3:\n axes = (\"z\", \"y\", \"x\")\n else:\n axes = (\"z\", \"c\", \"y\", \"x\")\n\n if self._iter_loop_size is None:\n # There is no or only one element in self._iter_axes, so a 3D bounding box is sufficient.\n x_index, y_index = (\n axes.index(\"x\"),\n axes.index(\"y\"),\n )\n if self._iter_axes:\n try:\n # In case the naming of the third axis is not \"z\",\n # it is still considered as the z-axis.\n z_index = axes.index(self._iter_axes[0])\n except ValueError:\n z_index = axes.index(\"z\")\n z_shape = images_shape[z_index]\n else:\n z_shape = 1\n if self._swap_xy:\n x_index, y_index = y_index, x_index\n return BoundingBox(\n (0, 0, 0),\n (images_shape[x_index], images_shape[y_index], z_shape),\n )\n else:\n if isinstance(images, pims.FramesSequenceND):\n axes_names = (self._iter_axes or []) + [\n axis for axis in self._bundle_axes if axis != \"c\"\n ]\n axes_sizes = [\n images.sizes[axis] # pylint: disable=no-member\n for axis in axes_names\n ]\n axes_index = list(range(1, len(axes_names) + 1))\n topleft = VecInt.zeros(tuple(axes_names))\n\n if self._swap_xy:\n x_index, y_index = axes_names.index(\"x\"), axes_names.index(\"y\")\n axes_sizes[x_index], axes_sizes[y_index] = (\n axes_sizes[y_index],\n axes_sizes[x_index],\n )\n\n return NDBoundingBox(\n topleft,\n VecInt(axes_sizes, axes=axes_names),\n axes_names,\n VecInt(axes_index, axes=axes_names),\n )\n\n raise ValueError(\n \"It seems as if you try to load an N-dimensional image from 2D images. This is currently not supported.\"\n )\n\n\nT = TypeVar(\"T\", bound=Tuple[int, ...])\n\n\ndef dimwise_max(vectors: Sequence[T]) -> T:\n if len(vectors) == 1:\n return vectors[0]\n else:\n return cast(T, tuple(map(max, *vectors)))\n\n\nC = TypeVar(\"C\", bound=Type)\n\n\ndef _recursive_subclasses(cls: C) -> List[C]:\n \"Return all subclasses (and their subclasses, etc.).\"\n # Source: http://stackoverflow.com/a/3862957/1221924\n return cls.__subclasses__() + [\n g for s in cls.__subclasses__() for g in _recursive_subclasses(s)\n ]\n\n\ndef _get_all_pims_handlers() -> (\n Iterable[Type[Union[pims.FramesSequence, pims.FramesSequenceND]]]\n):\n return chain(\n _recursive_subclasses(pims.FramesSequence),\n _recursive_subclasses(pims.FramesSequenceND),\n )\n\n\ndef get_valid_pims_suffixes() -> Set[str]:\n valid_suffixes = set()\n for pims_handler in _get_all_pims_handlers():\n valid_suffixes.update(pims_handler.class_exts())\n return valid_suffixes\n\n\ndef get_valid_bioformats_suffixes() -> Set[str]:\n # Added the most present suffixes that are implemented in bioformats\n return {\n \"dcm\",\n \"dicom\",\n \"ics\",\n \"ids\",\n \"lei\",\n \"tif\",\n \"lif\",\n \"stk\",\n \"nd\",\n \"nd2\",\n \"png\",\n \"tiff\",\n \"tf2\",\n \"tf8\",\n \"btf\",\n \"pic\",\n \"raw\",\n \"xml\",\n \"gif\",\n \"nii\",\n }\n\n\ndef has_image_z_dimension(\n filepath: Path,\n use_bioformats: Optional[bool],\n is_segmentation: bool,\n) -> bool:\n pims_images = PimsImages(\n filepath,\n use_bioformats=use_bioformats,\n is_segmentation=is_segmentation,\n # the following arguments shouldn't matter much for the Dataset.from_images method:\n channel=None,\n timepoint=None,\n czi_channel=None,\n swap_xy=False,\n flip_x=False,\n flip_y=False,\n flip_z=False,\n )\n\n return pims_images.expected_bbox.get_shape(\"z\") > 1\n", "path": "webknossos/webknossos/dataset/_utils/pims_images.py" } ]
diff --git a/webknossos/Changelog.md b/webknossos/Changelog.md index 2088cec64..b97583315 100644 --- a/webknossos/Changelog.md +++ b/webknossos/Changelog.md @@ -18,6 +18,7 @@ For upgrade instructions, please check the respective _Breaking Changes_ section ### Changed - Updated ruff to v0.4.0 [1047](https://github.com/scalableminds/webknossos-libs/pull/1047) +- Added NIfTI suffix .nii to list of supported bioformats suffixes. [#1048](https://github.com/scalableminds/webknossos-libs/pull/1048) ### Fixed diff --git a/webknossos/webknossos/dataset/_utils/pims_images.py b/webknossos/webknossos/dataset/_utils/pims_images.py index af9331789..c80044080 100644 --- a/webknossos/webknossos/dataset/_utils/pims_images.py +++ b/webknossos/webknossos/dataset/_utils/pims_images.py @@ -725,6 +725,7 @@ def get_valid_bioformats_suffixes() -> Set[str]: "raw", "xml", "gif", + "nii", }
kedro-org__kedro-2092
Release Kedro `0.18.4` ### Depends on: - Dataset issues - Spaceflights tutorial documentation - Open PRs related to datasets: - [x] https://github.com/kedro-org/kedro/pull/2082 - [x] https://github.com/kedro-org/kedro/pull/1746 - [x] https://github.com/kedro-org/kedro/pull/1992 - [x] https://github.com/kedro-org/kedro/pull/1865 - [x] https://github.com/kedro-org/kedro/pull/1312 - [x] https://github.com/kedro-org/kedro/pull/1844 - [x] https://github.com/kedro-org/kedro/pull/1962 - [x] https://github.com/kedro-org/kedro/pull/1964 - [x] https://github.com/kedro-org/kedro/pull/1931 - [x] https://github.com/kedro-org/kedro/pull/1587 For the above PRs: if it's nearly finished, but the author isn't responding, we as a team can take over and finish the PR. If the PR still needs a lot of work and the author isn't responding, I suggest we close it and ask them to re-open in the new `kedro-datasets` repo.
[ { "content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.3\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py" } ]
[ { "content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.4\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py" } ]
diff --git a/CITATION.cff b/CITATION.cff index 336c59be9c..a4ba6a6e0f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -17,8 +17,14 @@ authors: given-names: Tynan - family-names: Hoang given-names: Lim +- family-names: Holzer + given-names: Jannic - family-names: Kanchwala given-names: Rashida +- family-names: Katiyar + given-names: Ankita +- family-names: Koh + given-names: Amanda - family-names: Mackay given-names: Andrew - family-names: Merali @@ -33,9 +39,11 @@ authors: given-names: Nero - family-names: Schwarzmann given-names: Joel +- family-names: Stichbury + given-names: Jo - family-names: Theisen given-names: Merel title: Kedro -version: 0.18.3 -date-released: 2022-09-20 +version: 0.18.4 +date-released: 2022-12-05 url: https://github.com/kedro-org/kedro diff --git a/RELEASE.md b/RELEASE.md index e7bcc363b5..6ab5217143 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -8,7 +8,15 @@ ## Migration guide from Kedro 0.18.* to 0.19.* -# Upcoming Release 0.18.4 +# Upcoming Release 0.18.5 + +## Major features and improvements + +## Bug fixes and other changes + +## Breaking changes to the API + +# Release 0.18.4 ## Major features and improvements * Make Kedro instantiate datasets from `kedro_datasets` with higher priority than `kedro.extras.datasets`. `kedro_datasets` is the namespace for the new `kedro-datasets` python package. @@ -33,7 +41,7 @@ * Updated Experiment Tracking docs with working examples. * Updated MatplotlibWriter Dataset, TextDataset, plotly.PlotlyDataSet and plotly.JSONDataSet docs with working examples. * Modified implementation of the Kedro IPython extension to use `local_ns` rather than a global variable. -* Refactored `ShelveStore` to it's own module to ensure multiprocessing works with it. +* Refactored `ShelveStore` to its own module to ensure multiprocessing works with it. * `kedro.extras.datasets.pandas.SQLQueryDataSet` now takes optional argument `execution_options`. * Removed `attrs` upper bound to support newer versions of Airflow. * Bumped the lower bound for the `setuptools` dependency to <=61.5.1. diff --git a/docs/source/deployment/databricks.md b/docs/source/deployment/databricks.md index 261545d3d7..ffab1c5c1b 100644 --- a/docs/source/deployment/databricks.md +++ b/docs/source/deployment/databricks.md @@ -34,7 +34,7 @@ conda create --name iris_databricks python=3.7 -y conda activate iris_databricks # install Kedro and create a new project -pip install "kedro~=0.18.3" +pip install "kedro~=0.18.4" # name your project Iris Databricks when prompted for it kedro new --starter pyspark-iris ``` @@ -169,10 +169,10 @@ In your newly-created notebook, put each of the below code snippets into a separ %sh rm -rf ~/projects/iris-databricks && git clone --single-branch --branch main https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/${GITHUB_USER}/<your-repo-name>.git ~/projects/iris-databricks ``` -* Install the latest version of Kedro compatible with version `0.18.3` +* Install the latest version of Kedro compatible with version `0.18.4` ```console -%pip install "kedro[spark.SparkDataSet]~=0.18.3" +%pip install "kedro[spark.SparkDataSet]~=0.18.4" ``` * Copy input data into DBFS diff --git a/docs/source/development/commands_reference.md b/docs/source/development/commands_reference.md index 679b0d16e0..4418450352 100644 --- a/docs/source/development/commands_reference.md +++ b/docs/source/development/commands_reference.md @@ -114,7 +114,7 @@ Returns output similar to the following, depending on the version of Kedro used | |/ / _ \/ _` | '__/ _ \ | < __/ (_| | | | (_) | |_|\_\___|\__,_|_| \___/ -v0.18.3 +v0.18.4 Kedro is a Python framework for creating reproducible, maintainable diff --git a/docs/source/extend_kedro/plugins.md b/docs/source/extend_kedro/plugins.md index 9ea7a59837..1167356ca5 100644 --- a/docs/source/extend_kedro/plugins.md +++ b/docs/source/extend_kedro/plugins.md @@ -84,7 +84,7 @@ setup( After that you can use this starter with `kedro new --starter=test_plugin_starter`. ```{note} -If your starter lives on a git repository, by default Kedro attempts to use a tag or branch labelled with your version of Kedro, e.g. `0.18.3.`. This means that you can host different versions of your starter template on the same repository, and the correct one will automatically be used. If you do not wish to follow this structure, you should override it with the `checkout` flag, e.g. `kedro new --starter=test_plugin_starter --checkout=main`. +If your starter lives on a git repository, by default Kedro attempts to use a tag or branch labelled with your version of Kedro, e.g. `0.18.4.`. This means that you can host different versions of your starter template on the same repository, and the correct one will automatically be used. If you do not wish to follow this structure, you should override it with the `checkout` flag, e.g. `kedro new --starter=test_plugin_starter --checkout=main`. ``` ## Working with `click` diff --git a/docs/source/tutorial/tutorial_template.md b/docs/source/tutorial/tutorial_template.md index f089a9361d..4764804c21 100644 --- a/docs/source/tutorial/tutorial_template.md +++ b/docs/source/tutorial/tutorial_template.md @@ -17,7 +17,7 @@ Don't forget to check the [tutorial FAQ](spaceflights_tutorial_faqs.md) if you r If you have not yet set up Kedro, do so by [following the guidelines to install Kedro](../get_started/install.md). ```{important} -We recommend that you use the same version of Kedro that was most recently used to test this tutorial (0.18.3). +We recommend that you use the same version of Kedro that was most recently used to test this tutorial (0.18.4). ``` In your terminal window, navigate to the folder you want to store the project and type the following to create an empty project: @@ -73,7 +73,7 @@ The dependencies above might be sufficient for some projects, but for this tutor Add the following lines to your `src/requirements.txt` file: ```text -kedro[pandas.CSVDataSet, pandas.ExcelDataSet, pandas.ParquetDataSet]==0.18.3 # Specify optional Kedro dependencies +kedro[pandas.CSVDataSet, pandas.ExcelDataSet, pandas.ParquetDataSet]==0.18.4 # Specify optional Kedro dependencies kedro-viz~=5.0 # Visualise your pipelines scikit-learn~=1.0 # For modelling in the data science pipeline ``` diff --git a/docs/source/visualisation/visualise_charts_with_plotly.md b/docs/source/visualisation/visualise_charts_with_plotly.md index b6dd8d56d5..427fe1ae2c 100644 --- a/docs/source/visualisation/visualise_charts_with_plotly.md +++ b/docs/source/visualisation/visualise_charts_with_plotly.md @@ -10,7 +10,7 @@ We have also used the Plotly integration to allow users to [visualise metrics fr You must update the `requirements.txt` file in your Kedro project and add the following datasets to enable Plotly for your project. -`kedro[plotly.PlotlyDataSet, plotly.JSONDataSet]==0.18.3` +`kedro[plotly.PlotlyDataSet, plotly.JSONDataSet]==0.18.4` You can view Plotly charts in Kedro-Viz when you use Kedro's plotly datasets. @@ -155,7 +155,7 @@ The MatplotlibWriter dataset converts Matplotlib objects to image files. This me You can view Matplotlib charts in Kedro-Viz when you use the [Kedro MatplotLibWriter dataset](/kedro.extras.datasets.matplotlib.MatplotlibWriter). You must update the `src/requirements.txt` file in your Kedro project by adding the following dataset to enable Matplotlib for your project: ``` -kedro[matplotlib.MatplotlibWriter]==0.18.3 +kedro[matplotlib.MatplotlibWriter]==0.18.4 ``` To use this dataset, configure your plot in your Kedro node. The below functions should be added to the `nodes.py` and `pipeline.py` files respectively. diff --git a/kedro/__init__.py b/kedro/__init__.py index f61ba5612c..f73b43a588 100644 --- a/kedro/__init__.py +++ b/kedro/__init__.py @@ -3,7 +3,7 @@ configuration and pipeline assembly. """ -__version__ = "0.18.3" +__version__ = "0.18.4" import logging
streamlit__streamlit-2570
URL markup does not get generated as a link # Summary URLs used to generate an anchor tag automatically in markup. Now it does not # Steps to reproduce Code snippet: ``` st.write(f""" As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome! --- ### Connect With Us - We can be found at https://streamlit.io and https://twitter.com/streamlit - Come by [the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions, post awesome apps, or just say hi! """) ``` ## Expected behavior: [0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py) ![image](https://user-images.githubusercontent.com/24946400/103850694-fb278900-5075-11eb-8052-1d8fa9a639a7.png) ## Actual behavior: [0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py) ![image](https://user-images.githubusercontent.com/24946400/103850623-b8fe4780-5075-11eb-9592-689366dcd06c.png) ## Is this a regression? Yes as of 0.74
[ { "content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py" } ]
[ { "content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.1\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py" } ]
diff --git a/docs/troubleshooting/sanity-checks.md b/docs/troubleshooting/sanity-checks.md index 583f54b1c20c..e51aeebd2de4 100644 --- a/docs/troubleshooting/sanity-checks.md +++ b/docs/troubleshooting/sanity-checks.md @@ -35,7 +35,7 @@ pip install --upgrade streamlit streamlit version ``` -...and then verify that the version number printed is `0.74.0`. +...and then verify that the version number printed is `0.74.1`. **Try reproducing the issue now.** If not fixed, keep reading on. diff --git a/e2e/specs/st_echo.spec.js b/e2e/specs/st_echo.spec.js index 55541520c476..3435eff7fc56 100644 --- a/e2e/specs/st_echo.spec.js +++ b/e2e/specs/st_echo.spec.js @@ -21,7 +21,8 @@ describe("st.echo", () => { }); it("echos a code", () => { - cy.get(".element-container .stMarkdown").contains( + cy.get(".element-container .stMarkdown").should( + "have.text", `print("This code is awesome!")` ); }); diff --git a/e2e/specs/st_markdown.spec.js b/e2e/specs/st_markdown.spec.js index 6941d08d53d9..3e588214a400 100644 --- a/e2e/specs/st_markdown.spec.js +++ b/e2e/specs/st_markdown.spec.js @@ -28,7 +28,7 @@ describe("st.markdown", () => { expect(els[2].textContent).to.eq("This HTML tag is not escaped!"); expect(els[3].textContent).to.eq("[text]"); expect(els[4].textContent).to.eq("link"); - expect(els[5].textContent).to.eq("[][]"); + expect(els[5].textContent).to.eq(""); expect(els[6].textContent).to.eq("Inline math with KATXE\\KaTeXKATE​X"); expect(els[7].textContent).to.eq( "ax2+bx+c=0ax^2 + bx + c = 0ax2+bx+c=0" diff --git a/frontend/package.json b/frontend/package.json index 9b80e6036417..8d0ffcfad9e0 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "streamlit-browser", - "version": "0.74.0", + "version": "0.74.1", "private": true, "homepage": "./", "scripts": { @@ -79,17 +79,17 @@ "react-dropzone": "^11.2.0", "react-feather": "^2.0.8", "react-google-login": "^5.1.21", - "react-hotkeys": "^2.0.0", + "react-hotkeys": "^1.1.4", "react-json-view": "^1.19.1", "react-katex": "^2.0.2", "react-map-gl": "^5.2.7", - "react-markdown": "^5.0.3", + "react-markdown": "^4.3.1", "react-plotly.js": "^2.4.0", "react-transition-group": "^4.4.1", "react-virtualized": "^9.21.2", "react-window": "^1.8.5", "remark-emoji": "^2.1.0", - "remark-math": "^4.0.0", + "remark-math": "^2.0.1", "sprintf-js": "^1.1.2", "styletron-engine-atomic": "^1.4.6", "styletron-react": "^5.2.6", diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index fbaecc35f27a..a4f9c6b6b611 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -17,7 +17,7 @@ import React, { Fragment, PureComponent, ReactNode } from "react" import moment from "moment" -import { GlobalHotKeys, KeyMap } from "react-hotkeys" +import { HotKeys, KeyMap } from "react-hotkeys" import { fromJS } from "immutable" import classNames from "classnames" // Other local imports. @@ -922,6 +922,10 @@ export class App extends PureComponent<Props, State> { }) : null + // Attach and focused props provide a way to handle Global Hot Keys + // https://github.com/greena13/react-hotkeys/issues/41 + // attach: DOM element the keyboard listeners should attach to + // focused: A way to force focus behaviour return ( <PageLayoutContext.Provider value={{ @@ -933,7 +937,12 @@ export class App extends PureComponent<Props, State> { setFullScreen: this.handleFullScreen, }} > - <GlobalHotKeys keyMap={this.keyMap} handlers={this.keyHandlers}> + <HotKeys + keyMap={this.keyMap} + handlers={this.keyHandlers} + attach={window} + focused={true} + > <StyledApp className={outerDivClass}> {/* The tabindex below is required for testing. */} <Header> @@ -976,7 +985,7 @@ export class App extends PureComponent<Props, State> { /> {renderedDialog} </StyledApp> - </GlobalHotKeys> + </HotKeys> </PageLayoutContext.Provider> ) } diff --git a/frontend/src/components/core/StatusWidget/StatusWidget.tsx b/frontend/src/components/core/StatusWidget/StatusWidget.tsx index 11e62be47e4e..e61a09cfb9f6 100644 --- a/frontend/src/components/core/StatusWidget/StatusWidget.tsx +++ b/frontend/src/components/core/StatusWidget/StatusWidget.tsx @@ -18,7 +18,7 @@ import { EmotionIcon } from "@emotion-icons/emotion-icon" import { Ellipses, Info, Warning } from "@emotion-icons/open-iconic" import { RERUN_PROMPT_MODAL_DIALOG } from "lib/baseconsts" import React, { PureComponent, ReactNode } from "react" -import { GlobalHotKeys } from "react-hotkeys" +import { HotKeys } from "react-hotkeys" import { CSSTransition } from "react-transition-group" import Button, { Kind, Size } from "components/shared/Button" import Tooltip, { Placement } from "components/shared/Tooltip" @@ -343,8 +343,10 @@ class StatusWidget extends PureComponent<StatusWidgetProps, State> { this.props.reportRunState === ReportRunState.RERUN_REQUESTED const minimized = this.state.promptMinimized && !this.state.promptHovered + // Not sure exactly why attach and focused are necessary on the + // HotKeys component here but its not working without them return ( - <GlobalHotKeys handlers={this.keyHandlers}> + <HotKeys handlers={this.keyHandlers} attach={window} focused={true}> <div onMouseEnter={this.onReportPromptHover} onMouseLeave={this.onReportPromptUnhover} @@ -371,7 +373,7 @@ class StatusWidget extends PureComponent<StatusWidgetProps, State> { )} </StyledReportStatus> </div> - </GlobalHotKeys> + </HotKeys> ) } diff --git a/frontend/src/components/core/StreamlitDialog/ScriptChangedDialog.tsx b/frontend/src/components/core/StreamlitDialog/ScriptChangedDialog.tsx index edb095d52554..eadaca7f285a 100644 --- a/frontend/src/components/core/StreamlitDialog/ScriptChangedDialog.tsx +++ b/frontend/src/components/core/StreamlitDialog/ScriptChangedDialog.tsx @@ -16,7 +16,7 @@ */ import React, { PureComponent, ReactNode } from "react" -import { GlobalHotKeys } from "react-hotkeys" +import { HotKeys } from "react-hotkeys" import Modal, { ModalHeader, ModalBody, @@ -55,8 +55,10 @@ export class ScriptChangedDialog extends PureComponent<Props> { } public render(): ReactNode { + // Not sure exactly why attach and focused are necessary on the + // HotKeys component here but its not working without them return ( - <GlobalHotKeys handlers={this.keyHandlers}> + <HotKeys handlers={this.keyHandlers} attach={window} focused={true}> <Modal isOpen onClose={this.props.onClose}> <ModalHeader>App changed</ModalHeader> <ModalBody> @@ -73,7 +75,7 @@ export class ScriptChangedDialog extends PureComponent<Props> { </ModalButton> </ModalFooter> </Modal> - </GlobalHotKeys> + </HotKeys> ) } diff --git a/frontend/src/components/core/StreamlitDialog/StreamlitDialog.tsx b/frontend/src/components/core/StreamlitDialog/StreamlitDialog.tsx index cbcc4db42144..275830d3c507 100644 --- a/frontend/src/components/core/StreamlitDialog/StreamlitDialog.tsx +++ b/frontend/src/components/core/StreamlitDialog/StreamlitDialog.tsx @@ -25,7 +25,7 @@ import Modal, { ModalFooter, ModalButton, } from "components/shared/Modal" -import { GlobalHotKeys } from "react-hotkeys" +import { HotKeys } from "react-hotkeys" import { ScriptChangedDialog, @@ -156,8 +156,10 @@ function clearCacheDialog(props: ClearCacheProps): ReactElement { enter: () => props.defaultAction(), } + // Not sure exactly why attach is necessary on the HotKeys + // component here but it's not working without it return ( - <GlobalHotKeys handlers={keyHandlers}> + <HotKeys handlers={keyHandlers} attach={window}> <Modal isOpen onClose={props.onClose}> <ModalHeader>Clear Cache</ModalHeader> <ModalBody> @@ -175,7 +177,7 @@ function clearCacheDialog(props: ClearCacheProps): ReactElement { </ModalButton> </ModalFooter> </Modal> - </GlobalHotKeys> + </HotKeys> ) } @@ -206,8 +208,10 @@ function rerunScriptDialog(props: RerunScriptProps): ReactElement { enter: () => props.defaultAction(), } + // Not sure exactly why attach is necessary on the HotKeys + // component here but it's not working without it return ( - <GlobalHotKeys handlers={keyHandlers}> + <HotKeys handlers={keyHandlers} attach={window}> <Modal isOpen onClose={props.onClose}> <ModalBody> <StyledRerunHeader>Command line:</StyledRerunHeader> @@ -232,7 +236,7 @@ function rerunScriptDialog(props: RerunScriptProps): ReactElement { </ModalButton> </ModalFooter> </Modal> - </GlobalHotKeys> + </HotKeys> ) } diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 88262b1fe5d6..ef7af040ecd4 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -2829,13 +2829,6 @@ dependencies: "@types/geojson" "*" -"@types/mdast@^3.0.0", "@types/mdast@^3.0.3": - version "3.0.3" - resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.3.tgz#2d7d671b1cd1ea3deb306ea75036c2a0407d2deb" - integrity sha512-SXPBMnFVQg1s00dlMCc/jCdvPqdE4mXaMMCeRlxLDmTAEoegHT53xKtkDnzDTOcmMHUfcjyf36/YYZ6SxRdnsw== - dependencies: - "@types/unist" "*" - "@types/minimatch@*": version "3.0.3" resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" @@ -3073,7 +3066,7 @@ dependencies: source-map "^0.6.1" -"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2", "@types/unist@^2.0.3": +"@types/unist@^2.0.0": version "2.0.3" resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.3.tgz#9c088679876f374eb5983f150d4787aa6fb32d7e" integrity sha512-FvUupuM3rlRsRtCN+fDudtmytGO6iHJuuRKS1Ss0pG5z8oX0diNEw94UEL7hgDbpN94rgaK5R7sWm6RrSkZuAQ== @@ -5163,6 +5156,11 @@ code-point-at@^1.0.0: resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= +collapse-white-space@^1.0.2: + version "1.0.6" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.6.tgz#e63629c0016665792060dbbeb79c42239d2c5287" + integrity sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ== + collect-v8-coverage@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz#cc2c8e94fc18bbdffe64d6534570c8a673b27f59" @@ -6585,7 +6583,7 @@ [email protected], debug@=3.1.0: dependencies: ms "2.0.0" [email protected], debug@^4.0.0, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1: [email protected], debug@^4.0.1, debug@^4.1.0, debug@^4.1.1: version "4.3.1" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== @@ -9984,7 +9982,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, [email protected], inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.1, inherits@~2.0.3: +inherits@2, [email protected], inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.1, inherits@~2.0.3: version "2.0.4" resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -10162,12 +10160,12 @@ is-browser@^2.0.1, is-browser@^2.1.0: resolved "https://registry.yarnpkg.com/is-browser/-/is-browser-2.1.0.tgz#fc084d59a5fced307d6708c59356bad7007371a9" integrity sha512-F5rTJxDQ2sW81fcfOR1GnCXT6sVJC104fCyfj+mjpwNEwaPYSn5fte5jiHmBg3DHsIoL/l8Kvw5VN5SsTRcRFQ== -is-buffer@^1.0.2, is-buffer@~1.1.6: +is-buffer@^1.0.2, is-buffer@^1.1.4, is-buffer@~1.1.6: version "1.1.6" resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== -is-buffer@^2.0.0, is-buffer@^2.0.3: +is-buffer@^2.0.3: version "2.0.5" resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== @@ -10414,11 +10412,6 @@ is-plain-obj@^1.0.0, is-plain-obj@^1.1.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= -is-plain-obj@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" - integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== - is-plain-object@^2.0.3, is-plain-object@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" @@ -10512,11 +10505,21 @@ is-utf8@^0.2.0: resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" integrity sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI= +is-whitespace-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz#0858edd94a95594c7c9dd0b5c174ec6e45ee4aa7" + integrity sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w== + is-windows@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== +is-word-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.4.tgz#ce0e73216f98599060592f62ff31354ddbeb0230" + integrity sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA== + is-wsl@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" @@ -11282,13 +11285,6 @@ katex@^0.11.1: dependencies: commander "^2.19.0" -katex@^0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/katex/-/katex-0.12.0.tgz#2fb1c665dbd2b043edcf8a1f5c555f46beaa0cb9" - integrity sha512-y+8btoc/CK70XqcHqjxiGWBOeIL8upbS0peTPXTvgrh21n1RiWWcIpSWM+4uXq+IAgNh9YYQWdc7LVDPDAEEAg== - dependencies: - commander "^2.19.0" - katex@^0.9.0: version "0.9.0" resolved "https://registry.yarnpkg.com/katex/-/katex-0.9.0.tgz#26a7d082c21d53725422d2d71da9b2d8455fbd4a" @@ -11539,6 +11535,11 @@ lodash.flow@^3.3.0: resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a" integrity sha1-h79AKSuM+D5OjOGjrkIJ4gBxZ1o= +lodash.isboolean@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz#6c2e171db2a257cd96802fd43b01b20d5f5870f6" + integrity sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY= + lodash.isempty@^4.4.0: version "4.4.0" resolved "https://registry.yarnpkg.com/lodash.isempty/-/lodash.isempty-4.4.0.tgz#6f86cbedd8be4ec987be9aaf33c9684db1b31e7e" @@ -11647,11 +11648,6 @@ long@^4.0.0: resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== -longest-streak@^2.0.0: - version "2.0.4" - resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.4.tgz#b8599957da5b5dab64dee3fe316fa774597d90e4" - integrity sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg== - loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -11833,6 +11829,11 @@ marching-simplex-table@^1.0.0: dependencies: convex-hull "^1.0.3" +markdown-escapes@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.4.tgz#c95415ef451499d7602b91095f3c8e8975f78535" + integrity sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg== + mat4-decompose@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/mat4-decompose/-/mat4-decompose-1.0.4.tgz#65eb4fe39d70878f7a444eb4624d52f7e7eb2faf" @@ -11916,43 +11917,6 @@ [email protected]: dependencies: unist-util-visit-parents "1.1.2" -mdast-util-from-markdown@^0.8.0: - version "0.8.4" - resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.4.tgz#2882100c1b9fc967d3f83806802f303666682d32" - integrity sha512-jj891B5pV2r63n2kBTFh8cRI2uR9LQHsXG1zSDqfhXkIlDzrTcIlbB5+5aaYEkl8vOPIOPLf8VT7Ere1wWTMdw== - dependencies: - "@types/mdast" "^3.0.0" - mdast-util-to-string "^2.0.0" - micromark "~2.11.0" - parse-entities "^2.0.0" - unist-util-stringify-position "^2.0.0" - -mdast-util-math@^0.1.0: - version "0.1.2" - resolved "https://registry.yarnpkg.com/mdast-util-math/-/mdast-util-math-0.1.2.tgz#629a0793bd8822432917e5ddda5279492390cc2b" - integrity sha512-fogAitds+wH+QRas78Yr1TwmQGN4cW/G2WRw5ePuNoJbBSPJCxIOCE8MTzHgWHVSpgkRaPQTgfzXRE1CrwWSlg== - dependencies: - longest-streak "^2.0.0" - mdast-util-to-markdown "^0.6.0" - repeat-string "^1.0.0" - -mdast-util-to-markdown@^0.6.0: - version "0.6.1" - resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-0.6.1.tgz#0e07d3f871e056bffc38a0cf50c7298b56d9e0d6" - integrity sha512-4qJtZ0qdyYeexAXoOZiU0uHIFVncJAmCkHkSluAsvDaVWODtPyNEo9I1ns0T4ulxu2EHRH5u/bt1cV0pdHCX+A== - dependencies: - "@types/unist" "^2.0.0" - longest-streak "^2.0.0" - mdast-util-to-string "^2.0.0" - parse-entities "^2.0.0" - repeat-string "^1.0.0" - zwitch "^1.0.0" - -mdast-util-to-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz#b8cfe6a713e1091cb5b728fc48885a4767f8b97b" - integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w== - [email protected]: version "2.0.14" resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50" @@ -12044,22 +12008,6 @@ microevent.ts@~0.1.1: resolved "https://registry.yarnpkg.com/microevent.ts/-/microevent.ts-0.1.1.tgz#70b09b83f43df5172d0205a63025bce0f7357fa0" integrity sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g== -micromark-extension-math@^0.1.0: - version "0.1.2" - resolved "https://registry.yarnpkg.com/micromark-extension-math/-/micromark-extension-math-0.1.2.tgz#5d7bb2b86018da4a758c05f3991664430ee4d711" - integrity sha512-ZJXsT2eVPM8VTmcw0CPSDeyonOn9SziGK3Z+nkf9Vb6xMPeU+4JMEnO6vzDL10562Favw8Vste74f54rxJ/i6Q== - dependencies: - katex "^0.12.0" - micromark "~2.11.0" - -micromark@~2.11.0: - version "2.11.2" - resolved "https://registry.yarnpkg.com/micromark/-/micromark-2.11.2.tgz#e8b6a05f54697d2d3d27fc89600c6bc40dd05f35" - integrity sha512-IXuP76p2uj8uMg4FQc1cRE7lPCLsfAXuEfdjtdO55VRiFO1asrCSQ5g43NmPqFtRwzEnEhafRVzn2jg0UiKArQ== - dependencies: - debug "^4.0.0" - parse-entities "^2.0.0" - micromatch@^3.1.10, micromatch@^3.1.4: version "3.1.10" resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" @@ -12387,6 +12335,11 @@ mouse-wheel@^1.2.0: signum "^1.0.0" to-px "^1.0.1" +mousetrap@^1.5.2: + version "1.6.5" + resolved "https://registry.yarnpkg.com/mousetrap/-/mousetrap-1.6.5.tgz#8a766d8c272b08393d5f56074e0b5ec183485bf9" + integrity sha512-QNo4kEepaIBwiT8CDhP98umTetp+JNfQYBWvC1pc6/OAibuXtRcxZ58Qz8skvEHYvURne/7R8T5VoOI7rDsEUA== + move-concurrently@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" @@ -13291,10 +13244,10 @@ parse-asn1@^5.0.0, parse-asn1@^5.1.5: pbkdf2 "^3.0.3" safe-buffer "^5.1.1" -parse-entities@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" - integrity sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ== +parse-entities@^1.1.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-1.2.2.tgz#c31bf0f653b6661354f8973559cb86dd1d5edf50" + integrity sha512-NzfpbxW/NPrzZ/yYSoQxyqUZMZXIdCfE0OIN4ESsnptHJECoUk3FZktxNuzQf4tjt5UEopnxpYJbvYuxIFDdsg== dependencies: character-entities "^1.0.0" character-entities-legacy "^1.0.0" @@ -14589,7 +14542,7 @@ prop-types-extra@^1.1.0: react-is "^16.3.2" warning "^4.0.0" -prop-types@^15.5.10, prop-types@^15.6.0, prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2: +prop-types@^15.5.10, prop-types@^15.6.0, prop-types@^15.6.2, prop-types@^15.7.2: version "15.7.2" resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== @@ -15002,12 +14955,16 @@ react-google-login@^5.1.21: "@types/react" "*" prop-types "^15.6.0" -react-hotkeys@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/react-hotkeys/-/react-hotkeys-2.0.0.tgz#a7719c7340cbba888b0e9184f806a9ec0ac2c53f" - integrity sha512-3n3OU8vLX/pfcJrR3xJ1zlww6KS1kEJt0Whxc4FiGV+MJrQ1mYSYI3qS/11d2MJDFm8IhOXMTFQirfu6AVOF6Q== +react-hotkeys@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/react-hotkeys/-/react-hotkeys-1.1.4.tgz#a0712aa2e0c03a759fd7885808598497a4dace72" + integrity sha1-oHEqouDAOnWf14hYCFmEl6TaznI= dependencies: - prop-types "^15.6.1" + lodash.isboolean "^3.0.3" + lodash.isequal "^4.5.0" + lodash.isobject "^3.0.2" + mousetrap "^1.5.2" + prop-types "^15.6.0" react-input-mask@^2.0.4: version "2.0.4" @@ -15061,20 +15018,18 @@ react-map-gl@^5.2.7: react-virtualized-auto-sizer "^1.0.2" viewport-mercator-project "^6.2.3 || ^7.0.1" -react-markdown@^5.0.3: - version "5.0.3" - resolved "https://registry.yarnpkg.com/react-markdown/-/react-markdown-5.0.3.tgz#41040ea7a9324b564b328fb81dd6c04f2a5373ac" - integrity sha512-jDWOc1AvWn0WahpjW6NK64mtx6cwjM4iSsLHJPNBqoAgGOVoIdJMqaKX4++plhOtdd4JksdqzlDibgPx6B/M2w== +react-markdown@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/react-markdown/-/react-markdown-4.3.1.tgz#39f0633b94a027445b86c9811142d05381300f2f" + integrity sha512-HQlWFTbDxTtNY6bjgp3C3uv1h2xcjCSi1zAEzfBW9OwJJvENSYiLXWNXN5hHLsoqai7RnZiiHzcnWdXk2Splzw== dependencies: - "@types/mdast" "^3.0.3" - "@types/unist" "^2.0.3" html-to-react "^1.3.4" mdast-add-list-metadata "1.0.1" prop-types "^15.7.2" react-is "^16.8.6" - remark-parse "^9.0.0" - unified "^9.0.0" - unist-util-visit "^2.0.0" + remark-parse "^5.0.0" + unified "^6.1.5" + unist-util-visit "^1.3.0" xtend "^4.0.1" react-movable@^2.4.0: @@ -15570,20 +15525,31 @@ remark-emoji@^2.1.0: node-emoji "^1.10.0" unist-util-visit "^2.0.2" -remark-math@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/remark-math/-/remark-math-4.0.0.tgz#494ddd50766555ad2332e3afca7796a76452256f" - integrity sha512-lH7SoQenXtQrvL0bm+mjZbvOk//YWNuyR+MxV18Qyv8rgFmMEGNuB0TSCQDkoDaiJ40FCnG8lxErc/zhcedYbw== - dependencies: - mdast-util-math "^0.1.0" - micromark-extension-math "^0.1.0" +remark-math@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/remark-math/-/remark-math-2.0.1.tgz#6edc884ba3b40710c2ae31ce93cd88c9959ac8b9" + integrity sha512-FokDg5BmlPbKaAdD4IfSVuRgYH6FBPeIn0zxZA6oZ6epc4qOSjoSJPyhsH0H/WKABuaCVMJuF5O2STti6UmBQw== -remark-parse@^9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-9.0.0.tgz#4d20a299665880e4f4af5d90b7c7b8a935853640" - integrity sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw== +remark-parse@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-5.0.0.tgz#4c077f9e499044d1d5c13f80d7a98cf7b9285d95" + integrity sha512-b3iXszZLH1TLoyUzrATcTQUZrwNl1rE70rVdSruJFlDaJ9z5aMkhrG43Pp68OgfHndL/ADz6V69Zow8cTQu+JA== dependencies: - mdast-util-from-markdown "^0.8.0" + collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^1.1.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^1.0.0" + vfile-location "^2.0.0" + xtend "^4.0.1" remove-trailing-separator@^1.0.1: version "1.1.0" @@ -15606,7 +15572,7 @@ repeat-element@^1.1.2: resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== -repeat-string@^1.0.0, repeat-string@^1.3.0, repeat-string@^1.6.1: +repeat-string@^1.3.0, repeat-string@^1.5.4, repeat-string@^1.6.1: version "1.6.1" resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= @@ -15618,6 +15584,11 @@ repeating@^2.0.0: dependencies: is-finite "^1.0.0" [email protected]: + version "1.0.0" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-1.0.0.tgz#de63128373fcbf7c3ccfa4de5a480c45a67958eb" + integrity sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs= + request-progress@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-3.0.0.tgz#4ca754081c7fec63f505e4faa825aa06cd669dbe" @@ -16753,6 +16724,11 @@ start-server-and-test@^1.11.2: ps-tree "1.2.0" wait-on "5.2.0" +state-toggle@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.3.tgz#e123b16a88e143139b09c6852221bc9815917dfe" + integrity sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ== + static-eval@^2.0.5: version "2.1.0" resolved "https://registry.yarnpkg.com/static-eval/-/static-eval-2.1.0.tgz#a16dbe54522d7fa5ef1389129d813fd47b148014" @@ -17602,6 +17578,16 @@ trim-newlines@^1.0.0: resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613" integrity sha1-WIeWa7WCpFA6QetST301ARgVphM= +trim-trailing-lines@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0" + integrity sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ== + [email protected]: + version "0.0.1" + resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" + integrity sha1-WFhUf2spB1fulczMZm+1AITEYN0= + trough@^1.0.0: version "1.0.5" resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" @@ -17794,6 +17780,14 @@ underscore.template@^0.1.7: resolved "https://registry.yarnpkg.com/underscore.template/-/underscore.template-0.1.7.tgz#3013e0ea181756306f1609e959cafbc722adb3e9" integrity sha1-MBPg6hgXVjBvFgnpWcr7xyKts+k= +unherit@^1.0.4: + version "1.1.3" + resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.3.tgz#6c9b503f2b41b262330c80e91c8614abdaa69c22" + integrity sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ== + dependencies: + inherits "^2.0.0" + xtend "^4.0.0" + unicode-canonical-property-names-ecmascript@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" @@ -17817,17 +17811,17 @@ unicode-property-aliases-ecmascript@^1.0.4: resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz#dd57a99f6207bedff4628abefb94c50db941c8f4" integrity sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg== -unified@^9.0.0: - version "9.2.0" - resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.0.tgz#67a62c627c40589edebbf60f53edfd4d822027f8" - integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg== +unified@^6.1.5: + version "6.2.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-6.2.0.tgz#7fbd630f719126d67d40c644b7e3f617035f6dba" + integrity sha512-1k+KPhlVtqmG99RaTbAv/usu85fcSRu3wY8X+vnsEhIxNP5VbVIDiXnLqyKIG+UMdyTg0ZX9EI6k2AfjJkHPtA== dependencies: bail "^1.0.0" extend "^3.0.0" - is-buffer "^2.0.0" - is-plain-obj "^2.0.0" + is-plain-obj "^1.1.0" trough "^1.0.0" - vfile "^4.0.0" + vfile "^2.0.0" + x-is-string "^0.1.0" union-find@^1.0.0, union-find@^1.0.2: version "1.0.2" @@ -17880,23 +17874,40 @@ unique-string@^1.0.0: dependencies: crypto-random-string "^1.0.0" +unist-util-is@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-3.0.0.tgz#d9e84381c2468e82629e4a5be9d7d05a2dd324cd" + integrity sha512-sVZZX3+kspVNmLWBPAB6r+7D9ZgAFPNWm66f7YNb420RlQSbn+n8rG8dGZSkrER7ZIXGQYNm5pqC3v3HopH24A== + unist-util-is@^4.0.0: version "4.0.4" resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-4.0.4.tgz#3e9e8de6af2eb0039a59f50c9b3e99698a924f50" integrity sha512-3dF39j/u423v4BBQrk1AQ2Ve1FxY5W3JKwXxVFzBODQ6WEvccguhgp802qQLKSnxPODE6WuRZtV+ohlUg4meBA== -unist-util-stringify-position@^2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz#cce3bfa1cdf85ba7375d1d5b17bdc4cada9bd9da" - integrity sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g== +unist-util-remove-position@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-1.1.4.tgz#ec037348b6102c897703eee6d0294ca4755a2020" + integrity sha512-tLqd653ArxJIPnKII6LMZwH+mb5q+n/GtXQZo6S6csPRs5zB0u79Yw8ouR3wTw8wxvdJFhpP6Y7jorWdCgLO0A== dependencies: - "@types/unist" "^2.0.2" + unist-util-visit "^1.1.0" + +unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" + integrity sha512-pNCVrk64LZv1kElr0N1wPiHEUoXNVFERp+mlTg/s9R5Lwg87f9bM/3sQB99w+N9D/qnM9ar3+AKDBwo/gm/iQQ== [email protected]: version "1.1.2" resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-1.1.2.tgz#f6e3afee8bdbf961c0e6f028ea3c0480028c3d06" integrity sha512-yvo+MMLjEwdc3RhhPYSximset7rwjMrdt9E41Smmvg25UQIenzrN83cRnF1JMzoMi9zZOQeYXHSDf7p+IQkW3Q== +unist-util-visit-parents@^2.0.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-2.1.2.tgz#25e43e55312166f3348cae6743588781d112c1e9" + integrity sha512-DyN5vD4NE3aSeB+PXYNKxzGsfocxp6asDc2XXE3b0ekO2BaRUpBicbbUygfSvYfUz1IkmjFR1YF7dPklraMZ2g== + dependencies: + unist-util-is "^3.0.0" + unist-util-visit-parents@^3.0.0: version "3.1.1" resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz#65a6ce698f78a6b0f56aa0e88f13801886cdaef6" @@ -17905,7 +17916,14 @@ unist-util-visit-parents@^3.0.0: "@types/unist" "^2.0.0" unist-util-is "^4.0.0" -unist-util-visit@^2.0.0, unist-util-visit@^2.0.2: +unist-util-visit@^1.1.0, unist-util-visit@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.4.1.tgz#4724aaa8486e6ee6e26d7ff3c8685960d560b1e3" + integrity sha512-AvGNk7Bb//EmJZyhtRUnNMEpId/AZ5Ph/KUpTI09WHQuDZHKovQ1oEv3mfmKpWKtoMzyMC4GLBm1Zy5k12fjIw== + dependencies: + unist-util-visit-parents "^2.0.0" + +unist-util-visit@^2.0.2: version "2.0.3" resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-2.0.3.tgz#c3703893146df47203bb8a9795af47d7b971208c" integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q== @@ -18607,23 +18625,27 @@ [email protected]: core-util-is "1.0.2" extsprintf "^1.2.0" -vfile-message@^2.0.0: - version "2.0.4" - resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-2.0.4.tgz#5b43b88171d409eae58477d13f23dd41d52c371a" - integrity sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ== +vfile-location@^2.0.0: + version "2.0.6" + resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-2.0.6.tgz#8a274f39411b8719ea5728802e10d9e0dff1519e" + integrity sha512-sSFdyCP3G6Ka0CEmN83A2YCMKIieHx0EDaj5IDP4g1pa5ZJ4FJDvpO0WODLxo4LUX4oe52gmSCK7Jw4SBghqxA== + +vfile-message@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-1.1.1.tgz#5833ae078a1dfa2d96e9647886cd32993ab313e1" + integrity sha512-1WmsopSGhWt5laNir+633LszXvZ+Z/lxveBf6yhGsqnQIhlhzooZae7zV6YVM1Sdkw68dtAW3ow0pOdPANugvA== dependencies: - "@types/unist" "^2.0.0" - unist-util-stringify-position "^2.0.0" + unist-util-stringify-position "^1.1.1" -vfile@^4.0.0: - version "4.2.1" - resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.2.1.tgz#03f1dce28fc625c625bc6514350fbdb00fa9e624" - integrity sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA== +vfile@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-2.3.0.tgz#e62d8e72b20e83c324bc6c67278ee272488bf84a" + integrity sha512-ASt4mBUHcTpMKD/l5Q+WJXNtshlWxOogYyGYYrg4lt/vuRjC1EFQtlAofL5VmtVNIZJzWYFJjzGWZ0Gw8pzW1w== dependencies: - "@types/unist" "^2.0.0" - is-buffer "^2.0.0" - unist-util-stringify-position "^2.0.0" - vfile-message "^2.0.0" + is-buffer "^1.1.4" + replace-ext "1.0.0" + unist-util-stringify-position "^1.0.0" + vfile-message "^1.0.0" "viewport-mercator-project@^6.2.3 || ^7.0.1": version "7.0.1" @@ -19226,6 +19248,11 @@ ws@^7.2.3: resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.1.tgz#a333be02696bd0e54cea0434e21dcc8a9ac294bb" integrity sha512-pTsP8UAfhy3sk1lSk/O/s4tjD0CRwvMnzvwr4OKGX7ZvqZtUyx4KIJB5JWbkykPoc55tixMGgTNoh3k4FkNGFQ== +x-is-string@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/x-is-string/-/x-is-string-0.1.0.tgz#474b50865af3a49a9c4657f05acd145458f77d82" + integrity sha1-R0tQhlrzpJqcRlfwWs0UVFj3fYI= + xml-name-validator@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" @@ -19423,8 +19450,3 @@ zero-crossings@^1.0.0: integrity sha1-xWK9MRNkPzRDokXRJAa4i2m5qf8= dependencies: cwise-compiler "^1.0.0" - -zwitch@^1.0.0: - version "1.0.5" - resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920" - integrity sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw== diff --git a/lib/setup.py b/lib/setup.py index 9fe513f39e7e..2c0a989c3325 100644 --- a/lib/setup.py +++ b/lib/setup.py @@ -13,7 +13,7 @@ ) sys.exit(exit_msg) -VERSION = "0.74.0" # PEP-440 +VERSION = "0.74.1" # PEP-440 NAME = "streamlit"
spacetelescope__jwql-1598
Incorrect NIRSpec filter Using the query form of JWQL and clicking on NIRSpec, the list of filters contains the entry P750L. This is a filter for MIRI, not NIRSpec and should be removed from the NIRSpec listing. This filter is present in the MIRI filter list.
[ { "content": "\"\"\"Globally defined and used variables for the ``jwql`` project.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Matthew Bourque\n - Bryan Hilbert\n - Ben Sunnquist\n - Teagan King\n - Mike Engesser\n - Maria Pena-Guerrero\n - Rachel Cooper\n - Brad Sappington\n\nUse\n---\n This variables within this module are intended to be directly\n imported, e.g.:\n ::\n\n from jwql.utils.constants import JWST_INSTRUMENT_NAMES\n\nReferences\n----------\n\n Many variables were transferred from an earlier version of\n ``utils.py``\n\"\"\"\n\nimport asdf\nimport inflection\nimport os\n\n# Each amplifier is represented by 2 tuples, the first for x coordinates\n# and the second for y coordinates. Within each tuple are value for\n# starting, ending, and step size. Step size is needed for MIRI, where\n# the pixels corresponding to the 4 amplifiers are interleaved.\nAMPLIFIER_BOUNDARIES = {\n \"nircam\": {\n \"1\": [(0, 512, 1), (0, 2048, 1)],\n \"2\": [(512, 1024, 1), (0, 2048, 1)],\n \"3\": [(1024, 1536, 1), (0, 2048, 1)],\n \"4\": [(1536, 2048, 1), (0, 2048, 1)],\n },\n \"niriss\": {\n \"1\": [(0, 2048, 1), (0, 512, 1)],\n \"2\": [(0, 2048, 1), (512, 1024, 1)],\n \"3\": [(0, 2048, 1), (1024, 1536, 1)],\n \"4\": [(0, 2048, 1), (1536, 2048, 1)],\n },\n \"fgs\": {\n \"1\": [(0, 512, 1), (0, 2048, 1)],\n \"2\": [(512, 1024, 1), (0, 2048, 1)],\n \"3\": [(1024, 1536, 1), (0, 2048, 1)],\n \"4\": [(1536, 2048, 1), (0, 2048, 1)],\n },\n \"nirspec\": {\n \"1\": [(0, 2048, 1), (0, 512, 1)],\n \"2\": [(0, 2048, 1), (512, 1024, 1)],\n \"3\": [(0, 2048, 1), (1024, 1536, 1)],\n \"4\": [(0, 2048, 1), (1536, 2048, 1)],\n },\n \"miri\": {\n \"1\": [(0, 1032, 4), (0, 1024, 1)],\n \"2\": [(1, 1032, 4), (0, 1024, 1)],\n \"3\": [(2, 1032, 4), (0, 1024, 1)],\n \"4\": [(3, 1032, 4), (0, 1024, 1)],\n },\n}\n\n# Dictionary describing instruments to which anomalies apply\nANOMALIES_PER_INSTRUMENT = {\n # anomalies affecting all instruments:\n \"diffraction_spike\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"excessive_saturation\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"persistence\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n # anomalies affecting multiple instruments:\n \"crosstalk\": [\"fgs\", \"nircam\", \"niriss\"],\n \"data_transfer_error\": [\"fgs\", \"nircam\", \"niriss\"],\n \"ghost\": [\"fgs\", \"nircam\", \"niriss\"],\n \"guidestar_failure\": [\"fgs\", \"miri\", \"nircam\", \"niriss\"],\n \"unusual_cosmic_rays\": [\"fgs\", \"nircam\", \"niriss\", \"nirspec\"],\n \"unusual_snowballs\": [\"fgs\", \"nircam\", \"niriss\", \"nirspec\"],\n # instrument-specific anomalies:\n \"cosmic_ray_shower\": [\"miri\"],\n \"column_pull_up\": [\"miri\"],\n \"column_pull_down\": [\"miri\"],\n \"noticeable_msa_leakage\": [\"nirspec\"],\n \"dragons_breath\": [\"nircam\"],\n \"mrs_glow\": [\"miri\"],\n \"mrs_zipper\": [\"miri\"],\n \"internal_reflection\": [\"miri\"],\n \"new_short\": [\"nirspec\"], # Only for MOS observations\n \"row_pull_up\": [\"miri\"],\n \"row_pull_down\": [\"miri\"],\n \"lrs_contamination\": [\"miri\"],\n \"tree_rings\": [\"miri\"],\n \"scattered_light\": [\"niriss\", \"nircam\", \"nirspec\"],\n \"claws\": [\"nircam\"],\n \"wisps\": [\"nircam\"],\n \"tilt_event\": [\"nircam\"],\n \"light_saber\": [\"niriss\"],\n \"transient_short\": [\"nirspec\"],\n \"subsequently_masked_short\": [\"nirspec\"],\n \"monitored_short\": [\"nirspec\"],\n \"bright_object_not_a_short\": [\"nirspec\"],\n # additional anomalies:\n \"other\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"needs_discussion\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n}\n\n# Defines the possible anomalies to flag through the web app\nANOMALY_CHOICES = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n for anomaly in ANOMALIES_PER_INSTRUMENT\n]\n\nANOMALY_CHOICES_FGS = [\n (anomaly, inflection.titleize(anomaly).upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"fgs\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_MIRI = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"miri\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRCAM = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"nircam\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRISS = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"niriss\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRSPEC = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"nirspec\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_PER_INSTRUMENT = {\n \"fgs\": ANOMALY_CHOICES_FGS,\n \"miri\": ANOMALY_CHOICES_MIRI,\n \"nircam\": ANOMALY_CHOICES_NIRCAM,\n \"niriss\": ANOMALY_CHOICES_NIRISS,\n \"nirspec\": ANOMALY_CHOICES_NIRSPEC,\n}\n\nAPERTURES_PER_INSTRUMENT = {\n \"nircam\": [], # NIRCAM aperture redundant, can just use Subarray + Detector\n \"niriss\": [], # NIRISS preferred subarray only\n \"nirspec\": [\n \"NRS_FULL_MSA\",\n \"NRS_FULL_IFU\",\n \"NRS_S200A1_SLIT\",\n \"NRS_S200A2_SLIT\",\n \"NRS_S400A1_SLIT\",\n \"NRS_S1600A1_SLIT\",\n \"NRS_S200B1_SLIT\",\n ],\n \"miri\": [], # MIRI preferred subarray only\n \"fgs\": [\"FGS1_FULL\", \"FGS2_FULL\"],\n}\n\n# Observing templates used for ASIC tuning. MAST query results that\n# have one of these templates will be ignored\nASIC_TEMPLATES = [\"ISIM ASIC Tuning\"]\n\n# Bad pixel types by the type of data used to find them\nBAD_PIXEL_TYPES = [\n \"DEAD\",\n \"HOT\",\n \"LOW_QE\",\n \"RC\",\n \"OPEN\",\n \"ADJ_OPEN\",\n \"TELEGRAPH\",\n \"OTHER_BAD_PIXEL\",\n]\nDARKS_BAD_PIXEL_TYPES = [\"HOT\", \"RC\", \"OTHER_BAD_PIXEL\", \"TELEGRAPH\"]\nFLATS_BAD_PIXEL_TYPES = [\"DEAD\", \"OPEN\", \"ADJ_OPEN\", \"LOW_QE\"]\n\n# The maximum number of bad pixels allowed on a bad pixel monitor plot. If there\n# are more than this number of bad pixels identified for a particular type of\n# bad pixel, then the figure is saved as a png rather than an interactive plot,\n# in order to reduce the amount of data sent to the browser.\nBAD_PIXEL_MONITOR_MAX_POINTS_TO_PLOT = 15000\n\n# Possible exposure types for dark current data\nDARK_EXP_TYPES = {\n \"nircam\": [\"NRC_DARK\"],\n \"niriss\": [\"NIS_DARK\"],\n \"miri\": [\"MIR_DARKIMG\", \"MIR_DARKMRS\", \"MIR_DARKALL\"],\n \"nirspec\": [\"NRS_DARK\"],\n \"fgs\": [\"FGS_DARK\"],\n}\n\n# Types of potential bad pixels identified by the dark current monitor\nDARK_MONITOR_BADPIX_TYPES = [\"hot\", \"dead\", \"noisy\"]\n\n# Minimum amount of time, in days, between epochs of dark current observations. If the\n# dark monitor sees this much time, or longer, between two dark current files, it assumes\n# that the two files are part of separate epochs. This means the monitor will run separately\n# on these files, rather than bundling them together into a batch, where they would have\n# been combined into a mean dark rate\nDARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME = {'nircam': 10.,\n 'niriss': 10.,\n 'miri': 0.00001, # Treat each MIRI exposure separately\n 'nirspec': 10.,\n 'fgs': 10.\n }\n\n# Maximum number of potential new bad pixels to overplot on the dark monitor\n# mean dark image plot. Too many overplotted points starts to obscure the image\n# itself, and are most likely not really new bad pixels\nDARK_MONITOR_MAX_BADPOINTS_TO_PLOT = 1000\n\n# Dictionary of observing modes available for each instrument\nDETECTOR_PER_INSTRUMENT = {\n \"miri\": [\"MIRIFULONG\", \"MIRIFUSHORT\", \"MIRIMAGE\"],\n \"nircam\": [\n \"NRCB4\",\n \"NRCA4\",\n \"NRCA2\",\n \"NRCALONG\",\n \"NRCBLONG\",\n \"NRCB2\",\n \"NRCB3\",\n \"NRCA1\",\n \"NRCA3\",\n \"NRCB1\",\n ],\n \"niriss\": [\"NIS\"],\n \"nirspec\": [\"NRS1\", \"NRS2\"],\n \"fgs\": [\"GUIDER1\", \"GUIDER2\"],\n}\n\n# Default time range to use for EDB monitor telemetry plots. The plots will\n# go from this starting time to the monitor run time, unless otherwise requested.\nEDB_DEFAULT_PLOT_RANGE = 14 # days.\n\nEXP_TYPE_PER_INSTRUMENT = {\n \"fgs\": [\"FGS_FOCUS\", \"FGS_IMAGE\", \"FGS_INTFLAT\", \"FGS_SKYFLAT\", \"FGS_DARK\"],\n \"miri\": [\n \"MIR_FLATMRS\",\n \"MIR_MRS\",\n \"MIR_FLATIMAGE\",\n \"MIR_DARK\",\n \"MIR_LYOT\",\n \"MIR_IMAGE\",\n \"MIR_LRS-FIXEDSLIT\",\n \"MIR_LRS-SLITLESS\",\n \"MIR_CORONCAL\",\n \"MIR_4QPM\",\n \"MIR_FLATIMAGE-EXT\",\n \"MIR_TACQ\",\n \"MIR_DARKMRS\",\n \"MIR_DARKIMG\",\n \"MIR_FLATMRS-EXT\",\n \"MIR_TACONFIRM\",\n ],\n \"nircam\": [\n \"NRC_LED\",\n \"NRC_DARK\",\n \"NRC_CORON\",\n \"NRC_IMAGE\",\n \"NRC_FOCUS\",\n \"NRC_TSGRISM\",\n \"NRC_TSIMAGE\",\n \"NRC_WFSS\",\n \"NRC_TACQ\",\n \"NRC_TACONFIRM\",\n \"NRC_FLAT\",\n \"NRC_GRISM\",\n ],\n \"niriss\": [\n \"NIS_IMAGE\",\n \"NIS_FOCUS\",\n \"NIS_SOSS\",\n \"NIS_AMI\",\n \"NIS_LAMP\",\n \"NIS_WFSS\",\n \"NIS_DARK\",\n \"NIS_EXTCAL\",\n \"NIS_TACONFIRM\",\n \"NIS_TACQ\",\n ],\n \"nirspec\": [\n \"NRS_IFU\",\n \"NRS_MSASPEC\",\n \"NRS_BRIGHTOBJ\",\n \"NRS_DARK\",\n \"NRS_AUTOWAVE\",\n \"NRS_LAMP\",\n \"NRS_AUTOFLAT\",\n \"NRS_IMAGE\",\n \"NRS_CONFIRM\",\n \"NRS_FIXEDSLIT\",\n \"NRS_MIMF\",\n \"NRS_FOCUS\",\n \"NRS_TACONFIRM\",\n \"NRS_WATA\",\n \"NRS_MSATA\",\n ],\n}\n\nEXPTYPES = {\n \"nircam\": {\n \"imaging\": \"NRC_IMAGE\",\n \"ts_imaging\": \"NRC_TSIMAGE\",\n \"wfss\": \"NRC_WFSS\",\n \"ts_grism\": \"NRC_TSGRISM\",\n },\n \"niriss\": {\n \"imaging\": \"NIS_IMAGE\",\n \"ami\": \"NIS_IMAGE\",\n \"pom\": \"NIS_IMAGE\",\n \"wfss\": \"NIS_WFSS\",\n },\n \"fgs\": {\"imaging\": \"FGS_IMAGE\"},\n}\n\nEXPOSURE_PAGE_SUFFIX_ORDER = [\n \"uncal\",\n \"dark\",\n \"trapsfilled\",\n \"ramp\",\n \"rate\",\n \"rateints\",\n \"fitopt\",\n \"cal\",\n \"calints\",\n \"msa\",\n \"crf\",\n \"crfints\",\n \"bsub\",\n \"bsubints\",\n \"i2d\",\n \"s2d\",\n \"s3d\",\n \"x1d\",\n \"x1dints\",\n \"cat\",\n \"segm\",\n \"c1d\",\n \"psfstack\",\n \"psfalign\",\n \"psfsub\",\n \"amiavg\",\n \"aminorm\",\n \"ami\",\n \"psf-amiavg\",\n \"phot\",\n \"whtlt\",\n \"wfscmb\",\n]\n\n# Default Model Values\nDEFAULT_MODEL_CHARFIELD = \"empty\"\n\n# Filename Component Lengths\nFILE_AC_CAR_ID_LEN = 4\nFILE_AC_O_ID_LEN = 3\nFILE_ACT_LEN = 2\nFILE_DATETIME_LEN = 13\nFILE_EPOCH_LEN = 1\nFILE_GUIDESTAR_ATTMPT_LEN_MIN = 1\nFILE_GUIDESTAR_ATTMPT_LEN_MAX = 3\nFILE_OBS_LEN = 3\nFILE_PARALLEL_SEQ_ID_LEN = 1\nFILE_PROG_ID_LEN = 5\nFILE_SEG_LEN = 3\nFILE_SOURCE_ID_LEN = 5\nFILE_TARG_ID_LEN = 3\nFILE_VISIT_GRP_LEN = 2\nFILE_VISIT_LEN = 3\n\n# MSA metadata file do not have a standard suffix attached\nFILETYPE_WO_STANDARD_SUFFIX = \"msa.fits\"\n\nFLAT_EXP_TYPES = {\n \"nircam\": [\"NRC_FLAT\"],\n \"niriss\": [\"NIS_LAMP\"],\n \"miri\": [\"MIR_FLATIMAGE\", \"MIR_FLATMRS\"],\n \"nirspec\": [\"NRS_AUTOFLAT\", \"NRS_LAMP\"],\n \"fgs\": [\"FGS_INTFLAT\"],\n}\n\n# output subdirectories to keep track of via the filesytem monitor\nFILESYSTEM_MONITOR_SUBDIRS = ['logs', 'outputs', 'working', 'preview_images', 'thumbnails', 'all']\n\nFILTERS_PER_INSTRUMENT = {\n \"fgs\": [],\n \"miri\": [\n \"F560W\",\n \"F770W\",\n \"F1000W\",\n \"F1065C\",\n \"F1130W\",\n \"F1140C\",\n \"F1280W\",\n \"F1500W\",\n \"F1550C\",\n \"F1800W\",\n \"F2100W\",\n \"F2300C\",\n \"F2550W\",\n \"F2550WR\",\n \"FLENS\",\n \"FND\",\n \"OPAQUE\",\n \"P750L\",\n ],\n \"nircam\": [\n \"F070W\",\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F150W2\",\n \"F182M\",\n \"F187N\",\n \"F200W\",\n \"F210M\",\n \"F212N\",\n \"WLP4\",\n \"F277W\",\n \"F356W\",\n \"F444W\",\n \"F300M\",\n \"F335M\",\n \"F360M\",\n \"F410M\",\n \"F430M\",\n \"F460M\",\n \"F480M\",\n \"F250M\",\n \"F322W2\",\n ],\n \"niriss\": [\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F200W\",\n \"F277W\",\n \"F356W\",\n \"F380M\",\n \"F430M\",\n \"F444W\",\n \"F480M\",\n \"GR150C\",\n \"GR150R\",\n ],\n \"nirspec\": [\n \"CLEAR\",\n \"F070LP\",\n \"F100LP\",\n \"F110W\",\n \"F140X\",\n \"F170LP\",\n \"F290LP\",\n \"OPAQUE\",\n \"P750L\",\n ],\n}\n\nFOUR_AMP_SUBARRAYS = [\"WFSS128R\", \"WFSS64R\"]\n\n# Names of full-frame apertures for all instruments\nFULL_FRAME_APERTURES = {\n \"NIRCAM\": [\n \"NRCA1_FULL\",\n \"NRCA2_FULL\",\n \"NRCA3_FULL\",\n \"NRCA4_FULL\",\n \"NRCA5_FULL\",\n \"NRCB1_FULL\",\n \"NRCB2_FULL\",\n \"NRCB3_FULL\",\n \"NRCB4_FULL\",\n \"NRCB5_FULL\",\n ],\n \"NIRISS\": [\"NIS_CEN\"],\n \"NIRSPEC\": [\"NRS1_FULL\", \"NRS2_FULL\"],\n \"MIRI\": [\"MIRIM_FULL\"],\n \"FGS\": [\"FGS1_FULL\", \"FGS2_FULL\"],\n}\n\n# Possible suffix types for nominal files\nGENERIC_SUFFIX_TYPES = [\n \"uncal\",\n \"cal\",\n \"rateints\",\n \"rate\",\n \"trapsfilled\",\n \"i2d\",\n \"x1dints\",\n \"x1d\",\n \"s2d\",\n \"s3d\",\n \"dark\",\n \"crfints\",\n \"crf\",\n \"ramp\",\n \"fitopt\",\n \"bsubints\",\n \"bsub\",\n \"cat\",\n \"segm\",\n \"c1d\",\n]\n\n# Gratings available for each instrument\nGRATING_PER_INSTRUMENT = {\n \"fgs\": [],\n \"miri\": [],\n \"nircam\": [],\n \"niriss\": [],\n \"nirspec\": [\n \"G140M\",\n \"G235M\",\n \"G395M\",\n \"G140H\",\n \"G235H\",\n \"G395H\",\n \"PRISM\",\n \"MIRROR\",\n ],\n}\n\n# Filename extensions for guider data\nGUIDER_FILENAME_TYPE = [\"gs-fg\", \"gs-track\", \"gs-id\", \"gs-acq1\", \"gs-acq2\"]\n\n# Possible suffix types for guider exposures\nGUIDER_SUFFIX_TYPES = [\n \"stream\",\n \"stacked_uncal\",\n \"image_uncal\",\n \"stacked_cal\",\n \"image_cal\",\n]\n\n# JWQL should ignore some filetypes in the filesystem.\nIGNORED_SUFFIXES = [\"original\", \"stream\", \"x1d\", \"x1dints\", \"c1d\", \"pre-image\"]\n\n# Instrument monitor database tables\nINSTRUMENT_MONITOR_DATABASE_TABLES = {\n \"dark_monitor\": [\n \"<instrument>_dark_dark_current\",\n \"<instrument>_dark_pixel_stats\",\n \"<instrument>_dark_query_history\",\n ],\n \"bad_pixel_monitor\": [\n \"<instrument>_bad_pixel_stats\",\n \"<instrument>_bad_pixel_query_history\",\n ],\n \"cosmic_ray_monitor\": [\n \"<instrument>_cosmic_ray_stats\",\n \"<instrument>_cosmic_ray_query_history\",\n ],\n \"msata_monitor\": [\"<instrument>_ta_stats\", \"<instrument>_ta_query_history\"],\n \"wata_monitor\": [\"<instrument>_ta_stats\", \"<instrument>_ta_query_history\"],\n}\n\nINSTRUMENT_SERVICE_MATCH = {\n \"FGS\": \"Mast.Jwst.Filtered.Fgs\",\n \"MIRI\": \"Mast.Jwst.Filtered.Miri\",\n \"NIRCam\": \"Mast.Jwst.Filtered.Nircam\",\n \"NIRISS\": \"Mast.Jwst.Filtered.Niriss\",\n \"NIRSpec\": \"Mast.Jwst.Filtered.Nirspec\",\n}\n\n# JWST data products\nJWST_DATAPRODUCTS = [\n \"IMAGE\",\n \"SPECTRUM\",\n \"SED\",\n \"TIMESERIES\",\n \"VISIBILITY\",\n \"EVENTLIST\",\n \"CUBE\",\n \"CATALOG\",\n \"ENGINEERING\",\n \"NULL\",\n]\n\n# Lowercase JWST instrument names\nJWST_INSTRUMENT_NAMES = sorted([\"niriss\", \"nircam\", \"nirspec\", \"miri\", \"fgs\"])\n\n# JWST instrument names with shorthand notation\nJWST_INSTRUMENT_NAMES_SHORTHAND = {\n \"gui\": \"fgs\",\n \"mir\": \"miri\",\n \"nis\": \"niriss\",\n \"nrc\": \"nircam\",\n \"nrs\": \"nirspec\",\n}\n\n# Mixed case JWST instrument names\nJWST_INSTRUMENT_NAMES_MIXEDCASE = {\n \"fgs\": \"FGS\",\n \"miri\": \"MIRI\",\n \"nircam\": \"NIRCam\",\n \"niriss\": \"NIRISS\",\n \"nirspec\": \"NIRSpec\",\n}\n\n# Upper case JWST instrument names\nJWST_INSTRUMENT_NAMES_UPPERCASE = {\n key: value.upper() for key, value in JWST_INSTRUMENT_NAMES_MIXEDCASE.items()\n}\n\n# Astoquery service string for each JWST instrument\nJWST_MAST_SERVICES = [\n \"Mast.Jwst.Filtered.{}\".format(value.title()) for value in JWST_INSTRUMENT_NAMES\n]\n\n# Possible values for look status filter\nLOOK_OPTIONS = [\"New\", \"Viewed\"]\n\n# Maximum number of records returned by MAST for a single query\nMAST_QUERY_LIMIT = 550000\n\n# Minimum number of groups per integration required to include data\n# in the dark current monitor\nMINIMUM_DARK_CURRENT_GROUPS = 10\n\n# Expected position sensor values for MIRI. Used by the EDB monitor\n# to filter out bad values. Tuple values are the expected value and\n# the standard deviation associated with the value\nMIRI_POS_RATIO_VALUES = {\n \"FW\": {\n \"FND\": (-164.8728073, 0.204655346),\n \"OPAQUE\": (380.6122145, 0.078856646),\n \"F1000W\": (-24.15638797, 0.182865887),\n \"F1130W\": (137.8245397, 0.24910941),\n \"F1280W\": (-298.7062532, 0.229963508),\n \"P750L\": (12.39439777, 0.246932037),\n \"F1500W\": (-377.9888235, 0.263432415),\n \"F1800W\": (435.9046314, 0.27885876),\n \"F2100W\": (-126.5991201, 0.197193968),\n \"F560W\": (218.0010353, 0.282554884),\n \"FLENS\": (-212.7978283, 0.409300208),\n \"F2300C\": (306.0488778, 0.265448583),\n \"F770W\": (-62.48455213, 0.340861733),\n \"F1550C\": (188.7366748, 0.291288105),\n \"F2550W\": (-324.2364737, 0.176262309),\n \"F1140C\": (82.81057729, 0.169772457),\n \"F2550WR\": (-255.5816917, 0.251581688),\n \"F1065C\": (261.4486618, 0.16177981),\n },\n \"CCC\": {\"CLOSED\": (398.0376386, 0.173703628), \"OPEN\": (504.0482685, 0.328112274)},\n \"GW14\": {\n \"SHORT\": (626.9411005, 0.116034024),\n \"MEDIUM\": (342.8685233, 0.127123169),\n \"LONG\": (408.8339259, 0.117079193),\n },\n \"GW23\": {\n \"SHORT\": (619.7948107, 0.215417336),\n \"MEDIUM\": (373.1697309, 0.204314122),\n \"LONG\": (441.6632325, 0.349161169),\n },\n}\n\n# Names of all of the monitor database tables\nMONITOR_TABLE_NAMES = [\n \"fgs_bad_pixel_query_history\", \"fgs_bad_pixel_stats\",\n \"miri_bad_pixel_query_history\", \"miri_bad_pixel_stats\",\n \"nircam_bad_pixel_query_history\", \"nircam_bad_pixel_stats\",\n \"niriss_bad_pixel_query_history\", \"niriss_bad_pixel_stats\",\n \"nirspec_bad_pixel_query_history\", \"nirspec_bad_pixel_stats\",\n \"nircam_bias_query_history\", \"nircam_bias_stats\",\n \"niriss_bias_query_history\", \"niriss_bias_stats\",\n \"nirspec_bias_query_history\", \"nirspec_bias_stats\",\n \"nircam_claw_query_history\", \"nircam_claw_stats\",\n \"monitor\",\n \"central_storage\",\n \"filesystem_characteristics\",\n \"filesystem_general\",\n \"filesystem_instrument\",\n \"fgs_anomaly\",\n \"miri_anomaly\",\n \"nircam_anomaly\",\n \"niriss_anomaly\",\n \"nirspec_anomaly\",\n \"fgs_cosmic_ray_query_history\", \"fgs_cosmic_ray_stats\",\n \"miri_cosmic_ray_query_history\", \"miri_cosmic_ray_stats\",\n \"nircam_cosmic_ray_query_history\", \"nircam_cosmic_ray_stats\",\n \"niriss_cosmic_ray_query_history\", \"niriss_cosmic_ray_stats\",\n \"nirspec_cosmic_ray_query_history\", \"nirspec_cosmic_ray_stats\",\n \"fgs_dark_dark_current\", \"fgs_dark_pixel_stats\", \"fgs_dark_query_history\",\n \"miri_dark_dark_current\", \"miri_dark_pixel_stats\", \"miri_dark_query_history\",\n \"nircam_dark_dark_current\", \"nircam_dark_pixel_stats\", \"nircam_dark_query_history\",\n \"niriss_dark_dark_current\", \"niriss_dark_pixel_stats\", \"niriss_dark_query_history\",\n \"nirspec_dark_dark_current\", \"nirspec_dark_pixel_stats\", \"nirspec_dark_query_history\",\n \"nirspec_grating_query_history\",\n \"fgs_edb_blocks_stats\", \"fgs_edb_daily_stats\", \"fgs_edb_every_change_stats\", \"fgs_edb_time_interval_stats\", \"fgs_edb_time_stats\",\n \"miri_edb_blocks_stats\", \"miri_edb_daily_stats\", \"miri_edb_every_change_stats\", \"miri_edb_time_interval_stats\", \"miri_edb_time_stats\",\n \"nircam_edb_blocks_stats\", \"nircam_edb_daily_stats\", \"nircam_edb_every_change_stats\", \"nircam_edb_time_interval_stats\", \"nircam_edb_time_stats\",\n \"niriss_edb_blocks_stats\", \"niriss_edb_daily_stats\", \"niriss_edb_every_change_stats\", \"niriss_edb_time_interval_stats\", \"niriss_edb_time_stats\",\n \"nirspec_edb_blocks_stats\", \"nirspec_edb_daily_stats\", \"nirspec_edb_every_change_stats\", \"nirspec_edb_time_interval_stats\", \"nirspec_edb_time_stats\",\n \"nirspec_grating_stats\",\n \"fgs_readnoise_query_history\", \"fgs_readnoise_stats\",\n \"miri_readnoise_query_history\", \"miri_readnoise_stats\",\n \"nircam_readnoise_query_history\", \"nircam_readnoise_stats\",\n \"niriss_readnoise_query_history\", \"niriss_readnoise_stats\",\n \"nirspec_readnoise_query_history\", \"nirspec_readnoise_stats\",\n \"miri_ta_query_history\", \"miri_ta_stats\",\n \"nirspec_ta_query_history\", \"nirspec_ta_stats\", \"nirspec_wata_stats\", \"nirspec_msata_stats\"\n]\n\n# Suffix for msa files\nMSA_SUFFIX = [\"msa\"]\n\n# Available monitor names and their location for each JWST instrument\nMONITORS = {\n 'fgs': [('Bad Pixel Monitor', '/fgs/bad_pixel_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/fgs/dark_monitor'),\n ('EDB Telemetry Monitor', '/fgs/edb_monitor'),\n ('Readnoise Monitor', '/fgs/readnoise_monitor')],\n 'miri': [('Bad Pixel Monitor', '/miri/bad_pixel_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/miri/dark_monitor'),\n ('EDB Telemetry Monitor', '/miri/edb_monitor'),\n ('Readnoise Monitor', '/miri/readnoise_monitor')],\n 'nircam': [('Background Monitor', '/nircam/background_monitor'),\n ('Bad Pixel Monitor', '/nircam/bad_pixel_monitor'),\n ('Bias Monitor', '/nircam/bias_monitor'),\n ('Claw Monitor', '/nircam/claw_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/nircam/dark_monitor'),\n ('EDB Telemetry Monitor', '/nircam/edb_monitor'),\n ('Readnoise Monitor', '/nircam/readnoise_monitor')],\n 'niriss': [('Bad Pixel Monitor', '/niriss/bad_pixel_monitor'),\n ('Bias Monitor', '/niriss/bias_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/niriss/dark_monitor'),\n ('EDB Telemetry Monitor', '/niriss/edb_monitor'),\n ('Readnoise Monitor', '/niriss/readnoise_monitor')],\n 'nirspec': [('Bad Pixel Monitor', '/nirspec/bad_pixel_monitor'),\n ('Bias Monitor', '/nirspec/bias_monitor'),\n ('Dark Monitor', '/nirspec/dark_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('EDB Telemetry Monitor', '/nirspec/edb_monitor'),\n ('MSATA Monitor', '/nirspec/msata_monitor'),\n ('Readnoise Monitor', '/nirspec/readnoise_monitor'),\n ('WATA Monitor', '/nirspec/wata_monitor')\n ]}\n# Possible suffix types for coronograph exposures\nNIRCAM_CORONAGRAPHY_SUFFIX_TYPES = [\"psfstack\", \"psfalign\", \"psfsub\"]\n\n# NIRCam subarrays that use four amps for readout\nNIRCAM_FOUR_AMP_SUBARRAYS = [\"WFSS128R\", \"WFSS64R\"]\n\n# NIRCam long wavelength detector names\nNIRCAM_LONGWAVE_DETECTORS = [\"NRCA5\", \"NRCB5\"]\n\n# NIRCam short wavelength detector names\nNIRCAM_SHORTWAVE_DETECTORS = [\n \"NRCA1\",\n \"NRCA2\",\n \"NRCA3\",\n \"NRCA4\",\n \"NRCB1\",\n \"NRCB2\",\n \"NRCB3\",\n \"NRCB4\",\n]\n\n# NIRCam subarrays that use either one or four amps\nNIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS = [\n \"SUBGRISMSTRIPE64\",\n \"SUBGRISMSTRIPE128\",\n \"SUBGRISMSTRIPE256\",\n]\n\n# Possible suffix types for AMI files\nNIRISS_AMI_SUFFIX_TYPES = [\"amiavg\", \"aminorm\", \"ami\", \"psf-amiavg\"]\n\n# Determine if the code is being run as part of CI checking on github\nON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')\n\n# Determine if the code is being run as part of a Readthedocs build\nON_READTHEDOCS = os.environ.get('READTHEDOCS', False)\n\n# Base name for the file listing the preview images for a given instrument.\n# The complete name will have \"_{instrument.lower}.txt\" added to the end of this.\nPREVIEW_IMAGE_LISTFILE = \"preview_image_inventory\"\n\n# All possible proposal categories\nPROPOSAL_CATEGORIES = [\"AR\", \"CAL\", \"COM\", \"DD\", \"ENG\", \"GO\", \"GTO\", \"NASA\", \"SURVEY\"]\n\nPUPILS_PER_INSTRUMENT = {\n \"nircam\": [\n \"CLEAR\",\n \"FLAT\",\n \"F162M\",\n \"F164N\",\n \"GDHS0\",\n \"GDHS60\",\n \"MASKBAR\",\n \"MASKIPR\",\n \"MASKRND\",\n \"PINHOLES\",\n \"WLM8\",\n \"WLP8\",\n \"F323N\",\n \"F405N\",\n \"F466N\",\n \"F470N\",\n \"GRISMC\",\n \"GRISMR\",\n \"GRISMV2\",\n \"GRISMV3\",\n ],\n \"niriss\": [\n \"CLEARP\",\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F158M\",\n \"F200W\",\n \"GR700XD\",\n \"NRM\",\n ],\n \"nirspec\": [],\n \"miri\": [],\n \"fgs\": [],\n}\n\n\n# Keep keys defined via class as they are used many places with potential mispellings\n# Keys are in sort order from general to instrument specific, then alphabetical\n# within instrument specific fields.\nclass QueryConfigKeys:\n INSTRUMENTS = \"INSTRUMENTS\"\n PROPOSAL_CATEGORY = \"PROPOSAL_CATEGORY\"\n LOOK_STATUS = \"LOOK_STATUS\"\n DATE_RANGE = \"DATE_RANGE\"\n NUM_PER_PAGE = \"NUM_PER_PAGE\"\n SORT_TYPE = \"SORT_TYPE\"\n ANOMALIES = \"ANOMALIES\"\n APERTURES = \"APERTURES\"\n DETECTORS = \"DETECTORS\"\n EXP_TYPES = \"EXP_TYPES\"\n FILTERS = \"FILTERS\"\n GRATINGS = \"GRATINGS\"\n PUPILS = \"PUPILS\"\n READ_PATTS = \"READ_PATTS\"\n SUBARRAYS = \"SUBARRAYS\"\n\n\n# Template for parameters to be stored in \"query_config\" session for query_page\nQUERY_CONFIG_TEMPLATE = {\n QueryConfigKeys.INSTRUMENTS: [],\n QueryConfigKeys.PROPOSAL_CATEGORY: [],\n QueryConfigKeys.LOOK_STATUS: [],\n QueryConfigKeys.NUM_PER_PAGE: 100,\n QueryConfigKeys.SORT_TYPE: \"Recent\",\n QueryConfigKeys.DATE_RANGE: \"\",\n QueryConfigKeys.ANOMALIES: {},\n QueryConfigKeys.APERTURES: {},\n QueryConfigKeys.DETECTORS: {},\n QueryConfigKeys.EXP_TYPES: {},\n QueryConfigKeys.FILTERS: {},\n QueryConfigKeys.GRATINGS: {},\n QueryConfigKeys.PUPILS: {},\n QueryConfigKeys.READ_PATTS: {},\n QueryConfigKeys.SUBARRAYS: {},\n}\n\n# RAPID-style readout patterns for each instrument. Added so we can\n# differentiate in MAST searches for e.g. the dark current monitor\nRAPID_READPATTERNS = {\n \"fgs\": [\"FGSRAPID\"],\n \"miri\": [\n \"FAST\",\n \"FASTR1\",\n \"SLOW\",\n \"SLOWR1\",\n \"FASTGRPAVG\",\n \"FASTGRPAVG8\",\n \"FASTGRPAVG16\",\n \"FASTGRPAVG32\",\n \"FASTGRPAVG64\",\n \"FASTR100\",\n ],\n \"nircam\": [\"RAPID\"],\n \"niriss\": [\"NISRAPID\"],\n \"nirspec\": [\"NRSRAPID\", \"NRSIRS2RAPID\"],\n}\n\nREADPATT_PER_INSTRUMENT = {\n \"fgs\": [\"FGS\", \"FGSRAPID\", \"FGS60\", \"FGS840\", \"FGS8370\"],\n \"miri\": [\n \"FAST\",\n \"FASTR1\",\n \"SLOW\",\n \"SLOWR1\",\n \"FASTGRPAVG\",\n \"FASTGRPAVG8\",\n \"FASTGRPAVG16\",\n \"FASTGRPAVG32\",\n \"FASTGRPAVG64\",\n \"FASTR100\",\n ],\n \"nircam\": [\n \"RAPID\",\n \"SHALLOW2\",\n \"BRIGHT2\",\n \"MEDIUM2\",\n \"SHALLOW4\",\n \"MEDIUM8\",\n \"BRIGHT1\",\n \"DEEP2\",\n \"DEEP8\",\n ],\n \"niriss\": [\"NISRAPID\", \"NIS\"],\n \"nirspec\": [\"NRS\", \"NRSRAPID\", \"NRSIRS2RAPID\", \"NRSRAPIDD2\", \"NRSRAPIDD6\"],\n}\n\n\nREPORT_KEYS_PER_INSTRUMENT = {\n \"fgs\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"miri\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"nircam\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"pupil\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"niriss\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"pupil\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"nirspec\": [\"exp_type\", \"filter\", \"grating\", \"read_patt_num\", \"viewed\"],\n}\n\n# Possible values for sort order\nSORT_OPTIONS = [\"Ascending\", \"Descending\", \"Recent\", \"Oldest\"]\n\nSUBARRAYS_ONE_OR_FOUR_AMPS = [\n \"SUBGRISMSTRIPE64\",\n \"SUBGRISMSTRIPE128\",\n \"SUBGRISMSTRIPE256\",\n]\n\nschema = asdf.schema.load_schema(\"http://stsci.edu/schemas/jwst_datamodel/subarray.schema\")\nSUBARRAYS_PER_INSTRUMENT = {\n \"nircam\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][2]['enum']),\n \"niriss\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][4]['enum']),\n \"nirspec\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][6]['enum']),\n \"miri\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][1]['enum']),\n \"fgs\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][0]['enum'])\n}\n\n# Filename suffixes that need to include the association value in the suffix in\n# order to identify the preview image file. This should only be crf and crfints,\n# since those are essentially level 2 files that are output by the level 3 pipeline.\nSUFFIXES_TO_ADD_ASSOCIATION = [\"crf\", \"crfints\"]\n\n# Filename suffixes where data have been averaged over integrations\nSUFFIXES_WITH_AVERAGED_INTS = [\"rate\", \"cal\", \"crf\", \"i2d\", \"bsub\"]\n\n# boolean accessed according to a viewed flag\nTHUMBNAIL_FILTER_LOOK = [\"New\", \"Viewed\"]\n\n# Base name for the file listing the thumbnail images for a given instrument.\n# The complete name will have \"_{instrument.lower}.txt\" added to the end of this.\nTHUMBNAIL_LISTFILE = \"thumbnail_inventory\"\n\n# Possible suffix types for time-series exposures\nTIME_SERIES_SUFFIX_TYPES = [\"phot\", \"whtlt\"]\n\n# Instrument Documentation Links\nURL_DICT = {\n \"fgs\": \"https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor\",\n \"miri\": \"https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument\",\n \"niriss\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph\",\n \"nirspec\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph\",\n \"nircam\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-camera\",\n}\n\n# Possible suffix types for WFS&C files\nWFSC_SUFFIX_TYPES = [\"wfscmb\"]\n\n# Concatenate all suffix types (ordered to ensure successful matching)\nFILE_SUFFIX_TYPES = (\n GUIDER_SUFFIX_TYPES\n + GENERIC_SUFFIX_TYPES\n + TIME_SERIES_SUFFIX_TYPES\n + NIRCAM_CORONAGRAPHY_SUFFIX_TYPES\n + NIRISS_AMI_SUFFIX_TYPES\n + WFSC_SUFFIX_TYPES\n + MSA_SUFFIX\n)\n\n# Model.Charfield Max Length Constants\nMAX_LEN_AMPLIFIER = 40\nMAX_LEN_APERTURE = 40\nMAX_LEN_DEPENDENCY_VALUE = 40\nMAX_LEN_DETECTOR = 40\nMAX_LEN_DIFF_IMAGE = 1000\nMAX_LEN_FILENAME = 1000\nMAX_LEN_FILTER = 7\nMAX_LEN_GENERIC_TEXT = 100\nMAX_LEN_GRATING = 40\nMAX_LEN_INSTRUMENT = 7\nMAX_LEN_MNEMONIC = 40\nMAX_LEN_NGROUPS = 10\nMAX_LEN_NINTS = 10\nMAX_LEN_OBS = 3\nMAX_LEN_PATH = 1000\nMAX_LEN_PROPOSAL = 5\nMAX_LEN_PUPIL = 40\nMAX_LEN_READPATTERN = 40\nMAX_LEN_SUBARRAY = 40\nMAX_LEN_TIME = 50\nMAX_LEN_TYPE = 40\nMAX_LEN_USER = 50\nMAX_LEN_VISIT = 30\n", "path": "jwql/utils/constants.py" } ]
[ { "content": "\"\"\"Globally defined and used variables for the ``jwql`` project.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Matthew Bourque\n - Bryan Hilbert\n - Ben Sunnquist\n - Teagan King\n - Mike Engesser\n - Maria Pena-Guerrero\n - Rachel Cooper\n - Brad Sappington\n\nUse\n---\n This variables within this module are intended to be directly\n imported, e.g.:\n ::\n\n from jwql.utils.constants import JWST_INSTRUMENT_NAMES\n\nReferences\n----------\n\n Many variables were transferred from an earlier version of\n ``utils.py``\n\"\"\"\n\nimport asdf\nimport inflection\nimport os\n\n# Each amplifier is represented by 2 tuples, the first for x coordinates\n# and the second for y coordinates. Within each tuple are value for\n# starting, ending, and step size. Step size is needed for MIRI, where\n# the pixels corresponding to the 4 amplifiers are interleaved.\nAMPLIFIER_BOUNDARIES = {\n \"nircam\": {\n \"1\": [(0, 512, 1), (0, 2048, 1)],\n \"2\": [(512, 1024, 1), (0, 2048, 1)],\n \"3\": [(1024, 1536, 1), (0, 2048, 1)],\n \"4\": [(1536, 2048, 1), (0, 2048, 1)],\n },\n \"niriss\": {\n \"1\": [(0, 2048, 1), (0, 512, 1)],\n \"2\": [(0, 2048, 1), (512, 1024, 1)],\n \"3\": [(0, 2048, 1), (1024, 1536, 1)],\n \"4\": [(0, 2048, 1), (1536, 2048, 1)],\n },\n \"fgs\": {\n \"1\": [(0, 512, 1), (0, 2048, 1)],\n \"2\": [(512, 1024, 1), (0, 2048, 1)],\n \"3\": [(1024, 1536, 1), (0, 2048, 1)],\n \"4\": [(1536, 2048, 1), (0, 2048, 1)],\n },\n \"nirspec\": {\n \"1\": [(0, 2048, 1), (0, 512, 1)],\n \"2\": [(0, 2048, 1), (512, 1024, 1)],\n \"3\": [(0, 2048, 1), (1024, 1536, 1)],\n \"4\": [(0, 2048, 1), (1536, 2048, 1)],\n },\n \"miri\": {\n \"1\": [(0, 1032, 4), (0, 1024, 1)],\n \"2\": [(1, 1032, 4), (0, 1024, 1)],\n \"3\": [(2, 1032, 4), (0, 1024, 1)],\n \"4\": [(3, 1032, 4), (0, 1024, 1)],\n },\n}\n\n# Dictionary describing instruments to which anomalies apply\nANOMALIES_PER_INSTRUMENT = {\n # anomalies affecting all instruments:\n \"diffraction_spike\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"excessive_saturation\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"persistence\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n # anomalies affecting multiple instruments:\n \"crosstalk\": [\"fgs\", \"nircam\", \"niriss\"],\n \"data_transfer_error\": [\"fgs\", \"nircam\", \"niriss\"],\n \"ghost\": [\"fgs\", \"nircam\", \"niriss\"],\n \"guidestar_failure\": [\"fgs\", \"miri\", \"nircam\", \"niriss\"],\n \"unusual_cosmic_rays\": [\"fgs\", \"nircam\", \"niriss\", \"nirspec\"],\n \"unusual_snowballs\": [\"fgs\", \"nircam\", \"niriss\", \"nirspec\"],\n # instrument-specific anomalies:\n \"cosmic_ray_shower\": [\"miri\"],\n \"column_pull_up\": [\"miri\"],\n \"column_pull_down\": [\"miri\"],\n \"noticeable_msa_leakage\": [\"nirspec\"],\n \"dragons_breath\": [\"nircam\"],\n \"mrs_glow\": [\"miri\"],\n \"mrs_zipper\": [\"miri\"],\n \"internal_reflection\": [\"miri\"],\n \"new_short\": [\"nirspec\"], # Only for MOS observations\n \"row_pull_up\": [\"miri\"],\n \"row_pull_down\": [\"miri\"],\n \"lrs_contamination\": [\"miri\"],\n \"tree_rings\": [\"miri\"],\n \"scattered_light\": [\"niriss\", \"nircam\", \"nirspec\"],\n \"claws\": [\"nircam\"],\n \"wisps\": [\"nircam\"],\n \"tilt_event\": [\"nircam\"],\n \"light_saber\": [\"niriss\"],\n \"transient_short\": [\"nirspec\"],\n \"subsequently_masked_short\": [\"nirspec\"],\n \"monitored_short\": [\"nirspec\"],\n \"bright_object_not_a_short\": [\"nirspec\"],\n # additional anomalies:\n \"other\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n \"needs_discussion\": [\"fgs\", \"miri\", \"nircam\", \"niriss\", \"nirspec\"],\n}\n\n# Defines the possible anomalies to flag through the web app\nANOMALY_CHOICES = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n for anomaly in ANOMALIES_PER_INSTRUMENT\n]\n\nANOMALY_CHOICES_FGS = [\n (anomaly, inflection.titleize(anomaly).upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"fgs\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_MIRI = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"miri\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRCAM = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"nircam\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRISS = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"niriss\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_NIRSPEC = [\n (anomaly, anomaly.replace(\"_\", \" \").upper())\n for anomaly in ANOMALIES_PER_INSTRUMENT\n if \"nirspec\" in ANOMALIES_PER_INSTRUMENT[anomaly]\n]\n\nANOMALY_CHOICES_PER_INSTRUMENT = {\n \"fgs\": ANOMALY_CHOICES_FGS,\n \"miri\": ANOMALY_CHOICES_MIRI,\n \"nircam\": ANOMALY_CHOICES_NIRCAM,\n \"niriss\": ANOMALY_CHOICES_NIRISS,\n \"nirspec\": ANOMALY_CHOICES_NIRSPEC,\n}\n\nAPERTURES_PER_INSTRUMENT = {\n \"nircam\": [], # NIRCAM aperture redundant, can just use Subarray + Detector\n \"niriss\": [], # NIRISS preferred subarray only\n \"nirspec\": [\n \"NRS_FULL_MSA\",\n \"NRS_FULL_IFU\",\n \"NRS_S200A1_SLIT\",\n \"NRS_S200A2_SLIT\",\n \"NRS_S400A1_SLIT\",\n \"NRS_S1600A1_SLIT\",\n \"NRS_S200B1_SLIT\",\n ],\n \"miri\": [], # MIRI preferred subarray only\n \"fgs\": [\"FGS1_FULL\", \"FGS2_FULL\"],\n}\n\n# Observing templates used for ASIC tuning. MAST query results that\n# have one of these templates will be ignored\nASIC_TEMPLATES = [\"ISIM ASIC Tuning\"]\n\n# Bad pixel types by the type of data used to find them\nBAD_PIXEL_TYPES = [\n \"DEAD\",\n \"HOT\",\n \"LOW_QE\",\n \"RC\",\n \"OPEN\",\n \"ADJ_OPEN\",\n \"TELEGRAPH\",\n \"OTHER_BAD_PIXEL\",\n]\nDARKS_BAD_PIXEL_TYPES = [\"HOT\", \"RC\", \"OTHER_BAD_PIXEL\", \"TELEGRAPH\"]\nFLATS_BAD_PIXEL_TYPES = [\"DEAD\", \"OPEN\", \"ADJ_OPEN\", \"LOW_QE\"]\n\n# The maximum number of bad pixels allowed on a bad pixel monitor plot. If there\n# are more than this number of bad pixels identified for a particular type of\n# bad pixel, then the figure is saved as a png rather than an interactive plot,\n# in order to reduce the amount of data sent to the browser.\nBAD_PIXEL_MONITOR_MAX_POINTS_TO_PLOT = 15000\n\n# Possible exposure types for dark current data\nDARK_EXP_TYPES = {\n \"nircam\": [\"NRC_DARK\"],\n \"niriss\": [\"NIS_DARK\"],\n \"miri\": [\"MIR_DARKIMG\", \"MIR_DARKMRS\", \"MIR_DARKALL\"],\n \"nirspec\": [\"NRS_DARK\"],\n \"fgs\": [\"FGS_DARK\"],\n}\n\n# Types of potential bad pixels identified by the dark current monitor\nDARK_MONITOR_BADPIX_TYPES = [\"hot\", \"dead\", \"noisy\"]\n\n# Minimum amount of time, in days, between epochs of dark current observations. If the\n# dark monitor sees this much time, or longer, between two dark current files, it assumes\n# that the two files are part of separate epochs. This means the monitor will run separately\n# on these files, rather than bundling them together into a batch, where they would have\n# been combined into a mean dark rate\nDARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME = {'nircam': 10.,\n 'niriss': 10.,\n 'miri': 0.00001, # Treat each MIRI exposure separately\n 'nirspec': 10.,\n 'fgs': 10.\n }\n\n# Maximum number of potential new bad pixels to overplot on the dark monitor\n# mean dark image plot. Too many overplotted points starts to obscure the image\n# itself, and are most likely not really new bad pixels\nDARK_MONITOR_MAX_BADPOINTS_TO_PLOT = 1000\n\n# Dictionary of observing modes available for each instrument\nDETECTOR_PER_INSTRUMENT = {\n \"miri\": [\"MIRIFULONG\", \"MIRIFUSHORT\", \"MIRIMAGE\"],\n \"nircam\": [\n \"NRCB4\",\n \"NRCA4\",\n \"NRCA2\",\n \"NRCALONG\",\n \"NRCBLONG\",\n \"NRCB2\",\n \"NRCB3\",\n \"NRCA1\",\n \"NRCA3\",\n \"NRCB1\",\n ],\n \"niriss\": [\"NIS\"],\n \"nirspec\": [\"NRS1\", \"NRS2\"],\n \"fgs\": [\"GUIDER1\", \"GUIDER2\"],\n}\n\n# Default time range to use for EDB monitor telemetry plots. The plots will\n# go from this starting time to the monitor run time, unless otherwise requested.\nEDB_DEFAULT_PLOT_RANGE = 14 # days.\n\nEXP_TYPE_PER_INSTRUMENT = {\n \"fgs\": [\"FGS_FOCUS\", \"FGS_IMAGE\", \"FGS_INTFLAT\", \"FGS_SKYFLAT\", \"FGS_DARK\"],\n \"miri\": [\n \"MIR_FLATMRS\",\n \"MIR_MRS\",\n \"MIR_FLATIMAGE\",\n \"MIR_DARK\",\n \"MIR_LYOT\",\n \"MIR_IMAGE\",\n \"MIR_LRS-FIXEDSLIT\",\n \"MIR_LRS-SLITLESS\",\n \"MIR_CORONCAL\",\n \"MIR_4QPM\",\n \"MIR_FLATIMAGE-EXT\",\n \"MIR_TACQ\",\n \"MIR_DARKMRS\",\n \"MIR_DARKIMG\",\n \"MIR_FLATMRS-EXT\",\n \"MIR_TACONFIRM\",\n ],\n \"nircam\": [\n \"NRC_LED\",\n \"NRC_DARK\",\n \"NRC_CORON\",\n \"NRC_IMAGE\",\n \"NRC_FOCUS\",\n \"NRC_TSGRISM\",\n \"NRC_TSIMAGE\",\n \"NRC_WFSS\",\n \"NRC_TACQ\",\n \"NRC_TACONFIRM\",\n \"NRC_FLAT\",\n \"NRC_GRISM\",\n ],\n \"niriss\": [\n \"NIS_IMAGE\",\n \"NIS_FOCUS\",\n \"NIS_SOSS\",\n \"NIS_AMI\",\n \"NIS_LAMP\",\n \"NIS_WFSS\",\n \"NIS_DARK\",\n \"NIS_EXTCAL\",\n \"NIS_TACONFIRM\",\n \"NIS_TACQ\",\n ],\n \"nirspec\": [\n \"NRS_IFU\",\n \"NRS_MSASPEC\",\n \"NRS_BRIGHTOBJ\",\n \"NRS_DARK\",\n \"NRS_AUTOWAVE\",\n \"NRS_LAMP\",\n \"NRS_AUTOFLAT\",\n \"NRS_IMAGE\",\n \"NRS_CONFIRM\",\n \"NRS_FIXEDSLIT\",\n \"NRS_MIMF\",\n \"NRS_FOCUS\",\n \"NRS_TACONFIRM\",\n \"NRS_WATA\",\n \"NRS_MSATA\",\n ],\n}\n\nEXPTYPES = {\n \"nircam\": {\n \"imaging\": \"NRC_IMAGE\",\n \"ts_imaging\": \"NRC_TSIMAGE\",\n \"wfss\": \"NRC_WFSS\",\n \"ts_grism\": \"NRC_TSGRISM\",\n },\n \"niriss\": {\n \"imaging\": \"NIS_IMAGE\",\n \"ami\": \"NIS_IMAGE\",\n \"pom\": \"NIS_IMAGE\",\n \"wfss\": \"NIS_WFSS\",\n },\n \"fgs\": {\"imaging\": \"FGS_IMAGE\"},\n}\n\nEXPOSURE_PAGE_SUFFIX_ORDER = [\n \"uncal\",\n \"dark\",\n \"trapsfilled\",\n \"ramp\",\n \"rate\",\n \"rateints\",\n \"fitopt\",\n \"cal\",\n \"calints\",\n \"msa\",\n \"crf\",\n \"crfints\",\n \"bsub\",\n \"bsubints\",\n \"i2d\",\n \"s2d\",\n \"s3d\",\n \"x1d\",\n \"x1dints\",\n \"cat\",\n \"segm\",\n \"c1d\",\n \"psfstack\",\n \"psfalign\",\n \"psfsub\",\n \"amiavg\",\n \"aminorm\",\n \"ami\",\n \"psf-amiavg\",\n \"phot\",\n \"whtlt\",\n \"wfscmb\",\n]\n\n# Default Model Values\nDEFAULT_MODEL_CHARFIELD = \"empty\"\n\n# Filename Component Lengths\nFILE_AC_CAR_ID_LEN = 4\nFILE_AC_O_ID_LEN = 3\nFILE_ACT_LEN = 2\nFILE_DATETIME_LEN = 13\nFILE_EPOCH_LEN = 1\nFILE_GUIDESTAR_ATTMPT_LEN_MIN = 1\nFILE_GUIDESTAR_ATTMPT_LEN_MAX = 3\nFILE_OBS_LEN = 3\nFILE_PARALLEL_SEQ_ID_LEN = 1\nFILE_PROG_ID_LEN = 5\nFILE_SEG_LEN = 3\nFILE_SOURCE_ID_LEN = 5\nFILE_TARG_ID_LEN = 3\nFILE_VISIT_GRP_LEN = 2\nFILE_VISIT_LEN = 3\n\n# MSA metadata file do not have a standard suffix attached\nFILETYPE_WO_STANDARD_SUFFIX = \"msa.fits\"\n\nFLAT_EXP_TYPES = {\n \"nircam\": [\"NRC_FLAT\"],\n \"niriss\": [\"NIS_LAMP\"],\n \"miri\": [\"MIR_FLATIMAGE\", \"MIR_FLATMRS\"],\n \"nirspec\": [\"NRS_AUTOFLAT\", \"NRS_LAMP\"],\n \"fgs\": [\"FGS_INTFLAT\"],\n}\n\n# output subdirectories to keep track of via the filesytem monitor\nFILESYSTEM_MONITOR_SUBDIRS = ['logs', 'outputs', 'working', 'preview_images', 'thumbnails', 'all']\n\nFILTERS_PER_INSTRUMENT = {\n \"fgs\": [],\n \"miri\": [\n \"F560W\",\n \"F770W\",\n \"F1000W\",\n \"F1065C\",\n \"F1130W\",\n \"F1140C\",\n \"F1280W\",\n \"F1500W\",\n \"F1550C\",\n \"F1800W\",\n \"F2100W\",\n \"F2300C\",\n \"F2550W\",\n \"F2550WR\",\n \"FLENS\",\n \"FND\",\n \"OPAQUE\",\n \"P750L\",\n ],\n \"nircam\": [\n \"F070W\",\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F150W2\",\n \"F182M\",\n \"F187N\",\n \"F200W\",\n \"F210M\",\n \"F212N\",\n \"WLP4\",\n \"F277W\",\n \"F356W\",\n \"F444W\",\n \"F300M\",\n \"F335M\",\n \"F360M\",\n \"F410M\",\n \"F430M\",\n \"F460M\",\n \"F480M\",\n \"F250M\",\n \"F322W2\",\n ],\n \"niriss\": [\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F200W\",\n \"F277W\",\n \"F356W\",\n \"F380M\",\n \"F430M\",\n \"F444W\",\n \"F480M\",\n \"GR150C\",\n \"GR150R\",\n ],\n \"nirspec\": [\n \"CLEAR\",\n \"F070LP\",\n \"F100LP\",\n \"F110W\",\n \"F140X\",\n \"F170LP\",\n \"F290LP\",\n \"OPAQUE\",\n ],\n}\n\nFOUR_AMP_SUBARRAYS = [\"WFSS128R\", \"WFSS64R\"]\n\n# Names of full-frame apertures for all instruments\nFULL_FRAME_APERTURES = {\n \"NIRCAM\": [\n \"NRCA1_FULL\",\n \"NRCA2_FULL\",\n \"NRCA3_FULL\",\n \"NRCA4_FULL\",\n \"NRCA5_FULL\",\n \"NRCB1_FULL\",\n \"NRCB2_FULL\",\n \"NRCB3_FULL\",\n \"NRCB4_FULL\",\n \"NRCB5_FULL\",\n ],\n \"NIRISS\": [\"NIS_CEN\"],\n \"NIRSPEC\": [\"NRS1_FULL\", \"NRS2_FULL\"],\n \"MIRI\": [\"MIRIM_FULL\"],\n \"FGS\": [\"FGS1_FULL\", \"FGS2_FULL\"],\n}\n\n# Possible suffix types for nominal files\nGENERIC_SUFFIX_TYPES = [\n \"uncal\",\n \"cal\",\n \"rateints\",\n \"rate\",\n \"trapsfilled\",\n \"i2d\",\n \"x1dints\",\n \"x1d\",\n \"s2d\",\n \"s3d\",\n \"dark\",\n \"crfints\",\n \"crf\",\n \"ramp\",\n \"fitopt\",\n \"bsubints\",\n \"bsub\",\n \"cat\",\n \"segm\",\n \"c1d\",\n]\n\n# Gratings available for each instrument\nGRATING_PER_INSTRUMENT = {\n \"fgs\": [],\n \"miri\": [],\n \"nircam\": [],\n \"niriss\": [],\n \"nirspec\": [\n \"G140M\",\n \"G235M\",\n \"G395M\",\n \"G140H\",\n \"G235H\",\n \"G395H\",\n \"PRISM\",\n \"MIRROR\",\n ],\n}\n\n# Filename extensions for guider data\nGUIDER_FILENAME_TYPE = [\"gs-fg\", \"gs-track\", \"gs-id\", \"gs-acq1\", \"gs-acq2\"]\n\n# Possible suffix types for guider exposures\nGUIDER_SUFFIX_TYPES = [\n \"stream\",\n \"stacked_uncal\",\n \"image_uncal\",\n \"stacked_cal\",\n \"image_cal\",\n]\n\n# JWQL should ignore some filetypes in the filesystem.\nIGNORED_SUFFIXES = [\"original\", \"stream\", \"x1d\", \"x1dints\", \"c1d\", \"pre-image\"]\n\n# Instrument monitor database tables\nINSTRUMENT_MONITOR_DATABASE_TABLES = {\n \"dark_monitor\": [\n \"<instrument>_dark_dark_current\",\n \"<instrument>_dark_pixel_stats\",\n \"<instrument>_dark_query_history\",\n ],\n \"bad_pixel_monitor\": [\n \"<instrument>_bad_pixel_stats\",\n \"<instrument>_bad_pixel_query_history\",\n ],\n \"cosmic_ray_monitor\": [\n \"<instrument>_cosmic_ray_stats\",\n \"<instrument>_cosmic_ray_query_history\",\n ],\n \"msata_monitor\": [\"<instrument>_ta_stats\", \"<instrument>_ta_query_history\"],\n \"wata_monitor\": [\"<instrument>_ta_stats\", \"<instrument>_ta_query_history\"],\n}\n\nINSTRUMENT_SERVICE_MATCH = {\n \"FGS\": \"Mast.Jwst.Filtered.Fgs\",\n \"MIRI\": \"Mast.Jwst.Filtered.Miri\",\n \"NIRCam\": \"Mast.Jwst.Filtered.Nircam\",\n \"NIRISS\": \"Mast.Jwst.Filtered.Niriss\",\n \"NIRSpec\": \"Mast.Jwst.Filtered.Nirspec\",\n}\n\n# JWST data products\nJWST_DATAPRODUCTS = [\n \"IMAGE\",\n \"SPECTRUM\",\n \"SED\",\n \"TIMESERIES\",\n \"VISIBILITY\",\n \"EVENTLIST\",\n \"CUBE\",\n \"CATALOG\",\n \"ENGINEERING\",\n \"NULL\",\n]\n\n# Lowercase JWST instrument names\nJWST_INSTRUMENT_NAMES = sorted([\"niriss\", \"nircam\", \"nirspec\", \"miri\", \"fgs\"])\n\n# JWST instrument names with shorthand notation\nJWST_INSTRUMENT_NAMES_SHORTHAND = {\n \"gui\": \"fgs\",\n \"mir\": \"miri\",\n \"nis\": \"niriss\",\n \"nrc\": \"nircam\",\n \"nrs\": \"nirspec\",\n}\n\n# Mixed case JWST instrument names\nJWST_INSTRUMENT_NAMES_MIXEDCASE = {\n \"fgs\": \"FGS\",\n \"miri\": \"MIRI\",\n \"nircam\": \"NIRCam\",\n \"niriss\": \"NIRISS\",\n \"nirspec\": \"NIRSpec\",\n}\n\n# Upper case JWST instrument names\nJWST_INSTRUMENT_NAMES_UPPERCASE = {\n key: value.upper() for key, value in JWST_INSTRUMENT_NAMES_MIXEDCASE.items()\n}\n\n# Astoquery service string for each JWST instrument\nJWST_MAST_SERVICES = [\n \"Mast.Jwst.Filtered.{}\".format(value.title()) for value in JWST_INSTRUMENT_NAMES\n]\n\n# Possible values for look status filter\nLOOK_OPTIONS = [\"New\", \"Viewed\"]\n\n# Maximum number of records returned by MAST for a single query\nMAST_QUERY_LIMIT = 550000\n\n# Minimum number of groups per integration required to include data\n# in the dark current monitor\nMINIMUM_DARK_CURRENT_GROUPS = 10\n\n# Expected position sensor values for MIRI. Used by the EDB monitor\n# to filter out bad values. Tuple values are the expected value and\n# the standard deviation associated with the value\nMIRI_POS_RATIO_VALUES = {\n \"FW\": {\n \"FND\": (-164.8728073, 0.204655346),\n \"OPAQUE\": (380.6122145, 0.078856646),\n \"F1000W\": (-24.15638797, 0.182865887),\n \"F1130W\": (137.8245397, 0.24910941),\n \"F1280W\": (-298.7062532, 0.229963508),\n \"P750L\": (12.39439777, 0.246932037),\n \"F1500W\": (-377.9888235, 0.263432415),\n \"F1800W\": (435.9046314, 0.27885876),\n \"F2100W\": (-126.5991201, 0.197193968),\n \"F560W\": (218.0010353, 0.282554884),\n \"FLENS\": (-212.7978283, 0.409300208),\n \"F2300C\": (306.0488778, 0.265448583),\n \"F770W\": (-62.48455213, 0.340861733),\n \"F1550C\": (188.7366748, 0.291288105),\n \"F2550W\": (-324.2364737, 0.176262309),\n \"F1140C\": (82.81057729, 0.169772457),\n \"F2550WR\": (-255.5816917, 0.251581688),\n \"F1065C\": (261.4486618, 0.16177981),\n },\n \"CCC\": {\"CLOSED\": (398.0376386, 0.173703628), \"OPEN\": (504.0482685, 0.328112274)},\n \"GW14\": {\n \"SHORT\": (626.9411005, 0.116034024),\n \"MEDIUM\": (342.8685233, 0.127123169),\n \"LONG\": (408.8339259, 0.117079193),\n },\n \"GW23\": {\n \"SHORT\": (619.7948107, 0.215417336),\n \"MEDIUM\": (373.1697309, 0.204314122),\n \"LONG\": (441.6632325, 0.349161169),\n },\n}\n\n# Names of all of the monitor database tables\nMONITOR_TABLE_NAMES = [\n \"fgs_bad_pixel_query_history\", \"fgs_bad_pixel_stats\",\n \"miri_bad_pixel_query_history\", \"miri_bad_pixel_stats\",\n \"nircam_bad_pixel_query_history\", \"nircam_bad_pixel_stats\",\n \"niriss_bad_pixel_query_history\", \"niriss_bad_pixel_stats\",\n \"nirspec_bad_pixel_query_history\", \"nirspec_bad_pixel_stats\",\n \"nircam_bias_query_history\", \"nircam_bias_stats\",\n \"niriss_bias_query_history\", \"niriss_bias_stats\",\n \"nirspec_bias_query_history\", \"nirspec_bias_stats\",\n \"nircam_claw_query_history\", \"nircam_claw_stats\",\n \"monitor\",\n \"central_storage\",\n \"filesystem_characteristics\",\n \"filesystem_general\",\n \"filesystem_instrument\",\n \"fgs_anomaly\",\n \"miri_anomaly\",\n \"nircam_anomaly\",\n \"niriss_anomaly\",\n \"nirspec_anomaly\",\n \"fgs_cosmic_ray_query_history\", \"fgs_cosmic_ray_stats\",\n \"miri_cosmic_ray_query_history\", \"miri_cosmic_ray_stats\",\n \"nircam_cosmic_ray_query_history\", \"nircam_cosmic_ray_stats\",\n \"niriss_cosmic_ray_query_history\", \"niriss_cosmic_ray_stats\",\n \"nirspec_cosmic_ray_query_history\", \"nirspec_cosmic_ray_stats\",\n \"fgs_dark_dark_current\", \"fgs_dark_pixel_stats\", \"fgs_dark_query_history\",\n \"miri_dark_dark_current\", \"miri_dark_pixel_stats\", \"miri_dark_query_history\",\n \"nircam_dark_dark_current\", \"nircam_dark_pixel_stats\", \"nircam_dark_query_history\",\n \"niriss_dark_dark_current\", \"niriss_dark_pixel_stats\", \"niriss_dark_query_history\",\n \"nirspec_dark_dark_current\", \"nirspec_dark_pixel_stats\", \"nirspec_dark_query_history\",\n \"nirspec_grating_query_history\",\n \"fgs_edb_blocks_stats\", \"fgs_edb_daily_stats\", \"fgs_edb_every_change_stats\", \"fgs_edb_time_interval_stats\", \"fgs_edb_time_stats\",\n \"miri_edb_blocks_stats\", \"miri_edb_daily_stats\", \"miri_edb_every_change_stats\", \"miri_edb_time_interval_stats\", \"miri_edb_time_stats\",\n \"nircam_edb_blocks_stats\", \"nircam_edb_daily_stats\", \"nircam_edb_every_change_stats\", \"nircam_edb_time_interval_stats\", \"nircam_edb_time_stats\",\n \"niriss_edb_blocks_stats\", \"niriss_edb_daily_stats\", \"niriss_edb_every_change_stats\", \"niriss_edb_time_interval_stats\", \"niriss_edb_time_stats\",\n \"nirspec_edb_blocks_stats\", \"nirspec_edb_daily_stats\", \"nirspec_edb_every_change_stats\", \"nirspec_edb_time_interval_stats\", \"nirspec_edb_time_stats\",\n \"nirspec_grating_stats\",\n \"fgs_readnoise_query_history\", \"fgs_readnoise_stats\",\n \"miri_readnoise_query_history\", \"miri_readnoise_stats\",\n \"nircam_readnoise_query_history\", \"nircam_readnoise_stats\",\n \"niriss_readnoise_query_history\", \"niriss_readnoise_stats\",\n \"nirspec_readnoise_query_history\", \"nirspec_readnoise_stats\",\n \"miri_ta_query_history\", \"miri_ta_stats\",\n \"nirspec_ta_query_history\", \"nirspec_ta_stats\", \"nirspec_wata_stats\", \"nirspec_msata_stats\"\n]\n\n# Suffix for msa files\nMSA_SUFFIX = [\"msa\"]\n\n# Available monitor names and their location for each JWST instrument\nMONITORS = {\n 'fgs': [('Bad Pixel Monitor', '/fgs/bad_pixel_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/fgs/dark_monitor'),\n ('EDB Telemetry Monitor', '/fgs/edb_monitor'),\n ('Readnoise Monitor', '/fgs/readnoise_monitor')],\n 'miri': [('Bad Pixel Monitor', '/miri/bad_pixel_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/miri/dark_monitor'),\n ('EDB Telemetry Monitor', '/miri/edb_monitor'),\n ('Readnoise Monitor', '/miri/readnoise_monitor')],\n 'nircam': [('Background Monitor', '/nircam/background_monitor'),\n ('Bad Pixel Monitor', '/nircam/bad_pixel_monitor'),\n ('Bias Monitor', '/nircam/bias_monitor'),\n ('Claw Monitor', '/nircam/claw_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/nircam/dark_monitor'),\n ('EDB Telemetry Monitor', '/nircam/edb_monitor'),\n ('Readnoise Monitor', '/nircam/readnoise_monitor')],\n 'niriss': [('Bad Pixel Monitor', '/niriss/bad_pixel_monitor'),\n ('Bias Monitor', '/niriss/bias_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('Dark Current Monitor', '/niriss/dark_monitor'),\n ('EDB Telemetry Monitor', '/niriss/edb_monitor'),\n ('Readnoise Monitor', '/niriss/readnoise_monitor')],\n 'nirspec': [('Bad Pixel Monitor', '/nirspec/bad_pixel_monitor'),\n ('Bias Monitor', '/nirspec/bias_monitor'),\n ('Dark Monitor', '/nirspec/dark_monitor'),\n ('Cosmic Ray Monitor', '#'),\n ('EDB Telemetry Monitor', '/nirspec/edb_monitor'),\n ('MSATA Monitor', '/nirspec/msata_monitor'),\n ('Readnoise Monitor', '/nirspec/readnoise_monitor'),\n ('WATA Monitor', '/nirspec/wata_monitor')\n ]}\n# Possible suffix types for coronograph exposures\nNIRCAM_CORONAGRAPHY_SUFFIX_TYPES = [\"psfstack\", \"psfalign\", \"psfsub\"]\n\n# NIRCam subarrays that use four amps for readout\nNIRCAM_FOUR_AMP_SUBARRAYS = [\"WFSS128R\", \"WFSS64R\"]\n\n# NIRCam long wavelength detector names\nNIRCAM_LONGWAVE_DETECTORS = [\"NRCA5\", \"NRCB5\"]\n\n# NIRCam short wavelength detector names\nNIRCAM_SHORTWAVE_DETECTORS = [\n \"NRCA1\",\n \"NRCA2\",\n \"NRCA3\",\n \"NRCA4\",\n \"NRCB1\",\n \"NRCB2\",\n \"NRCB3\",\n \"NRCB4\",\n]\n\n# NIRCam subarrays that use either one or four amps\nNIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS = [\n \"SUBGRISMSTRIPE64\",\n \"SUBGRISMSTRIPE128\",\n \"SUBGRISMSTRIPE256\",\n]\n\n# Possible suffix types for AMI files\nNIRISS_AMI_SUFFIX_TYPES = [\"amiavg\", \"aminorm\", \"ami\", \"psf-amiavg\"]\n\n# Determine if the code is being run as part of CI checking on github\nON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')\n\n# Determine if the code is being run as part of a Readthedocs build\nON_READTHEDOCS = os.environ.get('READTHEDOCS', False)\n\n# Base name for the file listing the preview images for a given instrument.\n# The complete name will have \"_{instrument.lower}.txt\" added to the end of this.\nPREVIEW_IMAGE_LISTFILE = \"preview_image_inventory\"\n\n# All possible proposal categories\nPROPOSAL_CATEGORIES = [\"AR\", \"CAL\", \"COM\", \"DD\", \"ENG\", \"GO\", \"GTO\", \"NASA\", \"SURVEY\"]\n\nPUPILS_PER_INSTRUMENT = {\n \"nircam\": [\n \"CLEAR\",\n \"FLAT\",\n \"F162M\",\n \"F164N\",\n \"GDHS0\",\n \"GDHS60\",\n \"MASKBAR\",\n \"MASKIPR\",\n \"MASKRND\",\n \"PINHOLES\",\n \"WLM8\",\n \"WLP8\",\n \"F323N\",\n \"F405N\",\n \"F466N\",\n \"F470N\",\n \"GRISMC\",\n \"GRISMR\",\n \"GRISMV2\",\n \"GRISMV3\",\n ],\n \"niriss\": [\n \"CLEARP\",\n \"F090W\",\n \"F115W\",\n \"F140M\",\n \"F150W\",\n \"F158M\",\n \"F200W\",\n \"GR700XD\",\n \"NRM\",\n ],\n \"nirspec\": [],\n \"miri\": [],\n \"fgs\": [],\n}\n\n\n# Keep keys defined via class as they are used many places with potential mispellings\n# Keys are in sort order from general to instrument specific, then alphabetical\n# within instrument specific fields.\nclass QueryConfigKeys:\n INSTRUMENTS = \"INSTRUMENTS\"\n PROPOSAL_CATEGORY = \"PROPOSAL_CATEGORY\"\n LOOK_STATUS = \"LOOK_STATUS\"\n DATE_RANGE = \"DATE_RANGE\"\n NUM_PER_PAGE = \"NUM_PER_PAGE\"\n SORT_TYPE = \"SORT_TYPE\"\n ANOMALIES = \"ANOMALIES\"\n APERTURES = \"APERTURES\"\n DETECTORS = \"DETECTORS\"\n EXP_TYPES = \"EXP_TYPES\"\n FILTERS = \"FILTERS\"\n GRATINGS = \"GRATINGS\"\n PUPILS = \"PUPILS\"\n READ_PATTS = \"READ_PATTS\"\n SUBARRAYS = \"SUBARRAYS\"\n\n\n# Template for parameters to be stored in \"query_config\" session for query_page\nQUERY_CONFIG_TEMPLATE = {\n QueryConfigKeys.INSTRUMENTS: [],\n QueryConfigKeys.PROPOSAL_CATEGORY: [],\n QueryConfigKeys.LOOK_STATUS: [],\n QueryConfigKeys.NUM_PER_PAGE: 100,\n QueryConfigKeys.SORT_TYPE: \"Recent\",\n QueryConfigKeys.DATE_RANGE: \"\",\n QueryConfigKeys.ANOMALIES: {},\n QueryConfigKeys.APERTURES: {},\n QueryConfigKeys.DETECTORS: {},\n QueryConfigKeys.EXP_TYPES: {},\n QueryConfigKeys.FILTERS: {},\n QueryConfigKeys.GRATINGS: {},\n QueryConfigKeys.PUPILS: {},\n QueryConfigKeys.READ_PATTS: {},\n QueryConfigKeys.SUBARRAYS: {},\n}\n\n# RAPID-style readout patterns for each instrument. Added so we can\n# differentiate in MAST searches for e.g. the dark current monitor\nRAPID_READPATTERNS = {\n \"fgs\": [\"FGSRAPID\"],\n \"miri\": [\n \"FAST\",\n \"FASTR1\",\n \"SLOW\",\n \"SLOWR1\",\n \"FASTGRPAVG\",\n \"FASTGRPAVG8\",\n \"FASTGRPAVG16\",\n \"FASTGRPAVG32\",\n \"FASTGRPAVG64\",\n \"FASTR100\",\n ],\n \"nircam\": [\"RAPID\"],\n \"niriss\": [\"NISRAPID\"],\n \"nirspec\": [\"NRSRAPID\", \"NRSIRS2RAPID\"],\n}\n\nREADPATT_PER_INSTRUMENT = {\n \"fgs\": [\"FGS\", \"FGSRAPID\", \"FGS60\", \"FGS840\", \"FGS8370\"],\n \"miri\": [\n \"FAST\",\n \"FASTR1\",\n \"SLOW\",\n \"SLOWR1\",\n \"FASTGRPAVG\",\n \"FASTGRPAVG8\",\n \"FASTGRPAVG16\",\n \"FASTGRPAVG32\",\n \"FASTGRPAVG64\",\n \"FASTR100\",\n ],\n \"nircam\": [\n \"RAPID\",\n \"SHALLOW2\",\n \"BRIGHT2\",\n \"MEDIUM2\",\n \"SHALLOW4\",\n \"MEDIUM8\",\n \"BRIGHT1\",\n \"DEEP2\",\n \"DEEP8\",\n ],\n \"niriss\": [\"NISRAPID\", \"NIS\"],\n \"nirspec\": [\"NRS\", \"NRSRAPID\", \"NRSIRS2RAPID\", \"NRSRAPIDD2\", \"NRSRAPIDD6\"],\n}\n\n\nREPORT_KEYS_PER_INSTRUMENT = {\n \"fgs\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"miri\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"nircam\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"pupil\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"niriss\": [\n \"proposal\",\n \"exp_type\",\n \"expstart\",\n \"filter\",\n \"pupil\",\n \"aperture\",\n \"detector\",\n \"subarray\",\n \"viewed\",\n ],\n \"nirspec\": [\"exp_type\", \"filter\", \"grating\", \"read_patt_num\", \"viewed\"],\n}\n\n# Possible values for sort order\nSORT_OPTIONS = [\"Ascending\", \"Descending\", \"Recent\", \"Oldest\"]\n\nSUBARRAYS_ONE_OR_FOUR_AMPS = [\n \"SUBGRISMSTRIPE64\",\n \"SUBGRISMSTRIPE128\",\n \"SUBGRISMSTRIPE256\",\n]\n\nschema = asdf.schema.load_schema(\"http://stsci.edu/schemas/jwst_datamodel/subarray.schema\")\nSUBARRAYS_PER_INSTRUMENT = {\n \"nircam\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][2]['enum']),\n \"niriss\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][4]['enum']),\n \"nirspec\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][6]['enum']),\n \"miri\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][1]['enum']),\n \"fgs\": ['FULL'] + sorted(schema[\"properties\"][\"meta\"][\"properties\"][\"subarray\"][\"properties\"][\"name\"][\"anyOf\"][0]['enum'])\n}\n\n# Filename suffixes that need to include the association value in the suffix in\n# order to identify the preview image file. This should only be crf and crfints,\n# since those are essentially level 2 files that are output by the level 3 pipeline.\nSUFFIXES_TO_ADD_ASSOCIATION = [\"crf\", \"crfints\"]\n\n# Filename suffixes where data have been averaged over integrations\nSUFFIXES_WITH_AVERAGED_INTS = [\"rate\", \"cal\", \"crf\", \"i2d\", \"bsub\"]\n\n# boolean accessed according to a viewed flag\nTHUMBNAIL_FILTER_LOOK = [\"New\", \"Viewed\"]\n\n# Base name for the file listing the thumbnail images for a given instrument.\n# The complete name will have \"_{instrument.lower}.txt\" added to the end of this.\nTHUMBNAIL_LISTFILE = \"thumbnail_inventory\"\n\n# Possible suffix types for time-series exposures\nTIME_SERIES_SUFFIX_TYPES = [\"phot\", \"whtlt\"]\n\n# Instrument Documentation Links\nURL_DICT = {\n \"fgs\": \"https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor\",\n \"miri\": \"https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument\",\n \"niriss\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph\",\n \"nirspec\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph\",\n \"nircam\": \"https://jwst-docs.stsci.edu/jwst-near-infrared-camera\",\n}\n\n# Possible suffix types for WFS&C files\nWFSC_SUFFIX_TYPES = [\"wfscmb\"]\n\n# Concatenate all suffix types (ordered to ensure successful matching)\nFILE_SUFFIX_TYPES = (\n GUIDER_SUFFIX_TYPES\n + GENERIC_SUFFIX_TYPES\n + TIME_SERIES_SUFFIX_TYPES\n + NIRCAM_CORONAGRAPHY_SUFFIX_TYPES\n + NIRISS_AMI_SUFFIX_TYPES\n + WFSC_SUFFIX_TYPES\n + MSA_SUFFIX\n)\n\n# Model.Charfield Max Length Constants\nMAX_LEN_AMPLIFIER = 40\nMAX_LEN_APERTURE = 40\nMAX_LEN_DEPENDENCY_VALUE = 40\nMAX_LEN_DETECTOR = 40\nMAX_LEN_DIFF_IMAGE = 1000\nMAX_LEN_FILENAME = 1000\nMAX_LEN_FILTER = 7\nMAX_LEN_GENERIC_TEXT = 100\nMAX_LEN_GRATING = 40\nMAX_LEN_INSTRUMENT = 7\nMAX_LEN_MNEMONIC = 40\nMAX_LEN_NGROUPS = 10\nMAX_LEN_NINTS = 10\nMAX_LEN_OBS = 3\nMAX_LEN_PATH = 1000\nMAX_LEN_PROPOSAL = 5\nMAX_LEN_PUPIL = 40\nMAX_LEN_READPATTERN = 40\nMAX_LEN_SUBARRAY = 40\nMAX_LEN_TIME = 50\nMAX_LEN_TYPE = 40\nMAX_LEN_USER = 50\nMAX_LEN_VISIT = 30\n", "path": "jwql/utils/constants.py" } ]
diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 3691d0cd8..814eca299 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -470,7 +470,6 @@ "F170LP", "F290LP", "OPAQUE", - "P750L", ], }
scikit-hep__awkward-3025
Custom behaviors plus jax leading to lookup in wrong spot ### Version of Awkward Array ce63bf2 ### Description and code to reproduce This is partner issue to https://github.com/CoffeaTeam/coffea/issues/874 as perhaps this is more on the side of awkward than coffea. I am trying to combine custom behaviors (defined by coffea) with the jax backend of awkward. The reproducer below results in: ```pytb AttributeError: module 'jax.numpy' has no attribute '_mass2_kernel' ``` Reproducer: ```python import awkward as ak from coffea.nanoevents.methods import candidate import numpy as np import uproot ak.jax.register_and_check() ak.behavior.update(candidate.behavior) ttbar_file = "https://github.com/scikit-hep/scikit-hep-testdata/"\ "raw/main/src/skhep_testdata/data/nanoAOD_2015_CMS_Open_Data_ttbar.root" with uproot.open(ttbar_file) as f: arr = f["Events"].arrays(["Electron_pt", "Electron_eta", "Electron_phi", "Electron_mass", "Electron_charge"]) px = arr.Electron_pt * np.cos(arr.Electron_phi) py = arr.Electron_pt * np.sin(arr.Electron_phi) pz = arr.Electron_pt * np.sinh(arr.Electron_eta) E = np.sqrt(arr.Electron_mass**2 + px**2 + py**2 + pz**2) evtfilter = ak.num(arr["Electron_pt"]) >= 2 els = ak.zip({"pt": arr.Electron_pt, "eta": arr.Electron_eta, "phi": arr.Electron_phi, "energy": E, "charge": arr.Electron_charge}, with_name="PtEtaPhiECandidate")[evtfilter] els = ak.to_backend(els, "jax") (els[:, 0] + els[:, 1]).mass ``` Using the `"Momentum4D"` behavior from `vector` (after `vector.register_awkward()`) works. Skipping the backend conversion to jax also makes this work. <details> <summary>Full trace</summary> --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) Cell In[1], line 32 28 els = ak.zip({"pt": arr.Electron_pt, "eta": arr.Electron_eta, "phi": arr.Electron_phi, 29 "energy": E, "charge": arr.Electron_charge}, with_name="PtEtaPhiECandidate")[evtfilter] 30 els = ak.to_backend(els, "jax") ---> 32 (els[:, 0] + els[:, 1]).mass File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/highlevel.py:1097, in Array.__getattr__(self, where) 1061 """ 1062 Args: 1063 where (str): Attribute name to lookup (...) 1094 *assigned* as attributes. See #ak.Array.__setitem__ for more. 1095 """ 1096 if hasattr(type(self), where): -> 1097 return super().__getattribute__(where) 1098 else: 1099 if where in self._layout.fields: File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/coffea/nanoevents/methods/vector.py:531, in LorentzVector.mass(self) 525 @property 526 def mass(self): 527 r"""Invariant mass (+, -, -, -) 528 529 :math:`\sqrt{t^2-x^2-y^2-z^2}` 530 """ --> 531 return numpy.sqrt(self.mass2) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/highlevel.py:1097, in Array.__getattr__(self, where) 1061 """ 1062 Args: 1063 where (str): Attribute name to lookup (...) 1094 *assigned* as attributes. See #ak.Array.__setitem__ for more. 1095 """ 1096 if hasattr(type(self), where): -> 1097 return super().__getattribute__(where) 1098 else: 1099 if where in self._layout.fields: File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/coffea/nanoevents/methods/vector.py:523, in LorentzVector.mass2(self) 520 @property 521 def mass2(self): 522 """Squared `mass`""" --> 523 return _mass2_kernel(self.t, self.x, self.y, self.z) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/highlevel.py:1349, in Array.__array_ufunc__(self, ufunc, method, *inputs, **kwargs) 1347 name = f"{type(ufunc).__module__}.{ufunc.__name__}.{method!s}" 1348 with ak._errors.OperationErrorContext(name, inputs, kwargs): -> 1349 return ak._connect.numpy.array_ufunc(ufunc, method, inputs, kwargs) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_connect/numpy.py:459, in array_ufunc(ufunc, method, inputs, kwargs) 450 out = ak._do.recursively_apply( 451 inputs[where], 452 unary_action, (...) 455 allow_records=False, 456 ) 458 else: --> 459 out = ak._broadcasting.broadcast_and_apply( 460 inputs, action, behavior, allow_records=False, function_name=ufunc.__name__ 461 ) 462 assert isinstance(out, tuple) and len(out) == 1 463 out = out[0] File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_broadcasting.py:1022, in broadcast_and_apply(inputs, action, behavior, depth_context, lateral_context, allow_records, left_broadcast, right_broadcast, numpy_to_regular, regular_to_jagged, function_name, broadcast_parameters_rule) 1020 backend = backend_of(*inputs) 1021 isscalar = [] -> 1022 out = apply_step( 1023 backend, 1024 broadcast_pack(inputs, isscalar), 1025 action, 1026 0, 1027 depth_context, 1028 lateral_context, 1029 behavior, 1030 { 1031 "allow_records": allow_records, 1032 "left_broadcast": left_broadcast, 1033 "right_broadcast": right_broadcast, 1034 "numpy_to_regular": numpy_to_regular, 1035 "regular_to_jagged": regular_to_jagged, 1036 "function_name": function_name, 1037 "broadcast_parameters_rule": broadcast_parameters_rule, 1038 }, 1039 ) 1040 assert isinstance(out, tuple) 1041 return tuple(broadcast_unpack(x, isscalar, backend) for x in out) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_broadcasting.py:1001, in apply_step(backend, inputs, action, depth, depth_context, lateral_context, behavior, options) 999 return result 1000 elif result is None: -> 1001 return continuation() 1002 else: 1003 raise AssertionError(result) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_broadcasting.py:974, in apply_step.<locals>.continuation() 972 # Any non-string list-types? 973 elif any(x.is_list and not is_string_like(x) for x in contents): --> 974 return broadcast_any_list() 976 # Any RecordArrays? 977 elif any(x.is_record for x in contents): File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_broadcasting.py:622, in apply_step.<locals>.broadcast_any_list() 619 nextinputs.append(x) 620 nextparameters.append(NO_PARAMETERS) --> 622 outcontent = apply_step( 623 backend, 624 nextinputs, 625 action, 626 depth + 1, 627 copy.copy(depth_context), 628 lateral_context, 629 behavior, 630 options, 631 ) 632 assert isinstance(outcontent, tuple) 633 parameters = parameters_factory(nextparameters, len(outcontent)) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_broadcasting.py:987, in apply_step(backend, inputs, action, depth, depth_context, lateral_context, behavior, options) 980 else: 981 raise ValueError( 982 "cannot broadcast: {}{}".format( 983 ", ".join(repr(type(x)) for x in inputs), in_function(options) 984 ) 985 ) --> 987 result = action( 988 inputs, 989 depth=depth, 990 depth_context=depth_context, 991 lateral_context=lateral_context, 992 continuation=continuation, 993 behavior=behavior, 994 backend=backend, 995 options=options, 996 ) 998 if isinstance(result, tuple) and all(isinstance(x, Content) for x in result): 999 return result File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_connect/numpy.py:400, in array_ufunc.<locals>.action(inputs, **ignore) 397 args.append(x) 399 # Give backend a chance to change the ufunc implementation --> 400 impl = backend.prepare_ufunc(ufunc) 402 # Invoke ufunc 403 result = impl(*args, **kwargs) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_backends/jax.py:50, in JaxBackend.prepare_ufunc(self, ufunc) 47 def prepare_ufunc(self, ufunc: UfuncLike) -> UfuncLike: 48 from awkward._connect.jax import get_jax_ufunc ---> 50 return get_jax_ufunc(ufunc) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/awkward/_connect/jax/__init__.py:8, in get_jax_ufunc(ufunc) 7 def get_jax_ufunc(ufunc): ----> 8 return getattr(jax.numpy, ufunc.__name__) File ~/mambaforge/envs/agc-ad/lib/python3.11/site-packages/jax/_src/deprecations.py:53, in deprecation_getattr.<locals>.getattr(name) 51 warnings.warn(message, DeprecationWarning, stacklevel=2) 52 return fn ---> 53 raise AttributeError(f"module {module!r} has no attribute {name!r}") AttributeError: module 'jax.numpy' has no attribute '_mass2_kernel' This error occurred while calling numpy._mass2_kernel.__call__( <Array [192.54099, 132.60043, ..., 142.34727] type='5 * float32'> <Array [5.5301285, -46.949707, ..., -58.96562] type='5 * float32'> <Array [-70.93436, -12.467135, ..., -31.510773] type='5 * float32'> <Array [156.38907, -75.47587, ..., -115.080734] type='5 * float32'> ) </details>
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE\n\nfrom __future__ import annotations\n\nimport jax.numpy\n\nfrom awkward._connect.jax.reducers import get_jax_reducer # noqa: F401\nfrom awkward._connect.jax.trees import register_pytree_class # noqa: F401\n\n\ndef get_jax_ufunc(ufunc):\n return getattr(jax.numpy, ufunc.__name__)\n", "path": "src/awkward/_connect/jax/__init__.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE\n\nfrom __future__ import annotations\n\nimport jax.numpy\n\nfrom awkward._connect.jax.reducers import get_jax_reducer # noqa: F401\nfrom awkward._connect.jax.trees import register_pytree_class # noqa: F401\n\n\ndef get_jax_ufunc(ufunc):\n return getattr(jax.numpy, ufunc.__name__, ufunc)\n", "path": "src/awkward/_connect/jax/__init__.py" } ]
diff --git a/src/awkward/_connect/jax/__init__.py b/src/awkward/_connect/jax/__init__.py index d66ea654c4..07a7cfcfba 100644 --- a/src/awkward/_connect/jax/__init__.py +++ b/src/awkward/_connect/jax/__init__.py @@ -9,4 +9,4 @@ def get_jax_ufunc(ufunc): - return getattr(jax.numpy, ufunc.__name__) + return getattr(jax.numpy, ufunc.__name__, ufunc) diff --git a/tests/test_2603_custom_behaviors_with_jax.py b/tests/test_2603_custom_behaviors_with_jax.py new file mode 100644 index 0000000000..934ec46457 --- /dev/null +++ b/tests/test_2603_custom_behaviors_with_jax.py @@ -0,0 +1,38 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE + +from __future__ import annotations + +import pytest + +import awkward as ak + +numba = pytest.importorskip("numba") + + +def test(): + behavior = {} + + ak.jax.register_and_check() + + input_arr = ak.Array([1.0], backend="jax") + + @numba.vectorize( + [ + numba.float32(numba.float32, numba.float32), + numba.float64(numba.float64, numba.float64), + ] + ) + def _some_kernel(x, y): + return x * x + y * y + + @ak.mixin_class(behavior) + class SomeClass: + @property + def some_kernel(self): + return _some_kernel(self.x, self.y) + + ak.behavior.update(behavior) + + arr = ak.zip({"x": input_arr, "y": input_arr}, with_name="SomeClass") + + assert ak.all(arr.some_kernel == ak.Array([2.0], backend="jax"))
flairNLP__flair-447
__version__ attribute? I'm always frustrated when flair doesn't have a __version__attribute... :-) Please, add a __version__attribute to the module. Thank you! DC
[ { "content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py" } ]
[ { "content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n__version__ = \"0.4.1\"\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py" } ]
diff --git a/flair/__init__.py b/flair/__init__.py index 22803f63c4..7b592ded00 100644 --- a/flair/__init__.py +++ b/flair/__init__.py @@ -7,6 +7,7 @@ import logging.config +__version__ = "0.4.1" logging.config.dictConfig({ 'version': 1,
scverse__scanpy-1807
Report pynndescent version in sc.logging.print_header Hi, Thank you for the great tool. I think this is not a bug. Recently I upgraded some packages and found my results were different from the previous runs. I figured out that it is caused by different versions of `pynndescent` (0.4.7 vs 0.5.1), which is recommended to use in UMAP. So I think `pynndescent` should be included in the output of `sc.logging.print_header()`. #### Versions <details> ----- anndata 0.7.5 scanpy 1.6.1 sinfo 0.3.1 ----- PIL 8.1.0 anndata 0.7.5 constants NA cycler 0.10.0 cython_runtime NA dateutil 2.8.1 get_version 2.1 h5py 3.1.0 highs_wrapper NA igraph 0.8.3 joblib 1.0.0 kiwisolver 1.3.1 legacy_api_wrap 1.2 leidenalg 0.8.3 llvmlite 0.35.0 louvain 0.7.0 matplotlib 3.3.3 mpl_toolkits NA natsort 7.1.1 numba 0.52.0 numexpr 2.7.2 numpy 1.19.5 packaging 20.8 pandas 1.2.1 pkg_resources NA pynndescent 0.5.1 pyparsing 2.4.7 pytz 2020.5 scanpy 1.6.1 scipy 1.6.0 setuptools_scm NA sinfo 0.3.1 six 1.15.0 sklearn 0.24.1 statsmodels 0.12.1 tables 3.6.1 texttable 1.6.3 umap 0.4.6 ----- Python 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0] Linux-3.10.0-1160.11.1.el7.x86_64-x86_64-with-glibc2.10 40 logical CPU cores, x86_64 </details>
[ { "content": "\"\"\"Logging and Profiling\n\"\"\"\nimport io\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional\n\nimport anndata.logging\nfrom sinfo import sinfo\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy’s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and Seaborn are excluded from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file=None):\n \"\"\"Print print versions of imported packages\"\"\"\n if file is None: # Inform people about the behavior change\n warning('If you miss a compact list, please try `print_header`!')\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n sinfo(\n dependencies=True,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n # https://gitlab.com/joelostblom/sinfo/-/issues/10\n \"$coverage\",\n ],\n )\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n print(output, file=file)\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function’s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py" } ]
[ { "content": "\"\"\"Logging and Profiling\n\"\"\"\nimport io\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional\n\nimport anndata.logging\nfrom sinfo import sinfo\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy’s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n 'pynndescent',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and Seaborn are excluded from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file=None):\n \"\"\"Print print versions of imported packages\"\"\"\n if file is None: # Inform people about the behavior change\n warning('If you miss a compact list, please try `print_header`!')\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n sinfo(\n dependencies=True,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n # https://gitlab.com/joelostblom/sinfo/-/issues/10\n \"$coverage\",\n ],\n )\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n print(output, file=file)\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function’s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py" } ]
diff --git a/scanpy/logging.py b/scanpy/logging.py index c0f810b281..1f44ec0506 100644 --- a/scanpy/logging.py +++ b/scanpy/logging.py @@ -131,6 +131,7 @@ def format(self, record: logging.LogRecord): ('igraph', 'python-igraph'), 'louvain', 'leidenalg', + 'pynndescent', ]
ocadotechnology__aimmo-60
Fix warning about deprecation of TEMPLATE_DEBUG When starting aimmo locally the following message is displayed: > WARNINGS: > ?: (1_8.W001) The standalone TEMPLATE_\* settings were deprecated in Django 1.8 and the TEMPLATES dictionary takes precedence. You must put the values of the following settings into your default TEMPLATES dict: TEMPLATE_DEBUG. The value in question is in `aimmo/example_project/example_project/settings.py`: `TEMPLATE_DEBUG = DEBUG` The TEMPLATES dictionary _maybe_ the one here `aimmo/players/autoconfig.py` (?): ``` 'TEMPLATES': [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ] } } ], ```
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS – Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any “Ocado” logos,\n# trade names or the trademark “Ocado” or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of “Ocado” as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# “Ocado” or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Django settings for example_project project.'''\nimport os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS – Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any “Ocado” logos,\n# trade names or the trademark “Ocado” or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of “Ocado” as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# “Ocado” or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Django settings for example_project project.'''\nimport os\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py" } ]
diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py index 0a3247898..c09467644 100644 --- a/example_project/example_project/settings.py +++ b/example_project/example_project/settings.py @@ -38,7 +38,6 @@ import os DEBUG = True -TEMPLATE_DEBUG = DEBUG DATABASES = { 'default': {
django-wiki__django-wiki-750
markdow error <sup id = "fnref: 1"> duplicate (footnotes) now ![1](https://fotos.subefotos.com/c83c25687ddca44c7a2b7b8ab3cf78f4o.png) ![2](https://fotos.subefotos.com/8e46f36b4f13d53f6a6655556f0b7620o.png) Here is an example with wiki == 0.2b2 ![3](https://fotos.subefotos.com/b21939eaa52be55ed81d6171e8fadd4co.png)
[ { "content": "from __future__ import unicode_literals\n\nimport bleach\n\nfrom django.conf import settings as django_settings\nfrom django.contrib.messages import constants as messages\nfrom django.core.files.storage import default_storage\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\n#: Should urls be case sensitive?\nURL_CASE_SENSITIVE = getattr(django_settings, 'WIKI_URL_CASE_SENSITIVE', False)\n\n# Non-configurable (at the moment)\nWIKI_LANGUAGE = 'markdown'\n\n#: The editor class to use -- maybe a 3rd party or your own...? You can always\n#: extend the built-in editor and customize it!\nEDITOR = getattr(\n django_settings,\n 'WIKI_EDITOR',\n 'wiki.editors.markitup.MarkItUp')\n\n#: Whether to use Bleach or not. It's not recommended to turn this off unless\n#: you know what you're doing and you don't want to use the other options.\nMARKDOWN_SANITIZE_HTML = getattr(\n django_settings,\n 'WIKI_MARKDOWN_SANITIZE_HTML',\n True)\n\n#: Arguments for the Markdown instance, for instance a list of extensions to\n#: use.\n#: See: https://pythonhosted.org/Markdown/extensions/index.html\n#:\n#: To set a custom title for TOC's::\n#:\n#: WIKI_MARKDOWN_KWARGS = {'extension_configs': {'toc': _('Contents of this article')}}\nMARKDOWN_KWARGS = {\n 'extensions': [\n 'footnotes',\n 'attr_list',\n 'smart_strong',\n 'footnotes',\n 'attr_list',\n 'def_list',\n 'tables',\n 'abbr',\n 'sane_lists',\n ],\n 'extension_configs': {\n 'toc': {\n 'title': _('Table of Contents')}},\n}\nMARKDOWN_KWARGS.update(getattr(django_settings, 'WIKI_MARKDOWN_KWARGS', {}))\n\n_default_tag_whitelists = bleach.ALLOWED_TAGS + [\n 'figure',\n 'figcaption',\n 'br',\n 'hr',\n 'p',\n 'div',\n 'img',\n 'pre',\n 'span',\n 'table',\n 'thead',\n 'tbody',\n 'th',\n 'tr',\n 'td',\n 'dl',\n 'dt',\n 'dd',\n] + ['h{}'.format(n) for n in range(8)]\n\n\n#: List of allowed tags in Markdown article contents.\nMARKDOWN_HTML_WHITELIST = _default_tag_whitelists\nMARKDOWN_HTML_WHITELIST += (\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_WHITELIST',\n []\n )\n)\n\n_default_attribute_whitelist = bleach.ALLOWED_ATTRIBUTES\nfor tag in MARKDOWN_HTML_WHITELIST:\n if tag not in _default_attribute_whitelist:\n _default_attribute_whitelist[tag] = []\n _default_attribute_whitelist[tag].append('class')\n _default_attribute_whitelist[tag].append('id')\n\n_default_attribute_whitelist['img'].append('src')\n_default_attribute_whitelist['img'].append('alt')\n\n#: Dictionary of allowed attributes in Markdown article contents.\nMARKDOWN_HTML_ATTRIBUTES = _default_attribute_whitelist\nMARKDOWN_HTML_ATTRIBUTES.update(\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',\n {}\n )\n)\n\n#: Allowed inline styles in Markdown article contents, default is no styles\n#: (empty list).\nMARKDOWN_HTML_STYLES = (\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_STYLES',\n []\n )\n)\n\n_project_defined_attrs = getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',\n False)\n\n# If styles are allowed but no custom attributes are defined, we allow styles\n# for all kinds of tags.\nif MARKDOWN_HTML_STYLES and not _project_defined_attrs:\n MARKDOWN_HTML_ATTRIBUTES['*'] = 'style'\n\n\n#: This slug is used in URLPath if an article has been deleted. The children of the\n#: URLPath of that article are moved to lost and found. They keep their permissions\n#: and all their content.\nLOST_AND_FOUND_SLUG = getattr(\n django_settings,\n 'WIKI_LOST_AND_FOUND_SLUG',\n 'lost-and-found')\n\n#: When True, this blocks new slugs that resolve to non-wiki views, stopping\n#: users creating articles that conflict with overlapping URLs from other apps.\nCHECK_SLUG_URL_AVAILABLE = getattr(\n django_settings,\n 'WIKI_CHECK_SLUG_URL_AVAILABLE',\n True)\n\n#: Do we want to log IPs of anonymous users?\nLOG_IPS_ANONYMOUS = getattr(django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True)\n\n#: Do we want to log IPs of logged in users?\nLOG_IPS_USERS = getattr(django_settings, 'WIKI_LOG_IPS_USERS', False)\n\n#: Mapping from message.tag to bootstrap class names.\nMESSAGE_TAG_CSS_CLASS = getattr(\n django_settings,\n 'WIKI_MESSAGE_TAG_CSS_CLASS',\n {\n messages.DEFAULT_TAGS[messages.DEBUG]: \"alert alert-info\",\n messages.DEFAULT_TAGS[messages.ERROR]: \"alert alert-danger\",\n messages.DEFAULT_TAGS[messages.INFO]: \"alert alert-info\",\n messages.DEFAULT_TAGS[messages.SUCCESS]: \"alert alert-success\",\n messages.DEFAULT_TAGS[messages.WARNING]: \"alert alert-warning\",\n }\n)\n\n####################################\n# PERMISSIONS AND ACCOUNT HANDLING #\n####################################\n\n# NB! None of these callables need to handle anonymous users as they are treated\n# in separate settings...\n\n#: A function returning True/False if a user has permission to\n#: read contents of an article and plugins.\n#: Relevance: Viewing articles and plugins.\nCAN_READ = getattr(django_settings, 'WIKI_CAN_READ', None)\n\n#: A function returning True/False if a user has permission to\n#: change contents, i.e. add new revisions to an article.\n#: Often, plugins also use this.\n#: Relevance: Editing articles, changing revisions, editing plugins.\nCAN_WRITE = getattr(django_settings, 'WIKI_CAN_WRITE', None)\n\n#: A function returning True/False if a user has permission to assign\n#: permissions on an article.\n#: Relevance: Changing owner and group membership.\nCAN_ASSIGN = getattr(django_settings, 'WIKI_CAN_ASSIGN', None)\n\n#: A function returning True/False if the owner of an article has permission\n#: to change the group to a user's own groups.\n#: Relevance: Changing group membership.\nCAN_ASSIGN_OWNER = getattr(django_settings, 'WIKI_ASSIGN_OWNER', None)\n\n#: A function returning True/False if a user has permission to change\n#: read/write access for groups and others.\nCAN_CHANGE_PERMISSIONS = getattr(\n django_settings,\n 'WIKI_CAN_CHANGE_PERMISSIONS',\n None)\n\n#: Specifies if a user has access to soft deletion of articles.\nCAN_DELETE = getattr(django_settings, 'WIKI_CAN_DELETE', None)\n\n#: A function returning True/False if a user has permission to change\n#: moderate, ie. lock articles and permanently delete content.\nCAN_MODERATE = getattr(django_settings, 'WIKI_CAN_MODERATE', None)\n\n#: A function returning True/False if a user has permission to create\n#: new groups and users for the wiki.\nCAN_ADMIN = getattr(django_settings, 'WIKI_CAN_ADMIN', None)\n\n#: Treat anonymous (i.e. non logged in) users as the \"other\" user group.\nANONYMOUS = getattr(django_settings, 'WIKI_ANONYMOUS', True)\n\n#: Globally enable write access for anonymous users, if true anonymous users\n#: will be treated as the others_write boolean field on models.Article.\nANONYMOUS_WRITE = getattr(django_settings, 'WIKI_ANONYMOUS_WRITE', False)\n\n#: Globally enable create access for anonymous users.\n#: Defaults to ``ANONYMOUS_WRITE``.\nANONYMOUS_CREATE = getattr(\n django_settings,\n 'WIKI_ANONYMOUS_CREATE',\n ANONYMOUS_WRITE)\n\n#: Default setting to allow anonymous users upload access. Used in\n#: plugins.attachments and plugins.images, and can be overwritten in\n#: these plugins.\nANONYMOUS_UPLOAD = getattr(django_settings, 'WIKI_ANONYMOUS_UPLOAD', False)\n\n#: Sign up, login and logout views should be accessible.\nACCOUNT_HANDLING = getattr(django_settings, 'WIKI_ACCOUNT_HANDLING', True)\n\n#: Signup allowed? If it's not allowed, logged in superusers can still access\n#: the signup page to create new users.\nACCOUNT_SIGNUP_ALLOWED = ACCOUNT_HANDLING and getattr(\n django_settings, 'WIKI_ACCOUNT_SIGNUP_ALLOWED', True\n)\n\nif ACCOUNT_HANDLING:\n LOGIN_URL = reverse_lazy(\"wiki:login\")\n LOGOUT_URL = reverse_lazy(\"wiki:logout\")\n SIGNUP_URL = reverse_lazy(\"wiki:signup\")\nelse:\n LOGIN_URL = getattr(django_settings, \"LOGIN_URL\", \"/\")\n LOGOUT_URL = getattr(django_settings, \"LOGOUT_URL\", \"/\")\n SIGNUP_URL = getattr(django_settings, \"WIKI_SIGNUP_URL\", \"/\")\n\n##################\n# OTHER SETTINGS #\n##################\n\n#: Maximum amount of children to display in a menu before showing \"+more\".\n#: NEVER set this to 0 as it will wrongly inform the user that there are no\n#: children and for instance that an article can be safely deleted.\nSHOW_MAX_CHILDREN = getattr(django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20)\n\n#: User Bootstrap's select widget. Switch off if you're not using Bootstrap!\nUSE_BOOTSTRAP_SELECT_WIDGET = getattr(\n django_settings,\n 'WIKI_USE_BOOTSTRAP_SELECT_WIDGET',\n True)\n\n#: Dotted name of the class used to construct urlpatterns for the wiki.\n#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,\n#: you can derive from this.\nURL_CONFIG_CLASS = getattr(\n django_settings,\n 'WIKI_URL_CONFIG_CLASS',\n 'wiki.urls.WikiURLPatterns')\n\n#: Search view - dotted path denoting where the search view Class is located.\nSEARCH_VIEW = getattr(\n django_settings,\n 'WIKI_SEARCH_VIEW',\n 'wiki.views.article.SearchView'\n if 'wiki.plugins.haystack' not in django_settings.INSTALLED_APPS\n else\n 'wiki.plugins.haystack.views.HaystackSearchView'\n)\n\n#: Seconds of timeout before renewing the article cache. Articles are automatically\n#: renewed whenever an edit occurs but article content may be generated from\n#: other objects that are changed.\nCACHE_TIMEOUT = getattr(django_settings, 'WIKI_CACHE_TIMEOUT', 600)\n\n#: Choose the Group model to use for permission handling. Defaults to django's auth.Group.\nGROUP_MODEL = getattr(django_settings, 'WIKI_GROUP_MODEL', 'auth.Group')\n\n###################\n# SPAM PROTECTION #\n###################\n\n#: Maximum allowed revisions per hour for any given user or IP.\nREVISIONS_PER_HOUR = getattr(django_settings, 'WIKI_REVISIONS_PER_HOUR', 60)\n\n#: Maximum allowed revisions per minute for any given user or IP.\nREVISIONS_PER_MINUTES = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES',\n 5)\n\n#: Maximum allowed revisions per hour for any anonymous user and any IP.\nREVISIONS_PER_HOUR_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_HOUR_ANONYMOUS',\n 10)\n\n#: Maximum allowed revisions per minute for any anonymous user and any IP.\nREVISIONS_PER_MINUTES_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS',\n 2)\n\n#: Number of minutes to look back for looking up ``REVISIONS_PER_MINUTES``\n#: and ``REVISIONS_PER_MINUTES_ANONYMOUS``.\nREVISIONS_MINUTES_LOOKBACK = getattr(\n django_settings,\n 'WIKI_REVISIONS_MINUTES_LOOKBACK',\n 2)\n\n###########\n# STORAGE #\n###########\n\n#: Default Django storage backend to use for images, attachments etc.\nSTORAGE_BACKEND = getattr(\n django_settings,\n 'WIKI_STORAGE_BACKEND',\n default_storage)\n\n#: Use django-sendfile for sending out files? Otherwise the whole file is\n#: first read into memory and than send with a mime type based on the file.\nUSE_SENDFILE = getattr(django_settings, 'WIKI_ATTACHMENTS_USE_SENDFILE', False)\n", "path": "src/wiki/conf/settings.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nimport bleach\n\nfrom django.conf import settings as django_settings\nfrom django.contrib.messages import constants as messages\nfrom django.core.files.storage import default_storage\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\n#: Should urls be case sensitive?\nURL_CASE_SENSITIVE = getattr(django_settings, 'WIKI_URL_CASE_SENSITIVE', False)\n\n# Non-configurable (at the moment)\nWIKI_LANGUAGE = 'markdown'\n\n#: The editor class to use -- maybe a 3rd party or your own...? You can always\n#: extend the built-in editor and customize it!\nEDITOR = getattr(\n django_settings,\n 'WIKI_EDITOR',\n 'wiki.editors.markitup.MarkItUp')\n\n#: Whether to use Bleach or not. It's not recommended to turn this off unless\n#: you know what you're doing and you don't want to use the other options.\nMARKDOWN_SANITIZE_HTML = getattr(\n django_settings,\n 'WIKI_MARKDOWN_SANITIZE_HTML',\n True)\n\n#: Arguments for the Markdown instance, for instance a list of extensions to\n#: use.\n#: See: https://pythonhosted.org/Markdown/extensions/index.html\n#:\n#: To set a custom title for TOC's::\n#:\n#: WIKI_MARKDOWN_KWARGS = {'extension_configs': {'toc': _('Contents of this article')}}\nMARKDOWN_KWARGS = {\n 'extensions': [\n 'footnotes',\n 'attr_list',\n 'smart_strong',\n 'footnotes',\n 'attr_list',\n 'def_list',\n 'tables',\n 'abbr',\n 'sane_lists',\n ],\n 'extension_configs': {\n 'toc': {\n 'title': _('Table of Contents')}},\n}\nMARKDOWN_KWARGS.update(getattr(django_settings, 'WIKI_MARKDOWN_KWARGS', {}))\n\n_default_tag_whitelists = bleach.ALLOWED_TAGS + [\n 'figure',\n 'figcaption',\n 'br',\n 'hr',\n 'p',\n 'div',\n 'img',\n 'pre',\n 'span',\n 'sup',\n 'table',\n 'thead',\n 'tbody',\n 'th',\n 'tr',\n 'td',\n 'dl',\n 'dt',\n 'dd',\n] + ['h{}'.format(n) for n in range(8)]\n\n\n#: List of allowed tags in Markdown article contents.\nMARKDOWN_HTML_WHITELIST = _default_tag_whitelists\nMARKDOWN_HTML_WHITELIST += (\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_WHITELIST',\n []\n )\n)\n\n_default_attribute_whitelist = bleach.ALLOWED_ATTRIBUTES\nfor tag in MARKDOWN_HTML_WHITELIST:\n if tag not in _default_attribute_whitelist:\n _default_attribute_whitelist[tag] = []\n _default_attribute_whitelist[tag].append('class')\n _default_attribute_whitelist[tag].append('id')\n\n_default_attribute_whitelist['img'].append('src')\n_default_attribute_whitelist['img'].append('alt')\n\n#: Dictionary of allowed attributes in Markdown article contents.\nMARKDOWN_HTML_ATTRIBUTES = _default_attribute_whitelist\nMARKDOWN_HTML_ATTRIBUTES.update(\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',\n {}\n )\n)\n\n#: Allowed inline styles in Markdown article contents, default is no styles\n#: (empty list).\nMARKDOWN_HTML_STYLES = (\n getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_STYLES',\n []\n )\n)\n\n_project_defined_attrs = getattr(\n django_settings,\n 'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',\n False)\n\n# If styles are allowed but no custom attributes are defined, we allow styles\n# for all kinds of tags.\nif MARKDOWN_HTML_STYLES and not _project_defined_attrs:\n MARKDOWN_HTML_ATTRIBUTES['*'] = 'style'\n\n\n#: This slug is used in URLPath if an article has been deleted. The children of the\n#: URLPath of that article are moved to lost and found. They keep their permissions\n#: and all their content.\nLOST_AND_FOUND_SLUG = getattr(\n django_settings,\n 'WIKI_LOST_AND_FOUND_SLUG',\n 'lost-and-found')\n\n#: When True, this blocks new slugs that resolve to non-wiki views, stopping\n#: users creating articles that conflict with overlapping URLs from other apps.\nCHECK_SLUG_URL_AVAILABLE = getattr(\n django_settings,\n 'WIKI_CHECK_SLUG_URL_AVAILABLE',\n True)\n\n#: Do we want to log IPs of anonymous users?\nLOG_IPS_ANONYMOUS = getattr(django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True)\n\n#: Do we want to log IPs of logged in users?\nLOG_IPS_USERS = getattr(django_settings, 'WIKI_LOG_IPS_USERS', False)\n\n#: Mapping from message.tag to bootstrap class names.\nMESSAGE_TAG_CSS_CLASS = getattr(\n django_settings,\n 'WIKI_MESSAGE_TAG_CSS_CLASS',\n {\n messages.DEFAULT_TAGS[messages.DEBUG]: \"alert alert-info\",\n messages.DEFAULT_TAGS[messages.ERROR]: \"alert alert-danger\",\n messages.DEFAULT_TAGS[messages.INFO]: \"alert alert-info\",\n messages.DEFAULT_TAGS[messages.SUCCESS]: \"alert alert-success\",\n messages.DEFAULT_TAGS[messages.WARNING]: \"alert alert-warning\",\n }\n)\n\n####################################\n# PERMISSIONS AND ACCOUNT HANDLING #\n####################################\n\n# NB! None of these callables need to handle anonymous users as they are treated\n# in separate settings...\n\n#: A function returning True/False if a user has permission to\n#: read contents of an article and plugins.\n#: Relevance: Viewing articles and plugins.\nCAN_READ = getattr(django_settings, 'WIKI_CAN_READ', None)\n\n#: A function returning True/False if a user has permission to\n#: change contents, i.e. add new revisions to an article.\n#: Often, plugins also use this.\n#: Relevance: Editing articles, changing revisions, editing plugins.\nCAN_WRITE = getattr(django_settings, 'WIKI_CAN_WRITE', None)\n\n#: A function returning True/False if a user has permission to assign\n#: permissions on an article.\n#: Relevance: Changing owner and group membership.\nCAN_ASSIGN = getattr(django_settings, 'WIKI_CAN_ASSIGN', None)\n\n#: A function returning True/False if the owner of an article has permission\n#: to change the group to a user's own groups.\n#: Relevance: Changing group membership.\nCAN_ASSIGN_OWNER = getattr(django_settings, 'WIKI_ASSIGN_OWNER', None)\n\n#: A function returning True/False if a user has permission to change\n#: read/write access for groups and others.\nCAN_CHANGE_PERMISSIONS = getattr(\n django_settings,\n 'WIKI_CAN_CHANGE_PERMISSIONS',\n None)\n\n#: Specifies if a user has access to soft deletion of articles.\nCAN_DELETE = getattr(django_settings, 'WIKI_CAN_DELETE', None)\n\n#: A function returning True/False if a user has permission to change\n#: moderate, ie. lock articles and permanently delete content.\nCAN_MODERATE = getattr(django_settings, 'WIKI_CAN_MODERATE', None)\n\n#: A function returning True/False if a user has permission to create\n#: new groups and users for the wiki.\nCAN_ADMIN = getattr(django_settings, 'WIKI_CAN_ADMIN', None)\n\n#: Treat anonymous (i.e. non logged in) users as the \"other\" user group.\nANONYMOUS = getattr(django_settings, 'WIKI_ANONYMOUS', True)\n\n#: Globally enable write access for anonymous users, if true anonymous users\n#: will be treated as the others_write boolean field on models.Article.\nANONYMOUS_WRITE = getattr(django_settings, 'WIKI_ANONYMOUS_WRITE', False)\n\n#: Globally enable create access for anonymous users.\n#: Defaults to ``ANONYMOUS_WRITE``.\nANONYMOUS_CREATE = getattr(\n django_settings,\n 'WIKI_ANONYMOUS_CREATE',\n ANONYMOUS_WRITE)\n\n#: Default setting to allow anonymous users upload access. Used in\n#: plugins.attachments and plugins.images, and can be overwritten in\n#: these plugins.\nANONYMOUS_UPLOAD = getattr(django_settings, 'WIKI_ANONYMOUS_UPLOAD', False)\n\n#: Sign up, login and logout views should be accessible.\nACCOUNT_HANDLING = getattr(django_settings, 'WIKI_ACCOUNT_HANDLING', True)\n\n#: Signup allowed? If it's not allowed, logged in superusers can still access\n#: the signup page to create new users.\nACCOUNT_SIGNUP_ALLOWED = ACCOUNT_HANDLING and getattr(\n django_settings, 'WIKI_ACCOUNT_SIGNUP_ALLOWED', True\n)\n\nif ACCOUNT_HANDLING:\n LOGIN_URL = reverse_lazy(\"wiki:login\")\n LOGOUT_URL = reverse_lazy(\"wiki:logout\")\n SIGNUP_URL = reverse_lazy(\"wiki:signup\")\nelse:\n LOGIN_URL = getattr(django_settings, \"LOGIN_URL\", \"/\")\n LOGOUT_URL = getattr(django_settings, \"LOGOUT_URL\", \"/\")\n SIGNUP_URL = getattr(django_settings, \"WIKI_SIGNUP_URL\", \"/\")\n\n##################\n# OTHER SETTINGS #\n##################\n\n#: Maximum amount of children to display in a menu before showing \"+more\".\n#: NEVER set this to 0 as it will wrongly inform the user that there are no\n#: children and for instance that an article can be safely deleted.\nSHOW_MAX_CHILDREN = getattr(django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20)\n\n#: User Bootstrap's select widget. Switch off if you're not using Bootstrap!\nUSE_BOOTSTRAP_SELECT_WIDGET = getattr(\n django_settings,\n 'WIKI_USE_BOOTSTRAP_SELECT_WIDGET',\n True)\n\n#: Dotted name of the class used to construct urlpatterns for the wiki.\n#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,\n#: you can derive from this.\nURL_CONFIG_CLASS = getattr(\n django_settings,\n 'WIKI_URL_CONFIG_CLASS',\n 'wiki.urls.WikiURLPatterns')\n\n#: Search view - dotted path denoting where the search view Class is located.\nSEARCH_VIEW = getattr(\n django_settings,\n 'WIKI_SEARCH_VIEW',\n 'wiki.views.article.SearchView'\n if 'wiki.plugins.haystack' not in django_settings.INSTALLED_APPS\n else\n 'wiki.plugins.haystack.views.HaystackSearchView'\n)\n\n#: Seconds of timeout before renewing the article cache. Articles are automatically\n#: renewed whenever an edit occurs but article content may be generated from\n#: other objects that are changed.\nCACHE_TIMEOUT = getattr(django_settings, 'WIKI_CACHE_TIMEOUT', 600)\n\n#: Choose the Group model to use for permission handling. Defaults to django's auth.Group.\nGROUP_MODEL = getattr(django_settings, 'WIKI_GROUP_MODEL', 'auth.Group')\n\n###################\n# SPAM PROTECTION #\n###################\n\n#: Maximum allowed revisions per hour for any given user or IP.\nREVISIONS_PER_HOUR = getattr(django_settings, 'WIKI_REVISIONS_PER_HOUR', 60)\n\n#: Maximum allowed revisions per minute for any given user or IP.\nREVISIONS_PER_MINUTES = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES',\n 5)\n\n#: Maximum allowed revisions per hour for any anonymous user and any IP.\nREVISIONS_PER_HOUR_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_HOUR_ANONYMOUS',\n 10)\n\n#: Maximum allowed revisions per minute for any anonymous user and any IP.\nREVISIONS_PER_MINUTES_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS',\n 2)\n\n#: Number of minutes to look back for looking up ``REVISIONS_PER_MINUTES``\n#: and ``REVISIONS_PER_MINUTES_ANONYMOUS``.\nREVISIONS_MINUTES_LOOKBACK = getattr(\n django_settings,\n 'WIKI_REVISIONS_MINUTES_LOOKBACK',\n 2)\n\n###########\n# STORAGE #\n###########\n\n#: Default Django storage backend to use for images, attachments etc.\nSTORAGE_BACKEND = getattr(\n django_settings,\n 'WIKI_STORAGE_BACKEND',\n default_storage)\n\n#: Use django-sendfile for sending out files? Otherwise the whole file is\n#: first read into memory and than send with a mime type based on the file.\nUSE_SENDFILE = getattr(django_settings, 'WIKI_ATTACHMENTS_USE_SENDFILE', False)\n", "path": "src/wiki/conf/settings.py" } ]
diff --git a/src/wiki/conf/settings.py b/src/wiki/conf/settings.py index 57064633c..eb447338a 100644 --- a/src/wiki/conf/settings.py +++ b/src/wiki/conf/settings.py @@ -63,6 +63,7 @@ 'img', 'pre', 'span', + 'sup', 'table', 'thead', 'tbody',
fidals__shopelectro-719
Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical
[ { "content": "from functools import partial\n\nfrom catalog.newcontext import Context, Tags\n\n\nclass Page(Context):\n\n def __init__(self, page, tags: Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n 'skip_canonical': tags_qs.exists(),\n }\n", "path": "shopelectro/context.py" } ]
[ { "content": "from functools import partial\n\nfrom catalog.newcontext import Context, Tags\n\n\nclass Page(Context):\n\n def __init__(self, page, tags: Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n", "path": "shopelectro/context.py" } ]
diff --git a/shopelectro/context.py b/shopelectro/context.py index 29ebce78..17bbabdf 100644 --- a/shopelectro/context.py +++ b/shopelectro/context.py @@ -24,5 +24,4 @@ def template_context(page, tag_titles, tags): return { 'page': self._page, - 'skip_canonical': tags_qs.exists(), } diff --git a/shopelectro/tests/tests_selenium.py b/shopelectro/tests/tests_selenium.py index b16c589b..c37c78b2 100644 --- a/shopelectro/tests/tests_selenium.py +++ b/shopelectro/tests/tests_selenium.py @@ -1025,6 +1025,8 @@ def test_full_buy_goal(self): self.assertTrue('FULL_BUY_SEND' in self.reached_goals) self.assertTrue('CMN_BUY_SEND' in self.reached_goals) + # @todo #718:30m Resurrect `test_cart_page_open` test. + @unittest.skip def test_cart_page_open(self): self.buy_product() self.prevent_default('click', '.js-go-to-cart') diff --git a/shopelectro/tests/tests_views.py b/shopelectro/tests/tests_views.py index 5ca21179..28d3ff0c 100644 --- a/shopelectro/tests/tests_views.py +++ b/shopelectro/tests/tests_views.py @@ -44,15 +44,16 @@ def setUp(self): self.category = models.Category.objects.root_nodes().select_related('page').first() self.tags = models.Tag.objects.order_by(*settings.TAGS_ORDER).all() - def get_category_page( + def get_category_url( self, - category: models.Category=None, - tags: models.TagQuerySet=None, - sorting: int=None, - query_string: dict=None, + category: models.Category = None, + tags: models.TagQuerySet = None, + sorting: int = None, + query_string: dict = None, route='category', - route_kwargs: dict=None, + route_kwargs: dict = None, ): + query_string = query_string or {} route_kwargs = route_kwargs or {} category = category or self.category route_kwargs = { @@ -60,9 +61,13 @@ def get_category_page( **route_kwargs } - return self.client.get(reverse_catalog_url( + return reverse_catalog_url( route, route_kwargs, tags, sorting, query_string, - )) + ) + + def get_category_page(self, *args, **kwargs): + """See `self.get_category_url()` interface.""" + return self.client.get(self.get_category_url(*args, **kwargs)) @tag('fast') @@ -85,40 +90,6 @@ def test_category_page_contains_all_tags(self): for tag_name in tag_names: self.assertContains(response, tag_name) - def test_has_canonical_meta_tag(self): - """Test that CategoryPage should contain canonical meta tag.""" - response = self.get_category_page() - self.assertEqual(response.status_code, 200) - self.assertContains( - response, - CANONICAL_HTML_TAG.format(path=response.request['PATH_INFO']), - ) - - def test_tags_page_has_no_canonical_meta_tag(self): - """Test that CategoryTagsPage should not contain canonical meta tag.""" - # ignore CPDBear - response = self.get_category_page(tags=self.tags) - self.assertEqual(response.status_code, 200) - self.assertNotContains( - response, - CANONICAL_HTML_TAG.format(path=response.request['PATH_INFO']), - ) - - def test_paginated_tags_page_has_no_canonical_meta_tag(self): - """ - Test CategoryTagsPage with canonical tags. - - CategoryTagsPage with pagination (and sorting) options - should not contain canonical meta tag. - """ - # ignore CPDBear - response = self.get_category_page(tags=self.tags, sorting=1) - self.assertEqual(response.status_code, 200) - self.assertNotContains( - response, - CANONICAL_HTML_TAG.format(path=response.request['PATH_INFO']) - ) - def test_contains_product_with_certain_tags(self): """Category page contains Product's related by certain tags.""" tags = self.tags @@ -525,6 +496,41 @@ def test_page_db_template_with_special_chars(self): response = self.get_category_page() self.assertEqual(200, response.status_code) + def test_canonical_meta_tag(self): + """Category page should contain canonical meta tag.""" + path = self.get_category_url() + response = self.client.get(path) + self.assertEqual(response.status_code, 200) + self.assertContains( + response, + CANONICAL_HTML_TAG.format(path=path), + ) + + def test_tags_pagination_has_canonical_links(self): + """ + Paginated tags page should contain canonical link. + + Link on it's not paginated version. + """ + tags = models.Tag.objects.filter_by_products( + products=( + models.Product.objects.all() + .filter_descendants(self.category) + ) + )[:1] + + not_paginated_url = self.get_category_url( + tags=tags + ) + paginated_url = self.get_category_url( + tags=tags, + query_string={'page': 2} + ) + response = self.client.get(paginated_url) + self.assertContains( + response, CANONICAL_HTML_TAG.format(path=not_paginated_url) + ) + @tag('fast') class IndexPage(TestCase): @@ -537,9 +543,9 @@ class IndexPage(TestCase): ] } + @override_settings(MAIN_PAGE_TILE=MAIN_PAGE_TILE) def test_get_category_tile(self): - with override_settings(MAIN_PAGE_TILE=self.MAIN_PAGE_TILE): - tile = views.IndexPage.get_categories_tile() + tile = views.IndexPage.get_categories_tile() first_url, second_url, third_url = [link['url'] for link in tile['some_section']] self.assertEqual('/section/first/', first_url) self.assertEqual('/section/second/', second_url) diff --git a/templates/layout/base.html b/templates/layout/base.html index abf20725..fa67bfd7 100644 --- a/templates/layout/base.html +++ b/templates/layout/base.html @@ -8,7 +8,7 @@ <head> {% include 'layout/google_tag_manager.html' with DEBUG=DEBUG is_head_tag=True %} {% if page %} - {% include 'layout/metadata.html' with page=page skip_canonical=skip_canonical paginated=paginated only %} + {% include 'layout/metadata.html' with page=page request=request paginated=paginated only %} {% endif %} {% block stylesheets %} <link rel="stylesheet" href="{% static 'css/styles.min.css' %}"> diff --git a/templates/layout/metadata.html b/templates/layout/metadata.html index 6e15eab1..e154eb3b 100644 --- a/templates/layout/metadata.html +++ b/templates/layout/metadata.html @@ -37,16 +37,15 @@ {# seo guys desired this tag: http://prntscr.com/gt5jh1 #} <meta name="cmsmagazine" content="8a67cdaf9ded6448bd3626abd67b56e4"> -{# seo guys want just to skip canonical on several pages #} -{% if not skip_canonical %} - <link rel="canonical" href="{{ page.get_absolute_url }}"> - {% if paginated.page.has_previous %} - <link rel="prev" href="{% if paginated.page.previous_page_number == 1 %}{{ page.get_absolute_url }}{% else %}{{ page.get_absolute_url }}?page={{ paginated.page.previous_page_number }}{% endif %}"> - {% endif %} - {% if paginated.page.has_next %} - <link rel="next" href="{{ page.get_absolute_url }}?page={{ paginated.page.next_page_number }}"> - {% endif %} +{# request path is current path, but without http url's query string #} +<link rel="canonical" href="{{ request.path }}"> +{% if paginated.page.has_previous %} + <link rel="prev" href="{{ request.path }}{% if paginated.page.previous_page_number > 1 %}?page={{ paginated.page.previous_page_number }}{% endif %}"> {% endif %} +{% if paginated.page.has_next %} + <link rel="next" href="{{ request.path }}?page={{ paginated.page.next_page_number }}"> +{% endif %} + <link rel="icon" type="image/x-icon" href="{% static 'images/favicon.ico' %}"> <link rel="android-touch-icon" type="image/x-icon" href="{% static 'images/favicon.ico' %}"> <link rel="apple-touch-icon" type="image/x-icon" href="{% static 'images/favicon.ico' %}">
huggingface__optimum-669
A weird input in onnx graph from ORTModel pretrained FlauBERT ### System Info ```shell Python 3.8.10 "optimum[onnxruntime]==1.5.0" "transformers==4.25.1" ``` ### Who can help? Probably export bug so -> @lewtun, @michaelbenayoun ### Information - [ ] The official example scripts - [X] My own modified scripts ### Tasks - [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...) - [X] My own task or dataset (give details below) ### Reproduction ``` from optimum.onnxruntime import ORTModelForFeatureExtraction from transformers import AutoTokenizer model = ORTModelForFeatureExtraction.from_pretrained('flaubert/flaubert_base_uncased', from_transformers=True) tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_uncased") inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt") pred = model(**inputs) ``` ### Expected behavior I should get a `BaseModelOutPut` with `last_hidden_state` tensor as `pred` variable (or something close to that). Actually, when I replace the FlauBERT model `flaubert/flaubert_base_uncased` by another french model CamemBERT `camembert/camembert-base` the above snippet works as expected. (I tried this on my cpu, no gpus involved) But I get an `InvalidArgument: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid Feed Input Name:token_type_ids`. So I investigate the input of the onnx model graph and find a weird third input with static dimensions: ``` name: "input.5" type { tensor_type { elem_type: 7 shape { dim { dim_value: 2 } dim { dim_value: 8 } } } } ``` This input should probably not be there and I suspect that the export is broken in some way for FlauBert model. (I also get `TraceWarning` when exporting caused by asserts located in the `forward` method of the transformer implementation of FlauBERT but I am not sure if it is related to my issue)
[ { "content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Model specific ONNX configurations.\"\"\"\nimport random\nfrom typing import TYPE_CHECKING, Any, List, Mapping, Optional, Tuple\n\nfrom packaging import version\n\nfrom ...utils import (\n DEFAULT_DUMMY_SHAPES,\n DummyAudioInputGenerator,\n DummyDecoderTextInputGenerator,\n DummyPastKeyValuesGenerator,\n DummySeq2SeqDecoderTextInputGenerator,\n DummySeq2SeqPastKeyValuesGenerator,\n DummyTextInputGenerator,\n DummyTimestepInputGenerator,\n DummyVisionInputGenerator,\n NormalizedConfig,\n NormalizedSeq2SeqConfig,\n NormalizedTextAndVisionConfig,\n NormalizedTextConfig,\n NormalizedVisionConfig,\n logging,\n)\nfrom .base import ConfigBehavior, OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom .config import (\n AudioOnnxConfig,\n AudioToTextOnnxConfig,\n TextAndVisionOnnxConfig,\n TextDecoderOnnxConfig,\n TextEncoderOnnxConfig,\n TextSeq2SeqOnnxConfig,\n VisionOnnxConfig,\n)\n\n\nif TYPE_CHECKING:\n from transformers import PretrainedConfig\n\n from ...utils import DummyInputGenerator\n from .base import PatchingSpec\n\nlogger = logging.get_logger(__name__)\n\n\nclass BertOnnxConfig(TextEncoderOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task == \"multiple-choice\":\n dynamic_axis = {0: \"batch_size\", 1: \"num_choices\", 2: \"sequence_length\"}\n else:\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\n \"input_ids\": dynamic_axis,\n \"attention_mask\": dynamic_axis,\n \"token_type_ids\": dynamic_axis,\n }\n\n\nclass AlbertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass ConvBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass ElectraOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass RoFormerOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass SqueezeBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass MobileBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass XLMOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass DistilBertOnnxConfig(BertOnnxConfig):\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task == \"multiple-choice\":\n dynamic_axis = {0: \"batch_size\", 1: \"num_choices\", 2: \"sequence_length\"}\n else:\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\"input_ids\": dynamic_axis, \"attention_mask\": dynamic_axis}\n\n\nclass RobertaOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass CamembertOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass FlaubertOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass IBertOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass XLMRobertaOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass BigBirdOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass DebertaOnnxConfig(BertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n common_inputs = super().inputs\n if self._config.type_vocab_size == 0:\n common_inputs.pop(\"token_type_ids\")\n return common_inputs\n\n\nclass DebertaV2OnnxConfig(DebertaOnnxConfig):\n pass\n\n\nclass GPT2OnnxConfig(TextDecoderOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers=\"n_layer\", num_attention_heads=\"n_head\")\n\n @property\n def values_override(self) -> Optional[Mapping[str, Any]]:\n pad_value_override = {}\n if not getattr(self._config, \"pad_token_id\", None):\n pad_value_override = {\"pad_token_id\": 0}\n super_values_override = super().values_override\n if super_values_override:\n return {**super_values_override, **pad_value_override}\n return pad_value_override\n\n\nclass GPTJOnnxConfig(GPT2OnnxConfig):\n pass\n\n\nclass CodeGenOnnxConfig(GPT2OnnxConfig):\n pass\n\n\nclass GPTNeoOnnxConfig(TextDecoderOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_attention_heads=\"num_heads\")\n\n\nclass BloomDummyPastKeyValuesGenerator(DummyPastKeyValuesGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n past_key_shape = (\n self.batch_size * self.num_attention_heads,\n self.hidden_size // self.num_attention_heads,\n self.sequence_length,\n )\n past_value_shape = (\n self.batch_size * self.num_attention_heads,\n self.sequence_length,\n self.hidden_size // self.num_attention_heads,\n )\n return [\n (\n self.random_float_tensor(past_key_shape, framework=framework),\n self.random_float_tensor(past_value_shape, framework=framework),\n )\n for _ in range(self.num_layers)\n ]\n\n\nclass BloomOnnxConfig(TextDecoderOnnxConfig):\n DUMMY_INPUT_GENERATOR_CLASSES = (\n BloomDummyPastKeyValuesGenerator,\n ) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers=\"n_layer\", num_attention_heads=\"n_head\")\n\n def add_past_key_values(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):\n \"\"\"\n Refer to OnnxConfigWithPast in base.py\n \"\"\"\n if direction not in [\"inputs\", \"outputs\"]:\n raise ValueError(f'direction must either be \"inputs\" or \"outputs\", but {direction} was given')\n\n name = \"past_key_values\" if direction == \"inputs\" else \"present\"\n for i in range(self._normalized_config.num_layers):\n inputs_or_outputs[f\"{name}.{i}.key\"] = {0: \"batch_size\", 2: \"past_sequence_length + sequence_length\"}\n inputs_or_outputs[f\"{name}.{i}.value\"] = {0: \"batch_size\", 1: \"past_sequence_length + sequence_length\"}\n\n\nclass T5DummySeq2SeqPastKeyValuesGenerator(DummySeq2SeqPastKeyValuesGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n encoder_shape = (\n self.batch_size,\n self.normalized_config.encoder_num_attention_heads,\n self.encoder_sequence_length,\n self.normalized_config.key_value_dim,\n )\n decoder_shape = (\n self.batch_size,\n self.normalized_config.decoder_num_attention_heads,\n self.sequence_length,\n self.normalized_config.key_value_dim,\n )\n return [\n (\n self.random_float_tensor(decoder_shape, framework=framework),\n self.random_float_tensor(decoder_shape, framework=framework),\n self.random_float_tensor(encoder_shape, framework=framework),\n self.random_float_tensor(encoder_shape, framework=framework),\n )\n for _ in range(self.normalized_config.decoder_num_layers)\n ]\n\n\nclass T5OnnxConfig(TextSeq2SeqOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n DUMMY_INPUT_GENERATOR_CLASSES = TextSeq2SeqOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[:-1] + (\n T5DummySeq2SeqPastKeyValuesGenerator,\n )\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n hidden_size=\"d_model\",\n num_attention_heads=\"num_heads\",\n encoder_num_layers=\"num_layers\",\n decoder_num_layers=\"num_decoder_layers\",\n key_value_dim=\"d_kv\",\n allow_new=True,\n )\n\n\nclass MT5OnnxConfig(T5OnnxConfig):\n ATOL_FOR_VALIDATION = 1e-4\n\n\nclass LongT5OnnxConfig(T5OnnxConfig):\n pass\n\n\nclass BartDummyTextInputGenerator(DummyTextInputGenerator):\n def __init__(\n self,\n task: str,\n normalized_config: NormalizedSeq2SeqConfig,\n batch_size: int = DEFAULT_DUMMY_SHAPES[\"batch_size\"],\n sequence_length: int = DEFAULT_DUMMY_SHAPES[\"sequence_length\"],\n num_choices: int = DEFAULT_DUMMY_SHAPES[\"num_choices\"],\n random_batch_size_range: Optional[Tuple[int, int]] = None,\n random_sequence_length_range: Optional[Tuple[int, int]] = None,\n random_num_choices_range: Optional[Tuple[int, int]] = None,\n force_eos_token_id_presence: bool = True,\n **kwargs,\n ):\n super().__init__(\n task,\n normalized_config,\n batch_size=batch_size,\n sequence_length=sequence_length,\n num_choices=num_choices,\n random_batch_size_range=random_batch_size_range,\n random_sequence_length_range=random_sequence_length_range,\n random_num_choices_range=random_num_choices_range,\n )\n self.force_eos_token_id_presence = force_eos_token_id_presence\n self.eos_token_id = normalized_config.eos_token_id\n\n def generate(self, input_name: str, framework: str = \"pt\"):\n int_tensor = super().generate(input_name, framework=framework)\n # This inserts EOS_TOKEN_ID at random locations along the sequence length dimension.\n if self.force_eos_token_id_presence and \"input_ids\" in input_name and self.task == \"sequence-classification\":\n for idx in range(self.batch_size):\n if self.eos_token_id in int_tensor[idx]:\n continue\n random_idx = random.randint(1, self.sequence_length - 1)\n int_tensor[idx][random_idx] = self.eos_token_id\n\n return int_tensor\n\n\nclass BartOnnxConfig(TextSeq2SeqOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n encoder_num_layers=\"encoder_layers\",\n decoder_num_layers=\"decoder_layers\",\n num_layers=\"decoder_layers\", # Used for the causal-lm task past key values input generation.\n encoder_num_attention_heads=\"encoder_attention_heads\",\n decoder_num_attention_heads=\"decoder_attention_heads\",\n eos_token_id=\"eos_token_id\",\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (\n BartDummyTextInputGenerator,\n {\n \"default\": DummySeq2SeqDecoderTextInputGenerator,\n \"causal-lm\": DummyDecoderTextInputGenerator,\n },\n {\n \"default\": DummySeq2SeqPastKeyValuesGenerator,\n \"causal-lm\": DummyPastKeyValuesGenerator,\n },\n )\n\n def _create_dummy_input_generator_classes(self, **kwargs) -> List[\"DummyInputGenerator\"]:\n dummy_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[0](\n self.task, self._normalized_config, **kwargs\n )\n task = \"default\" if self.task != \"causal-lm\" else \"causal-lm\"\n dummy_decoder_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[1][task](\n self.task, self._normalized_config, **kwargs\n )\n kwargs = {}\n if self.task != \"causal-lm\":\n kwargs[\"encoder_sequence_length\"] = dummy_text_input_generator.sequence_length\n\n dummy_seq2seq_past_key_values_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[2][task](\n self.task, self._normalized_config, batch_size=dummy_text_input_generator.batch_size, **kwargs\n )\n dummy_inputs_generators = [\n dummy_text_input_generator,\n dummy_decoder_text_input_generator,\n dummy_seq2seq_past_key_values_generator,\n ]\n\n return dummy_inputs_generators\n\n @property\n def inputs_for_default_and_seq2seq_lm(self):\n return super().inputs\n\n @property\n def inputs_for_causal_lm(self):\n common_inputs = {\n \"input_ids\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n }\n if self.use_past_in_inputs:\n for i in range(self._normalized_config.decoder_num_layers):\n common_inputs[f\"past_key_values.{i}.key\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n common_inputs[f\"past_key_values.{i}.value\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n\n return common_inputs\n\n @property\n def inputs_for_other_tasks(self):\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n }\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n inputs_properties = {\n \"default\": self.inputs_for_default_and_seq2seq_lm,\n \"seq2seq-lm\": self.inputs_for_default_and_seq2seq_lm,\n \"causal-lm\": self.inputs_for_causal_lm,\n \"other\": self.inputs_for_other_tasks,\n }\n return inputs_properties.get(self.task, inputs_properties[\"other\"])\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task in [\"default\", \"seq2seq-lm\"]:\n common_outputs = super().outputs\n else:\n common_outputs = super(OnnxConfigWithPast, self).outputs\n if self.use_present_in_outputs:\n for i in range(self._normalized_config.encoder_num_layers):\n common_outputs[f\"present.{i}.key\"] = {0: \"batch_size\", 2: \"past_sequence_length + sequence_length\"}\n common_outputs[f\"present.{i}.value\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n return common_outputs\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n # This will handle the attention mask padding when Bart is used for causal-lm.\n if self.task == \"causal-lm\":\n self.PAD_ATTENTION_MASK_TO_MATCH_TOTAL_SEQUENCE_LENGTH = True\n\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n\n # Setting it back to the default version.\n self.PAD_ATTENTION_MASK_TO_MATCH_TOTAL_SEQUENCE_LENGTH = False\n return dummy_inputs\n\n def flatten_past_key_values(self, flattened_output, name, idx, t):\n if self.task in [\"default\", \"seq2seq-lm\"]:\n flattened_output = super().flatten_past_key_values(flattened_output, name, idx, t)\n else:\n flattened_output = super(OnnxSeq2SeqConfigWithPast, self).flatten_past_key_values(\n flattened_output, name, idx, t\n )\n\n\nclass MBartOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass M2M100OnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BlenderbotOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BlenderbotSmallOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BigBirdPegasusOnnxConfig(BartOnnxConfig):\n def generate_dummy_inputs_for_validation(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:\n if self._behavior is ConfigBehavior.ENCODER:\n # TODO: check why the attention mask is not present in the exported model\n reference_model_inputs.pop(\"attention_mask\")\n return super().generate_dummy_inputs_for_validation(reference_model_inputs)\n\n\nclass PegasusOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass MarianOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass ViTOnnxConfig(VisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig\n MIN_TORCH_VERSION = version.parse(\"1.11\")\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"pixel_values\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}}\n\n\nclass LevitOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass DeiTOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass BeitOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass ConvNextOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass MobileViTOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass ResNetOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n\n\nclass DetrOnnxConfig(ViTOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # TODO: is pixel mask needed?\n return {**super().inputs, \"pixel_mask\": {0: \"batch_size\"}}\n\n\nclass YolosOnnxConfig(ViTOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass SwinOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass PoolFormerOnnxConfig(ViTOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig\n ATOL_FOR_VALIDATION = 2e-3\n\n\nclass SegformerOnnxConfig(YolosOnnxConfig):\n pass\n\n\nclass MobileNetV1OnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"pixel_values\": {0: \"batch_size\"}}\n\n\nclass MobileNetV2OnnxConfig(MobileNetV1OnnxConfig):\n pass\n\n\nclass CLIPNormalizedConfig(NormalizedTextAndVisionConfig):\n TEXT_CONFIG = \"text_config\"\n VISION_CONFIG = \"vision_config\"\n\n\nclass CLIPOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = CLIPNormalizedConfig\n DEFAULT_ONNX_OPSET = 14\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"pixel_values\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"logits_per_image\": {0: \"batch_size\"},\n \"logits_per_text\": {0: \"batch_size\"},\n \"text_embeds\": {0: \"batch_size\"},\n \"image_embeds\": {0: \"batch_size\"},\n }\n\n\nclass CLIPTextOnnxConfig(TextEncoderOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n vocab_size=\"vocab_size\",\n sequence_length=\"max_position_embeddings\",\n allow_new=True,\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"last_hidden_state\": {0: \"batch_size\", 1: \"sequence_length\", 2: \"feature_dim\"},\n \"pooler_output\": {0: \"batch_size\", 1: \"feature_dim\"},\n }\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n if framework == \"pt\":\n import torch\n\n dummy_inputs[\"input_ids\"] = dummy_inputs[\"input_ids\"].to(dtype=torch.int32)\n return dummy_inputs\n\n\nclass UNetOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n image_size=\"sample_size\",\n num_channels=\"in_channels\",\n hidden_size=\"cross_attention_dim\",\n vocab_size=\"norm_num_groups\",\n allow_new=True,\n )\n\n DUMMY_INPUT_GENERATOR_CLASSES = (\n DummyVisionInputGenerator,\n DummyTimestepInputGenerator,\n DummySeq2SeqDecoderTextInputGenerator,\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n \"timestep\": {0: \"steps\"},\n \"encoder_hidden_states\": {0: \"batch_size\", 1: \"sequence_length\", 2: \"feature_dim\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"out_sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n }\n\n def output_names_for_validation(self, reference_output_names: List[str]) -> List[str]:\n return [\"sample\"]\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n dummy_inputs[\"encoder_hidden_states\"] = dummy_inputs[\"encoder_hidden_states\"][0]\n return dummy_inputs\n\n\nclass VaeOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n num_channels=\"latent_channels\",\n allow_new=True,\n )\n\n DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator,)\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"latent_sample\": {0: \"batch_size\", 1: \"num_channels_latent\", 2: \"height_latent\", 3: \"width_latent\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n }\n\n\nclass GroupViTOnnxConfig(CLIPOnnxConfig):\n pass\n\n\nclass OwlViTOnnxConfig(CLIPOnnxConfig):\n pass\n\n\nclass LayoutLMOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(\n allow_new=True,\n MAX_2D_POSITION_EMBEDDINGS=\"max_2d_position_embeddings\",\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"bbox\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"token_type_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n\nclass LayoutLMv3OnnxConfig(TextAndVisionOnnxConfig):\n MIN_TORCH_VERSION = version.parse(\"1.12\")\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(\n allow_new=True,\n MAX_2D_POSITION_EMBEDDINGS=\"max_2d_position_embeddings\",\n image_size=\"input_size\",\n )\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task in [\"sequence-classification\", \"question-answering\"]:\n pixel_values_dynamic_axes = {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}\n else:\n pixel_values_dynamic_axes = {0: \"batch_size\", 1: \"num_channels\"}\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"bbox\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"pixel_values\": pixel_values_dynamic_axes,\n }\n\n\nclass Data2VecTextOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass Data2VecVisionOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass Data2VecAudioOnnxConfig(AudioOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig\n ATOL_FOR_VALIDATION = 1e-4\n\n\nclass PerceiverDummyInputGenerator(DummyVisionInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n input_ = super().generate(input_name, framework)\n # if input_name == \"pixel_values\":\n # input_ = input_[None, :]\n return input_\n\n\nclass PerceiverOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig\n DUMMY_INPUT_GENERATOR_CLASSES = (\n PerceiverDummyInputGenerator,\n ) + TextAndVisionOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES\n\n def __init__(\n self, config: \"PretrainedConfig\", task: str = \"default\", patching_specs: Optional[List[\"PatchingSpec\"]] = None\n ):\n super().__init__(config, task=task, patching_specs=patching_specs)\n self.is_generating_dummy_inputs = False\n\n @property\n def inputs_name(self):\n if self.is_generating_dummy_inputs:\n if self.task in [\"masked-lm\", \"sequence-classification\"]:\n return \"input_ids\"\n else:\n return \"pixel_values\"\n else:\n return \"inputs\"\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # TODO: validate that.\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\n self.inputs_name: dynamic_axis,\n # TODO: should we add the attention_mask?\n # This breaks things for image-classification, suspected bug is the DummyInputGenerators not having the\n # same num_channels / sequence_length.\n # \"attention_mask\": dynamic_axis,\n }\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n self.is_generating_dummy_inputs = True\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n specialized_inputs_name = self.inputs_name\n self.is_generating_dummy_inputs = True\n dummy_inputs[self.inputs_name] = dummy_inputs.pop(specialized_inputs_name)\n return dummy_inputs\n\n\nclass HubertOnnxConfig(AudioOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig\n\n\nclass Wav2Vec2OnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass Wav2Vec2ConformerOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass SEWOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass SEWDOnnxConfig(HubertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass UniSpeechOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass UniSpeechSATOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass WavLMOnnxConfig(HubertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass ASTDummyAudioInputGenerator(DummyAudioInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n shape = [self.batch_size, self.normalized_config.max_length, self.normalized_config.num_mel_bins]\n if input_name == \"input_values\":\n return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n return super().generate(input_name, framework=framework)\n\n\nclass ASTOnnxConfig(OnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n num_mel_bins=\"num_mel_bins\", max_length=\"max_length\", allow_new=True\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (ASTDummyAudioInputGenerator,)\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"input_values\": {0: \"batch_size\"}}\n\n\n# TODO: currently disabled because an operator seems not supported by ONNX.\n# class MCTCTDummyAudioInputGenerator(DummyAudioInputGenerator):\n# def generate(self, input_name: str, framework: str = \"pt\"):\n# shape = [self.batch_size, self.sequence_length, self.normalized_config.input_features_per_channel]\n# if input_name == \"input_features\":\n# return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n# return super().generate(input_name, framework=framework)\n#\n#\n# class MCTCTOnnxConfig(OnnxConfig):\n# NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(input_features_per_channel=\"input_feat_per_channel\", allow_new=True)\n# DUMMY_INPUT_GENERATOR_CLASSES = (MCTCTDummyAudioInputGenerator,)\n# DEFAULT_ONNX_OPSET = 13\n#\n# @property\n# def inputs(self) -> Mapping[str, Mapping[int, str]]:\n# return {\"input_features\": {0: \"batch_size\", 1: \"sequence_classification\"}}\n\n\nclass WhisperOnnxConfig(AudioToTextOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig\n ATOL_FOR_VALIDATION = 1e-3\n\n\nclass Speech2TextDummyAudioInputGenerator(DummyAudioInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n shape = [self.batch_size, self.sequence_length, self.normalized_config.input_features_per_channel]\n if input_name == \"input_features\":\n return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n return super().generate(input_name, framework=framework)\n\n\nclass Speech2TextOnnxConfig(AudioToTextOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n input_features_per_channel=\"input_feat_per_channel\", allow_new=True\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (\n Speech2TextDummyAudioInputGenerator,\n ) + AudioToTextOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[1:]\n", "path": "optimum/exporters/onnx/model_configs.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Model specific ONNX configurations.\"\"\"\nimport random\nfrom typing import TYPE_CHECKING, Any, List, Mapping, Optional, Tuple\n\nfrom packaging import version\n\nfrom ...utils import (\n DEFAULT_DUMMY_SHAPES,\n DummyAudioInputGenerator,\n DummyDecoderTextInputGenerator,\n DummyPastKeyValuesGenerator,\n DummySeq2SeqDecoderTextInputGenerator,\n DummySeq2SeqPastKeyValuesGenerator,\n DummyTextInputGenerator,\n DummyTimestepInputGenerator,\n DummyVisionInputGenerator,\n NormalizedConfig,\n NormalizedSeq2SeqConfig,\n NormalizedTextAndVisionConfig,\n NormalizedTextConfig,\n NormalizedVisionConfig,\n logging,\n)\nfrom .base import ConfigBehavior, OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom .config import (\n AudioOnnxConfig,\n AudioToTextOnnxConfig,\n TextAndVisionOnnxConfig,\n TextDecoderOnnxConfig,\n TextEncoderOnnxConfig,\n TextSeq2SeqOnnxConfig,\n VisionOnnxConfig,\n)\n\n\nif TYPE_CHECKING:\n from transformers import PretrainedConfig\n\n from ...utils import DummyInputGenerator\n from .base import PatchingSpec\n\nlogger = logging.get_logger(__name__)\n\n\nclass BertOnnxConfig(TextEncoderOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task == \"multiple-choice\":\n dynamic_axis = {0: \"batch_size\", 1: \"num_choices\", 2: \"sequence_length\"}\n else:\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\n \"input_ids\": dynamic_axis,\n \"attention_mask\": dynamic_axis,\n \"token_type_ids\": dynamic_axis,\n }\n\n\nclass AlbertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass ConvBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass ElectraOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass RoFormerOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass SqueezeBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass MobileBertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass XLMOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass DistilBertOnnxConfig(BertOnnxConfig):\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task == \"multiple-choice\":\n dynamic_axis = {0: \"batch_size\", 1: \"num_choices\", 2: \"sequence_length\"}\n else:\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\"input_ids\": dynamic_axis, \"attention_mask\": dynamic_axis}\n\n\nclass RobertaOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass CamembertOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass FlaubertOnnxConfig(BertOnnxConfig):\n pass\n\n\nclass IBertOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass XLMRobertaOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass BigBirdOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass DebertaOnnxConfig(BertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n common_inputs = super().inputs\n if self._config.type_vocab_size == 0:\n common_inputs.pop(\"token_type_ids\")\n return common_inputs\n\n\nclass DebertaV2OnnxConfig(DebertaOnnxConfig):\n pass\n\n\nclass GPT2OnnxConfig(TextDecoderOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers=\"n_layer\", num_attention_heads=\"n_head\")\n\n @property\n def values_override(self) -> Optional[Mapping[str, Any]]:\n pad_value_override = {}\n if not getattr(self._config, \"pad_token_id\", None):\n pad_value_override = {\"pad_token_id\": 0}\n super_values_override = super().values_override\n if super_values_override:\n return {**super_values_override, **pad_value_override}\n return pad_value_override\n\n\nclass GPTJOnnxConfig(GPT2OnnxConfig):\n pass\n\n\nclass CodeGenOnnxConfig(GPT2OnnxConfig):\n pass\n\n\nclass GPTNeoOnnxConfig(TextDecoderOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_attention_heads=\"num_heads\")\n\n\nclass BloomDummyPastKeyValuesGenerator(DummyPastKeyValuesGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n past_key_shape = (\n self.batch_size * self.num_attention_heads,\n self.hidden_size // self.num_attention_heads,\n self.sequence_length,\n )\n past_value_shape = (\n self.batch_size * self.num_attention_heads,\n self.sequence_length,\n self.hidden_size // self.num_attention_heads,\n )\n return [\n (\n self.random_float_tensor(past_key_shape, framework=framework),\n self.random_float_tensor(past_value_shape, framework=framework),\n )\n for _ in range(self.num_layers)\n ]\n\n\nclass BloomOnnxConfig(TextDecoderOnnxConfig):\n DUMMY_INPUT_GENERATOR_CLASSES = (\n BloomDummyPastKeyValuesGenerator,\n ) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers=\"n_layer\", num_attention_heads=\"n_head\")\n\n def add_past_key_values(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):\n \"\"\"\n Refer to OnnxConfigWithPast in base.py\n \"\"\"\n if direction not in [\"inputs\", \"outputs\"]:\n raise ValueError(f'direction must either be \"inputs\" or \"outputs\", but {direction} was given')\n\n name = \"past_key_values\" if direction == \"inputs\" else \"present\"\n for i in range(self._normalized_config.num_layers):\n inputs_or_outputs[f\"{name}.{i}.key\"] = {0: \"batch_size\", 2: \"past_sequence_length + sequence_length\"}\n inputs_or_outputs[f\"{name}.{i}.value\"] = {0: \"batch_size\", 1: \"past_sequence_length + sequence_length\"}\n\n\nclass T5DummySeq2SeqPastKeyValuesGenerator(DummySeq2SeqPastKeyValuesGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n encoder_shape = (\n self.batch_size,\n self.normalized_config.encoder_num_attention_heads,\n self.encoder_sequence_length,\n self.normalized_config.key_value_dim,\n )\n decoder_shape = (\n self.batch_size,\n self.normalized_config.decoder_num_attention_heads,\n self.sequence_length,\n self.normalized_config.key_value_dim,\n )\n return [\n (\n self.random_float_tensor(decoder_shape, framework=framework),\n self.random_float_tensor(decoder_shape, framework=framework),\n self.random_float_tensor(encoder_shape, framework=framework),\n self.random_float_tensor(encoder_shape, framework=framework),\n )\n for _ in range(self.normalized_config.decoder_num_layers)\n ]\n\n\nclass T5OnnxConfig(TextSeq2SeqOnnxConfig):\n DEFAULT_ONNX_OPSET = 13\n DUMMY_INPUT_GENERATOR_CLASSES = TextSeq2SeqOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[:-1] + (\n T5DummySeq2SeqPastKeyValuesGenerator,\n )\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n hidden_size=\"d_model\",\n num_attention_heads=\"num_heads\",\n encoder_num_layers=\"num_layers\",\n decoder_num_layers=\"num_decoder_layers\",\n key_value_dim=\"d_kv\",\n allow_new=True,\n )\n\n\nclass MT5OnnxConfig(T5OnnxConfig):\n ATOL_FOR_VALIDATION = 1e-4\n\n\nclass LongT5OnnxConfig(T5OnnxConfig):\n pass\n\n\nclass BartDummyTextInputGenerator(DummyTextInputGenerator):\n def __init__(\n self,\n task: str,\n normalized_config: NormalizedSeq2SeqConfig,\n batch_size: int = DEFAULT_DUMMY_SHAPES[\"batch_size\"],\n sequence_length: int = DEFAULT_DUMMY_SHAPES[\"sequence_length\"],\n num_choices: int = DEFAULT_DUMMY_SHAPES[\"num_choices\"],\n random_batch_size_range: Optional[Tuple[int, int]] = None,\n random_sequence_length_range: Optional[Tuple[int, int]] = None,\n random_num_choices_range: Optional[Tuple[int, int]] = None,\n force_eos_token_id_presence: bool = True,\n **kwargs,\n ):\n super().__init__(\n task,\n normalized_config,\n batch_size=batch_size,\n sequence_length=sequence_length,\n num_choices=num_choices,\n random_batch_size_range=random_batch_size_range,\n random_sequence_length_range=random_sequence_length_range,\n random_num_choices_range=random_num_choices_range,\n )\n self.force_eos_token_id_presence = force_eos_token_id_presence\n self.eos_token_id = normalized_config.eos_token_id\n\n def generate(self, input_name: str, framework: str = \"pt\"):\n int_tensor = super().generate(input_name, framework=framework)\n # This inserts EOS_TOKEN_ID at random locations along the sequence length dimension.\n if self.force_eos_token_id_presence and \"input_ids\" in input_name and self.task == \"sequence-classification\":\n for idx in range(self.batch_size):\n if self.eos_token_id in int_tensor[idx]:\n continue\n random_idx = random.randint(1, self.sequence_length - 1)\n int_tensor[idx][random_idx] = self.eos_token_id\n\n return int_tensor\n\n\nclass BartOnnxConfig(TextSeq2SeqOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n encoder_num_layers=\"encoder_layers\",\n decoder_num_layers=\"decoder_layers\",\n num_layers=\"decoder_layers\", # Used for the causal-lm task past key values input generation.\n encoder_num_attention_heads=\"encoder_attention_heads\",\n decoder_num_attention_heads=\"decoder_attention_heads\",\n eos_token_id=\"eos_token_id\",\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (\n BartDummyTextInputGenerator,\n {\n \"default\": DummySeq2SeqDecoderTextInputGenerator,\n \"causal-lm\": DummyDecoderTextInputGenerator,\n },\n {\n \"default\": DummySeq2SeqPastKeyValuesGenerator,\n \"causal-lm\": DummyPastKeyValuesGenerator,\n },\n )\n\n def _create_dummy_input_generator_classes(self, **kwargs) -> List[\"DummyInputGenerator\"]:\n dummy_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[0](\n self.task, self._normalized_config, **kwargs\n )\n task = \"default\" if self.task != \"causal-lm\" else \"causal-lm\"\n dummy_decoder_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[1][task](\n self.task, self._normalized_config, **kwargs\n )\n kwargs = {}\n if self.task != \"causal-lm\":\n kwargs[\"encoder_sequence_length\"] = dummy_text_input_generator.sequence_length\n\n dummy_seq2seq_past_key_values_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[2][task](\n self.task, self._normalized_config, batch_size=dummy_text_input_generator.batch_size, **kwargs\n )\n dummy_inputs_generators = [\n dummy_text_input_generator,\n dummy_decoder_text_input_generator,\n dummy_seq2seq_past_key_values_generator,\n ]\n\n return dummy_inputs_generators\n\n @property\n def inputs_for_default_and_seq2seq_lm(self):\n return super().inputs\n\n @property\n def inputs_for_causal_lm(self):\n common_inputs = {\n \"input_ids\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n }\n if self.use_past_in_inputs:\n for i in range(self._normalized_config.decoder_num_layers):\n common_inputs[f\"past_key_values.{i}.key\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n common_inputs[f\"past_key_values.{i}.value\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n\n return common_inputs\n\n @property\n def inputs_for_other_tasks(self):\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"encoder_sequence_length\"},\n }\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n inputs_properties = {\n \"default\": self.inputs_for_default_and_seq2seq_lm,\n \"seq2seq-lm\": self.inputs_for_default_and_seq2seq_lm,\n \"causal-lm\": self.inputs_for_causal_lm,\n \"other\": self.inputs_for_other_tasks,\n }\n return inputs_properties.get(self.task, inputs_properties[\"other\"])\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task in [\"default\", \"seq2seq-lm\"]:\n common_outputs = super().outputs\n else:\n common_outputs = super(OnnxConfigWithPast, self).outputs\n if self.use_present_in_outputs:\n for i in range(self._normalized_config.encoder_num_layers):\n common_outputs[f\"present.{i}.key\"] = {0: \"batch_size\", 2: \"past_sequence_length + sequence_length\"}\n common_outputs[f\"present.{i}.value\"] = {\n 0: \"batch_size\",\n 2: \"past_sequence_length + sequence_length\",\n }\n return common_outputs\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n # This will handle the attention mask padding when Bart is used for causal-lm.\n if self.task == \"causal-lm\":\n self.PAD_ATTENTION_MASK_TO_MATCH_TOTAL_SEQUENCE_LENGTH = True\n\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n\n # Setting it back to the default version.\n self.PAD_ATTENTION_MASK_TO_MATCH_TOTAL_SEQUENCE_LENGTH = False\n return dummy_inputs\n\n def flatten_past_key_values(self, flattened_output, name, idx, t):\n if self.task in [\"default\", \"seq2seq-lm\"]:\n flattened_output = super().flatten_past_key_values(flattened_output, name, idx, t)\n else:\n flattened_output = super(OnnxSeq2SeqConfigWithPast, self).flatten_past_key_values(\n flattened_output, name, idx, t\n )\n\n\nclass MBartOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass M2M100OnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BlenderbotOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BlenderbotSmallOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass BigBirdPegasusOnnxConfig(BartOnnxConfig):\n def generate_dummy_inputs_for_validation(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:\n if self._behavior is ConfigBehavior.ENCODER:\n # TODO: check why the attention mask is not present in the exported model\n reference_model_inputs.pop(\"attention_mask\")\n return super().generate_dummy_inputs_for_validation(reference_model_inputs)\n\n\nclass PegasusOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass MarianOnnxConfig(BartOnnxConfig):\n pass\n\n\nclass ViTOnnxConfig(VisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig\n MIN_TORCH_VERSION = version.parse(\"1.11\")\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"pixel_values\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}}\n\n\nclass LevitOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass DeiTOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass BeitOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass ConvNextOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass MobileViTOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass ResNetOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n\n\nclass DetrOnnxConfig(ViTOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # TODO: is pixel mask needed?\n return {**super().inputs, \"pixel_mask\": {0: \"batch_size\"}}\n\n\nclass YolosOnnxConfig(ViTOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass SwinOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass PoolFormerOnnxConfig(ViTOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig\n ATOL_FOR_VALIDATION = 2e-3\n\n\nclass SegformerOnnxConfig(YolosOnnxConfig):\n pass\n\n\nclass MobileNetV1OnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"pixel_values\": {0: \"batch_size\"}}\n\n\nclass MobileNetV2OnnxConfig(MobileNetV1OnnxConfig):\n pass\n\n\nclass CLIPNormalizedConfig(NormalizedTextAndVisionConfig):\n TEXT_CONFIG = \"text_config\"\n VISION_CONFIG = \"vision_config\"\n\n\nclass CLIPOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = CLIPNormalizedConfig\n DEFAULT_ONNX_OPSET = 14\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"pixel_values\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"logits_per_image\": {0: \"batch_size\"},\n \"logits_per_text\": {0: \"batch_size\"},\n \"text_embeds\": {0: \"batch_size\"},\n \"image_embeds\": {0: \"batch_size\"},\n }\n\n\nclass CLIPTextOnnxConfig(TextEncoderOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n vocab_size=\"vocab_size\",\n sequence_length=\"max_position_embeddings\",\n allow_new=True,\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"last_hidden_state\": {0: \"batch_size\", 1: \"sequence_length\", 2: \"feature_dim\"},\n \"pooler_output\": {0: \"batch_size\", 1: \"feature_dim\"},\n }\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n if framework == \"pt\":\n import torch\n\n dummy_inputs[\"input_ids\"] = dummy_inputs[\"input_ids\"].to(dtype=torch.int32)\n return dummy_inputs\n\n\nclass UNetOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n image_size=\"sample_size\",\n num_channels=\"in_channels\",\n hidden_size=\"cross_attention_dim\",\n vocab_size=\"norm_num_groups\",\n allow_new=True,\n )\n\n DUMMY_INPUT_GENERATOR_CLASSES = (\n DummyVisionInputGenerator,\n DummyTimestepInputGenerator,\n DummySeq2SeqDecoderTextInputGenerator,\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n \"timestep\": {0: \"steps\"},\n \"encoder_hidden_states\": {0: \"batch_size\", 1: \"sequence_length\", 2: \"feature_dim\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"out_sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n }\n\n def output_names_for_validation(self, reference_output_names: List[str]) -> List[str]:\n return [\"sample\"]\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n dummy_inputs[\"encoder_hidden_states\"] = dummy_inputs[\"encoder_hidden_states\"][0]\n return dummy_inputs\n\n\nclass VaeOnnxConfig(ViTOnnxConfig):\n ATOL_FOR_VALIDATION = 1e-3\n DEFAULT_ONNX_OPSET = 14\n\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n num_channels=\"latent_channels\",\n allow_new=True,\n )\n\n DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator,)\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"latent_sample\": {0: \"batch_size\", 1: \"num_channels_latent\", 2: \"height_latent\", 3: \"width_latent\"},\n }\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"sample\": {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"},\n }\n\n\nclass GroupViTOnnxConfig(CLIPOnnxConfig):\n pass\n\n\nclass OwlViTOnnxConfig(CLIPOnnxConfig):\n pass\n\n\nclass LayoutLMOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(\n allow_new=True,\n MAX_2D_POSITION_EMBEDDINGS=\"max_2d_position_embeddings\",\n )\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"bbox\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"token_type_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n }\n\n\nclass LayoutLMv3OnnxConfig(TextAndVisionOnnxConfig):\n MIN_TORCH_VERSION = version.parse(\"1.12\")\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(\n allow_new=True,\n MAX_2D_POSITION_EMBEDDINGS=\"max_2d_position_embeddings\",\n image_size=\"input_size\",\n )\n DEFAULT_ONNX_OPSET = 12\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n if self.task in [\"sequence-classification\", \"question-answering\"]:\n pixel_values_dynamic_axes = {0: \"batch_size\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}\n else:\n pixel_values_dynamic_axes = {0: \"batch_size\", 1: \"num_channels\"}\n return {\n \"input_ids\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"attention_mask\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"bbox\": {0: \"batch_size\", 1: \"sequence_length\"},\n \"pixel_values\": pixel_values_dynamic_axes,\n }\n\n\nclass Data2VecTextOnnxConfig(DistilBertOnnxConfig):\n pass\n\n\nclass Data2VecVisionOnnxConfig(ViTOnnxConfig):\n pass\n\n\nclass Data2VecAudioOnnxConfig(AudioOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig\n ATOL_FOR_VALIDATION = 1e-4\n\n\nclass PerceiverDummyInputGenerator(DummyVisionInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n input_ = super().generate(input_name, framework)\n # if input_name == \"pixel_values\":\n # input_ = input_[None, :]\n return input_\n\n\nclass PerceiverOnnxConfig(TextAndVisionOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedTextConfig\n DUMMY_INPUT_GENERATOR_CLASSES = (\n PerceiverDummyInputGenerator,\n ) + TextAndVisionOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES\n\n def __init__(\n self, config: \"PretrainedConfig\", task: str = \"default\", patching_specs: Optional[List[\"PatchingSpec\"]] = None\n ):\n super().__init__(config, task=task, patching_specs=patching_specs)\n self.is_generating_dummy_inputs = False\n\n @property\n def inputs_name(self):\n if self.is_generating_dummy_inputs:\n if self.task in [\"masked-lm\", \"sequence-classification\"]:\n return \"input_ids\"\n else:\n return \"pixel_values\"\n else:\n return \"inputs\"\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # TODO: validate that.\n dynamic_axis = {0: \"batch_size\", 1: \"sequence_length\"}\n return {\n self.inputs_name: dynamic_axis,\n # TODO: should we add the attention_mask?\n # This breaks things for image-classification, suspected bug is the DummyInputGenerators not having the\n # same num_channels / sequence_length.\n # \"attention_mask\": dynamic_axis,\n }\n\n def generate_dummy_inputs(self, framework: str = \"pt\", **kwargs):\n self.is_generating_dummy_inputs = True\n dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)\n specialized_inputs_name = self.inputs_name\n self.is_generating_dummy_inputs = True\n dummy_inputs[self.inputs_name] = dummy_inputs.pop(specialized_inputs_name)\n return dummy_inputs\n\n\nclass HubertOnnxConfig(AudioOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig\n\n\nclass Wav2Vec2OnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass Wav2Vec2ConformerOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass SEWOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass SEWDOnnxConfig(HubertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass UniSpeechOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass UniSpeechSATOnnxConfig(HubertOnnxConfig):\n pass\n\n\nclass WavLMOnnxConfig(HubertOnnxConfig):\n DEFAULT_ONNX_OPSET = 12\n\n\nclass ASTDummyAudioInputGenerator(DummyAudioInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n shape = [self.batch_size, self.normalized_config.max_length, self.normalized_config.num_mel_bins]\n if input_name == \"input_values\":\n return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n return super().generate(input_name, framework=framework)\n\n\nclass ASTOnnxConfig(OnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(\n num_mel_bins=\"num_mel_bins\", max_length=\"max_length\", allow_new=True\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (ASTDummyAudioInputGenerator,)\n ATOL_FOR_VALIDATION = 1e-4\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n return {\"input_values\": {0: \"batch_size\"}}\n\n\n# TODO: currently disabled because an operator seems not supported by ONNX.\n# class MCTCTDummyAudioInputGenerator(DummyAudioInputGenerator):\n# def generate(self, input_name: str, framework: str = \"pt\"):\n# shape = [self.batch_size, self.sequence_length, self.normalized_config.input_features_per_channel]\n# if input_name == \"input_features\":\n# return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n# return super().generate(input_name, framework=framework)\n#\n#\n# class MCTCTOnnxConfig(OnnxConfig):\n# NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(input_features_per_channel=\"input_feat_per_channel\", allow_new=True)\n# DUMMY_INPUT_GENERATOR_CLASSES = (MCTCTDummyAudioInputGenerator,)\n# DEFAULT_ONNX_OPSET = 13\n#\n# @property\n# def inputs(self) -> Mapping[str, Mapping[int, str]]:\n# return {\"input_features\": {0: \"batch_size\", 1: \"sequence_classification\"}}\n\n\nclass WhisperOnnxConfig(AudioToTextOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig\n ATOL_FOR_VALIDATION = 1e-3\n\n\nclass Speech2TextDummyAudioInputGenerator(DummyAudioInputGenerator):\n def generate(self, input_name: str, framework: str = \"pt\"):\n shape = [self.batch_size, self.sequence_length, self.normalized_config.input_features_per_channel]\n if input_name == \"input_features\":\n return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework)\n return super().generate(input_name, framework=framework)\n\n\nclass Speech2TextOnnxConfig(AudioToTextOnnxConfig):\n NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(\n input_features_per_channel=\"input_feat_per_channel\", allow_new=True\n )\n DUMMY_INPUT_GENERATOR_CLASSES = (\n Speech2TextDummyAudioInputGenerator,\n ) + AudioToTextOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[1:]\n", "path": "optimum/exporters/onnx/model_configs.py" } ]
diff --git a/optimum/exporters/onnx/model_configs.py b/optimum/exporters/onnx/model_configs.py index 0fef0b46ee..8167a459a0 100644 --- a/optimum/exporters/onnx/model_configs.py +++ b/optimum/exporters/onnx/model_configs.py @@ -119,7 +119,7 @@ class CamembertOnnxConfig(DistilBertOnnxConfig): pass -class FlaubertOnnxConfig(DistilBertOnnxConfig): +class FlaubertOnnxConfig(BertOnnxConfig): pass
mosaicml__composer-874
Fix typehints for python < 3.9 [PEP 585](https://peps.python.org/pep-0585/) introduces typehint generics for python 3.9. To remain backwards-compatible with python < 3.9, `from __future__ import annotations` must be executed, or otherwise typehints must be specified in a backwards-compatible way (e.g. in quotes). I think the simplest solution is to add the `from __future__ import annotations` call to the relevant files. ** Environment ** - Image: [nvcr.io/nvidia/pytorch:22.03-py3](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel_22-03.html#rel_22-03) - OS: Ubuntu 20.04 - Hardware: N/A - Python 3.8.12 ** To reproduce Steps to reproduce the behavior: 1. `pip install git+https://github.com/mosaicml/composer.git@a264ae95a5a9658e1f82206cf99158281c6dd8ca` (commit a264ae95a5a9658e1f82206cf99158281c6dd8ca) 2. `from composer.functional import cutmix_batch` ## Expected behavior `cutmix_batch` imports successfully. ## Actual behavior ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/__init__.py", line 3, in <module> from composer import algorithms as algorithms File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/algorithms/__init__.py", line 39, in <module> from composer.algorithms.algorithm_hparams import AlgorithmHparams File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/algorithms/algorithm_hparams.py", line 13, in <module> from composer.core.algorithm import Algorithm File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/core/__init__.py", line 13, in <module> from composer.core.engine import Engine as Engine File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/core/engine.py", line 74, in <module> from composer.profiler import ProfilerAction File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/profiler/__init__.py", line 17, in <module> from composer.profiler.dataloader_profiler import DataLoaderProfiler File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/profiler/dataloader_profiler.py", line 10, in <module> from composer.datasets.dataloader import WrappedDataLoader File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/datasets/__init__.py", line 17, in <module> from composer.datasets.ade20k import ADE20kDatasetHparams, ADE20kWebDatasetHparams File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/datasets/ade20k.py", line 24, in <module> from composer.datasets.imagenet import IMAGENET_CHANNEL_MEAN, IMAGENET_CHANNEL_STD File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/datasets/imagenet.py", line 24, in <module> from composer.datasets.ffcv_utils import write_ffcv_dataset File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/datasets/ffcv_utils.py", line 9, in <module> from composer.datasets.webdataset_utils import init_webdataset_meta File "/Users/jacob.schmidt/Projects/photosynthetic/venv/lib/python3.8/site-packages/composer/datasets/webdataset_utils.py", line 190, in <module> cache_verbose: bool = False) -> Tuple[WebDataset, dict]: NameError: name 'WebDataset' is not defined ``` Here, `webdataset` is a `dev` dependency and shouldn't need to be installed to run the code.
[ { "content": "import json\nimport logging\nimport math\nimport os\nimport subprocess\nimport textwrap\nfrom random import shuffle\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nif TYPE_CHECKING:\n from webdataset import WebDataset\n\ntry:\n from webdataset import ShardWriter, WebDataset\n from wurlitzer import pipes\n webdataset_installed = True\nexcept ImportError:\n webdataset_installed = False\n\nlog = logging.getLogger(__name__)\n\n\ndef _require_webdataset():\n \"\"\"Hard require webdataset.\"\"\"\n if not webdataset_installed:\n raise ImportError(\n textwrap.dedent(\"\"\"\n Composer was installed without WebDataset support. To use WebDataset with Composer, run `pip install\n mosaicml[webdataset]`.\"\"\"))\n\n\ndef _create_webdataset_meta(split_dir: str, n_samples: int, n_shards: int) -> None:\n \"\"\"Write a WebDataset meta file.\n\n Args:\n split_dir (str): Directory to save the JSON file into.\n n_samples (int): Number of samples in this split.\n n_shards (int): Number of shards in this split.\n \"\"\"\n samples_per_shard = n_samples // n_shards\n n_leftover = n_samples % samples_per_shard\n obj = {\n 'n_shards': n_shards,\n 'samples_per_shard': samples_per_shard,\n 'n_leftover': n_leftover,\n }\n filename = os.path.join(split_dir, 'meta.json')\n json.dump(obj, open(filename, 'w'), sort_keys=True)\n\n\ndef create_webdataset(samples: Iterable[Dict[str, Any]],\n dataset_dir: str,\n split: str,\n n_samples: int,\n n_shards: int,\n use_tqdm: Union[bool, int] = True) -> None:\n \"\"\"Write an entire WebDataset to a local directory, given an iterable of samples.\n\n Args:\n samples (iterable of dict): Each dataset sample.\n dataset_dir (str): Output dataset directory.\n split (str): Dataset split.\n n_samples (int): Number of samples in dataset.\n n_shards (int): Number of full shards to write (may write a leftovers shard).\n use_tqdm (bool): Whether to show progress with tqdm.\n \"\"\"\n _require_webdataset()\n split_dir = os.path.join(dataset_dir, split)\n os.makedirs(split_dir)\n pattern = os.path.join(split_dir, '%05d.tar')\n samples_per_shard = n_samples // n_shards\n with pipes():\n out = ShardWriter(pattern, maxcount=samples_per_shard)\n out.verbose = 0\n if use_tqdm:\n samples = tqdm(samples, total=n_samples, leave=False)\n for sample in samples:\n out.write(sample)\n out.close()\n _create_webdataset_meta(split_dir, n_samples, n_shards)\n\n\ndef _find_samples(split_dirname):\n \"\"\"Collect and shuffle sample as pairs of (image filename, class).\n\n Args:\n split_dirname (str): Dataset split directory.\n\n Returns:\n Shuffled list of (image filename, class).\n \"\"\"\n pairs = []\n for cls, basename in enumerate(sorted(os.listdir(split_dirname))):\n class_dirname = os.path.join(split_dirname, basename)\n for basename in sorted(os.listdir(class_dirname)):\n sample_filename = os.path.join(class_dirname, basename)\n pairs.append((sample_filename, cls))\n shuffle(pairs)\n return pairs\n\n\ndef _each_sample(pairs: List[Tuple[str, int]]) -> Iterable[Dict[str, Any]]:\n \"\"\"Generator over each dataset sample.\n\n Args:\n pairs (list): List of pairs of (image filename, class ID).\n\n Yields:\n Sample dicts.\n \"\"\"\n for idx, (img_file, cls) in enumerate(pairs):\n img = open(img_file, 'rb').read()\n yield {\n '__key__': f'{idx:05d}',\n 'jpg': img,\n 'cls': cls,\n }\n\n\ndef create_webdatasets_from_image_folder(in_root: str,\n out_root: str,\n n_shards: int,\n use_tqdm: Union[bool, int] = True) -> None:\n \"\"\"Given a directory tree of classified images, create a WebDataset per dataset split.\n\n Directory tree format: (path to dataset)/(split name)/(class name)/(image file).\n\n Args:\n in_root (str): Input dataset root.\n out_root (str): Output WebDataset root.\n n_shards (int): Number of full shards to write (may write a leftovers shard).\n use_tqdm (bool): Whether to show progress with tqdm.\n \"\"\"\n for split in sorted(os.listdir(in_root)):\n in_dir = os.path.join(in_root, split)\n pairs = _find_samples(in_dir)\n create_webdataset(_each_sample(pairs), out_root, split, len(pairs), n_shards, use_tqdm)\n\n\ndef _init_webdataset_meta_from_s3(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file from S3.\n\n Args:\n remote (str): S3 bucket or S3 bucket directory.\n split (str): Dataset split.\n \"\"\"\n if split is None:\n url = f'{remote}/meta.json'\n else:\n url = f'{remote}/{split}/meta.json'\n cmd = 'aws', 's3', 'cp', url, '-'\n ret = subprocess.run(cmd, capture_output=True)\n assert not ret.stderr, 'Download failed, check your credentials?'\n return ret.stdout\n\n\ndef _init_webdataset_meta_from_local(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file from local filesystem.\n\n Args:\n remote (str): Local filesystem directory.\n split (str): Dataset split.\n \"\"\"\n if split is None:\n path = f'{remote}/meta.json'\n else:\n path = f'{remote}/{split}/meta.json'\n return open(path, 'rb').read()\n\n\ndef init_webdataset_meta(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file.\n\n Args:\n remote (str): Dataset directory (S3 bucket or local dir).\n split (str): Dataset split. Default: ``None``.\n \"\"\"\n if remote.startswith('s3://'):\n return _init_webdataset_meta_from_s3(remote, split)\n else:\n return _init_webdataset_meta_from_local(remote, split)\n\n\ndef _init_webdataset(remote: str,\n name: str,\n split: str,\n cache_dir: Optional[str] = None,\n cache_verbose: bool = False) -> Tuple[WebDataset, dict]:\n \"\"\"Initialize a WebDataset with an optional local cache dir.\n\n Args:\n remote (str): Dataset directory (S3 bucket or local dir).\n name (str): Name of this dataset, used to locate dataset in local cache.\n split (str): Dataset split.\n cache_dir (str, optional): Root directory of local filesystem cache.\n cache_verbose (bool): WebDataset caching verbosity.\n\n Returns:\n dataset (WebDataset): The webdataset object for streaming.\n meta (dict): Dataset sample/shard statistics.\n \"\"\"\n _require_webdataset()\n if cache_dir:\n split_dir = os.path.join(cache_dir, name, split)\n meta_file = os.path.join(split_dir, 'meta.json')\n if os.path.exists(meta_file):\n text = open(meta_file).read()\n else:\n text = init_webdataset_meta(remote, split)\n if not os.path.exists(split_dir):\n os.makedirs(split_dir)\n with open(meta_file, 'wb') as out:\n out.write(text)\n else:\n split_dir = None\n text = init_webdataset_meta(remote, split)\n meta = json.loads(text)\n max_shard = meta['n_shards'] - 1\n shards = f'{{{0:05d}..{max_shard:05d}}}.tar'\n if remote.startswith('s3://'):\n urls = f'pipe: aws s3 cp {remote}/{split}/{shards} -'\n else:\n urls = f'{remote}/{split}/{shards}'\n dataset = WebDataset(urls, cache_dir=split_dir, cache_verbose=cache_verbose)\n return dataset, meta\n\n\ndef _size_webdataset(dataset: WebDataset, n_shards: int, samples_per_shard: int, n_devices: int,\n workers_per_device: int, batch_size: int, drop_last: bool) -> WebDataset:\n \"\"\"Set IterableDataset epoch boundary and length for DDP, PyTorch DataLoader compatability.\n\n Note: 'drop_last=True' with per-CPU-worker sharding will cause an incomplete batch to be dropped at the end of each\n CPU worker's sample list. Total samples dropped across all workers may sum to more than one batch.\n\n Note: 'drop_last=False' with per-CPU-worker sharding will lead to multiple incomplete batches being read from each\n device, one for each CPU worker. Unfortunately, the PyTorch DataLoader does not handle this situation well in its\n __len__ implementation, so len(dataloader) will be an underestimate of batches_per_epoch.\n\n Calculation:\n shards\n shards per worker = ------------------------------\n devices * workers per device\n\n samples per worker = samples per shard * shards per worker\n\n If drop last,\n samples per worker = (samples per worker // batch size) * batch size\n\n samples per device = samples per worker * workers per device\n\n samples per epoch = samples per device * devices\n\n Args:\n dataset (WebDataset):\n n_shards (int): Number of full shards.\n samples_per_shard (int): Number of samples per webdataset shard.\n n_devices (int): Number of devices.\n workers_per_device (int): Number of workers per device.\n batch_size (int): Batch size.\n drop_last (bool): Whether to drop partial last batches.\n \"\"\"\n workers_per_device = max(1, workers_per_device)\n\n # Ensure that shards can be split among CPU workers\n n_workers_global = n_devices * workers_per_device\n if n_shards % n_workers_global != 0:\n raise ValueError(f\"n_shards={n_shards} must be divisible by n_workers_global={n_workers_global}!\")\n\n shards_per_worker = n_shards // n_devices // workers_per_device\n expected_samples_per_worker = samples_per_shard * shards_per_worker\n if drop_last:\n samples_per_worker = (expected_samples_per_worker // batch_size) * batch_size\n samples_per_device = samples_per_worker * workers_per_device\n samples_per_epoch = samples_per_device * n_devices\n expected_samples_per_epoch = n_shards * samples_per_shard\n if samples_per_epoch != expected_samples_per_epoch:\n log.warning(\n f\"Note that 'drop_last=True' with per-CPU-worker sharding will cause an incomplete batch to be dropped at the end of ** each CPU worker's sample list **. \"\n f\"Given your training configuration, we have calculated this will reduce samples_per_epoch from {expected_samples_per_epoch} to {samples_per_epoch}.\"\n )\n else:\n samples_per_worker = expected_samples_per_worker\n samples_per_device = samples_per_worker * workers_per_device\n samples_per_epoch = samples_per_device * n_devices\n expected_batches_per_epoch = math.ceil(samples_per_worker * workers_per_device / batch_size)\n batches_per_epoch = math.ceil(samples_per_worker / batch_size) * workers_per_device\n if batches_per_epoch != expected_batches_per_epoch:\n log.warning(\n f\"Note that 'drop_last=False' with per-CPU-worker sharding will lead to multiple incomplete batches being read from each device, ** one for each CPU worker **. \"\n f\"Unfortunately, the PyTorch DataLoader does not handle this situation well in its __len__ implementation, so len(dataloader) will be an underestimate of batches_per_epoch. \"\n f\"(See https://github.com/pytorch/pytorch/blob/3d9ec11feacd69d0ff1bffe0b25a825cdf203b87/torch/utils/data/dataloader.py#L403-L411). \"\n f\"Given your training configuration, we have calculated this will increase batches_per_epoch from {expected_batches_per_epoch} -> {batches_per_epoch}.\"\n )\n # Set epoch boundary (per CPU worker).\n # Technically not needed if shards are constructed correctly, but used for safety\n dataset = dataset.with_epoch(samples_per_worker)\n # Set IterableDataset length (per device), to be read by PyTorch DataLoader\n return dataset.with_length(samples_per_device)\n\n\ndef load_webdataset(remote: str, name: str, split: str, cache_dir: Optional[str], cache_verbose: bool, shuffle: bool,\n shuffle_buffer: int, preprocess, n_devices: int, workers_per_device: int, batch_size: int,\n drop_last: bool):\n \"\"\"Load WebDataset from remote, optionally caching, with the given preprocessing and batching.\n\n Args:\n remote (str): Remote path (either an s3:// url or a directory on local filesystem).\n name (str): Name of this dataset, used to locate dataset in local cache.\n cache_dir (str, optional): Root directory of local filesystem cache.\n cache_verbose (bool): WebDataset caching verbosity.\n shuffle (bool): Whether to shuffle samples.\n shuffle_buffer (int): How many samples to buffer when shuffling.\n preprocess (Callable): What transformations to apply to the samples, as WebDataset iterator(s).\n n_devices (int): Number of devices.\n workers_per_device (int): Number of workers per device.\n batch_size (int): Batch size.\n drop_last (bool): Whether to drop partial last batches.\n \"\"\"\n dataset, meta = _init_webdataset(remote, name, split, cache_dir, cache_verbose)\n if shuffle:\n dataset = dataset.shuffle(shuffle_buffer)\n if preprocess:\n dataset = preprocess(dataset)\n return _size_webdataset(dataset, meta['n_shards'], meta['samples_per_shard'], n_devices, workers_per_device,\n batch_size, drop_last)\n", "path": "composer/datasets/webdataset_utils.py" } ]
[ { "content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport json\nimport logging\nimport math\nimport os\nimport subprocess\nimport textwrap\nfrom random import shuffle\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nif TYPE_CHECKING:\n from webdataset import WebDataset\n\ntry:\n from webdataset import ShardWriter, WebDataset\n from wurlitzer import pipes\n webdataset_installed = True\nexcept ImportError:\n webdataset_installed = False\n\nlog = logging.getLogger(__name__)\n\n\ndef _require_webdataset():\n \"\"\"Hard require webdataset.\"\"\"\n if not webdataset_installed:\n raise ImportError(\n textwrap.dedent(\"\"\"\n Composer was installed without WebDataset support. To use WebDataset with Composer, run `pip install\n mosaicml[webdataset]`.\"\"\"))\n\n\ndef _create_webdataset_meta(split_dir: str, n_samples: int, n_shards: int) -> None:\n \"\"\"Write a WebDataset meta file.\n\n Args:\n split_dir (str): Directory to save the JSON file into.\n n_samples (int): Number of samples in this split.\n n_shards (int): Number of shards in this split.\n \"\"\"\n samples_per_shard = n_samples // n_shards\n n_leftover = n_samples % samples_per_shard\n obj = {\n 'n_shards': n_shards,\n 'samples_per_shard': samples_per_shard,\n 'n_leftover': n_leftover,\n }\n filename = os.path.join(split_dir, 'meta.json')\n json.dump(obj, open(filename, 'w'), sort_keys=True)\n\n\ndef create_webdataset(samples: Iterable[Dict[str, Any]],\n dataset_dir: str,\n split: str,\n n_samples: int,\n n_shards: int,\n use_tqdm: Union[bool, int] = True) -> None:\n \"\"\"Write an entire WebDataset to a local directory, given an iterable of samples.\n\n Args:\n samples (iterable of dict): Each dataset sample.\n dataset_dir (str): Output dataset directory.\n split (str): Dataset split.\n n_samples (int): Number of samples in dataset.\n n_shards (int): Number of full shards to write (may write a leftovers shard).\n use_tqdm (bool): Whether to show progress with tqdm.\n \"\"\"\n _require_webdataset()\n split_dir = os.path.join(dataset_dir, split)\n os.makedirs(split_dir)\n pattern = os.path.join(split_dir, '%05d.tar')\n samples_per_shard = n_samples // n_shards\n with pipes():\n out = ShardWriter(pattern, maxcount=samples_per_shard)\n out.verbose = 0\n if use_tqdm:\n samples = tqdm(samples, total=n_samples, leave=False)\n for sample in samples:\n out.write(sample)\n out.close()\n _create_webdataset_meta(split_dir, n_samples, n_shards)\n\n\ndef _find_samples(split_dirname):\n \"\"\"Collect and shuffle sample as pairs of (image filename, class).\n\n Args:\n split_dirname (str): Dataset split directory.\n\n Returns:\n Shuffled list of (image filename, class).\n \"\"\"\n pairs = []\n for cls, basename in enumerate(sorted(os.listdir(split_dirname))):\n class_dirname = os.path.join(split_dirname, basename)\n for basename in sorted(os.listdir(class_dirname)):\n sample_filename = os.path.join(class_dirname, basename)\n pairs.append((sample_filename, cls))\n shuffle(pairs)\n return pairs\n\n\ndef _each_sample(pairs: List[Tuple[str, int]]) -> Iterable[Dict[str, Any]]:\n \"\"\"Generator over each dataset sample.\n\n Args:\n pairs (list): List of pairs of (image filename, class ID).\n\n Yields:\n Sample dicts.\n \"\"\"\n for idx, (img_file, cls) in enumerate(pairs):\n img = open(img_file, 'rb').read()\n yield {\n '__key__': f'{idx:05d}',\n 'jpg': img,\n 'cls': cls,\n }\n\n\ndef create_webdatasets_from_image_folder(in_root: str,\n out_root: str,\n n_shards: int,\n use_tqdm: Union[bool, int] = True) -> None:\n \"\"\"Given a directory tree of classified images, create a WebDataset per dataset split.\n\n Directory tree format: (path to dataset)/(split name)/(class name)/(image file).\n\n Args:\n in_root (str): Input dataset root.\n out_root (str): Output WebDataset root.\n n_shards (int): Number of full shards to write (may write a leftovers shard).\n use_tqdm (bool): Whether to show progress with tqdm.\n \"\"\"\n for split in sorted(os.listdir(in_root)):\n in_dir = os.path.join(in_root, split)\n pairs = _find_samples(in_dir)\n create_webdataset(_each_sample(pairs), out_root, split, len(pairs), n_shards, use_tqdm)\n\n\ndef _init_webdataset_meta_from_s3(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file from S3.\n\n Args:\n remote (str): S3 bucket or S3 bucket directory.\n split (str): Dataset split.\n \"\"\"\n if split is None:\n url = f'{remote}/meta.json'\n else:\n url = f'{remote}/{split}/meta.json'\n cmd = 'aws', 's3', 'cp', url, '-'\n ret = subprocess.run(cmd, capture_output=True)\n assert not ret.stderr, 'Download failed, check your credentials?'\n return ret.stdout\n\n\ndef _init_webdataset_meta_from_local(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file from local filesystem.\n\n Args:\n remote (str): Local filesystem directory.\n split (str): Dataset split.\n \"\"\"\n if split is None:\n path = f'{remote}/meta.json'\n else:\n path = f'{remote}/{split}/meta.json'\n return open(path, 'rb').read()\n\n\ndef init_webdataset_meta(remote: str, split: Optional[str] = None) -> bytes:\n \"\"\"Read a WebDataset meta file.\n\n Args:\n remote (str): Dataset directory (S3 bucket or local dir).\n split (str): Dataset split. Default: ``None``.\n \"\"\"\n if remote.startswith('s3://'):\n return _init_webdataset_meta_from_s3(remote, split)\n else:\n return _init_webdataset_meta_from_local(remote, split)\n\n\ndef _init_webdataset(remote: str,\n name: str,\n split: str,\n cache_dir: Optional[str] = None,\n cache_verbose: bool = False) -> Tuple[WebDataset, dict]:\n \"\"\"Initialize a WebDataset with an optional local cache dir.\n\n Args:\n remote (str): Dataset directory (S3 bucket or local dir).\n name (str): Name of this dataset, used to locate dataset in local cache.\n split (str): Dataset split.\n cache_dir (str, optional): Root directory of local filesystem cache.\n cache_verbose (bool): WebDataset caching verbosity.\n\n Returns:\n dataset (WebDataset): The webdataset object for streaming.\n meta (dict): Dataset sample/shard statistics.\n \"\"\"\n _require_webdataset()\n if cache_dir:\n split_dir = os.path.join(cache_dir, name, split)\n meta_file = os.path.join(split_dir, 'meta.json')\n if os.path.exists(meta_file):\n text = open(meta_file).read()\n else:\n text = init_webdataset_meta(remote, split)\n if not os.path.exists(split_dir):\n os.makedirs(split_dir)\n with open(meta_file, 'wb') as out:\n out.write(text)\n else:\n split_dir = None\n text = init_webdataset_meta(remote, split)\n meta = json.loads(text)\n max_shard = meta['n_shards'] - 1\n shards = f'{{{0:05d}..{max_shard:05d}}}.tar'\n if remote.startswith('s3://'):\n urls = f'pipe: aws s3 cp {remote}/{split}/{shards} -'\n else:\n urls = f'{remote}/{split}/{shards}'\n dataset = WebDataset(urls, cache_dir=split_dir, cache_verbose=cache_verbose)\n return dataset, meta\n\n\ndef _size_webdataset(dataset: WebDataset, n_shards: int, samples_per_shard: int, n_devices: int,\n workers_per_device: int, batch_size: int, drop_last: bool) -> WebDataset:\n \"\"\"Set IterableDataset epoch boundary and length for DDP, PyTorch DataLoader compatability.\n\n Note: 'drop_last=True' with per-CPU-worker sharding will cause an incomplete batch to be dropped at the end of each\n CPU worker's sample list. Total samples dropped across all workers may sum to more than one batch.\n\n Note: 'drop_last=False' with per-CPU-worker sharding will lead to multiple incomplete batches being read from each\n device, one for each CPU worker. Unfortunately, the PyTorch DataLoader does not handle this situation well in its\n __len__ implementation, so len(dataloader) will be an underestimate of batches_per_epoch.\n\n Calculation:\n shards\n shards per worker = ------------------------------\n devices * workers per device\n\n samples per worker = samples per shard * shards per worker\n\n If drop last,\n samples per worker = (samples per worker // batch size) * batch size\n\n samples per device = samples per worker * workers per device\n\n samples per epoch = samples per device * devices\n\n Args:\n dataset (WebDataset):\n n_shards (int): Number of full shards.\n samples_per_shard (int): Number of samples per webdataset shard.\n n_devices (int): Number of devices.\n workers_per_device (int): Number of workers per device.\n batch_size (int): Batch size.\n drop_last (bool): Whether to drop partial last batches.\n \"\"\"\n workers_per_device = max(1, workers_per_device)\n\n # Ensure that shards can be split among CPU workers\n n_workers_global = n_devices * workers_per_device\n if n_shards % n_workers_global != 0:\n raise ValueError(f\"n_shards={n_shards} must be divisible by n_workers_global={n_workers_global}!\")\n\n shards_per_worker = n_shards // n_devices // workers_per_device\n expected_samples_per_worker = samples_per_shard * shards_per_worker\n if drop_last:\n samples_per_worker = (expected_samples_per_worker // batch_size) * batch_size\n samples_per_device = samples_per_worker * workers_per_device\n samples_per_epoch = samples_per_device * n_devices\n expected_samples_per_epoch = n_shards * samples_per_shard\n if samples_per_epoch != expected_samples_per_epoch:\n log.warning(\n f\"Note that 'drop_last=True' with per-CPU-worker sharding will cause an incomplete batch to be dropped at the end of ** each CPU worker's sample list **. \"\n f\"Given your training configuration, we have calculated this will reduce samples_per_epoch from {expected_samples_per_epoch} to {samples_per_epoch}.\"\n )\n else:\n samples_per_worker = expected_samples_per_worker\n samples_per_device = samples_per_worker * workers_per_device\n samples_per_epoch = samples_per_device * n_devices\n expected_batches_per_epoch = math.ceil(samples_per_worker * workers_per_device / batch_size)\n batches_per_epoch = math.ceil(samples_per_worker / batch_size) * workers_per_device\n if batches_per_epoch != expected_batches_per_epoch:\n log.warning(\n f\"Note that 'drop_last=False' with per-CPU-worker sharding will lead to multiple incomplete batches being read from each device, ** one for each CPU worker **. \"\n f\"Unfortunately, the PyTorch DataLoader does not handle this situation well in its __len__ implementation, so len(dataloader) will be an underestimate of batches_per_epoch. \"\n f\"(See https://github.com/pytorch/pytorch/blob/3d9ec11feacd69d0ff1bffe0b25a825cdf203b87/torch/utils/data/dataloader.py#L403-L411). \"\n f\"Given your training configuration, we have calculated this will increase batches_per_epoch from {expected_batches_per_epoch} -> {batches_per_epoch}.\"\n )\n # Set epoch boundary (per CPU worker).\n # Technically not needed if shards are constructed correctly, but used for safety\n dataset = dataset.with_epoch(samples_per_worker)\n # Set IterableDataset length (per device), to be read by PyTorch DataLoader\n return dataset.with_length(samples_per_device)\n\n\ndef load_webdataset(remote: str, name: str, split: str, cache_dir: Optional[str], cache_verbose: bool, shuffle: bool,\n shuffle_buffer: int, preprocess, n_devices: int, workers_per_device: int, batch_size: int,\n drop_last: bool):\n \"\"\"Load WebDataset from remote, optionally caching, with the given preprocessing and batching.\n\n Args:\n remote (str): Remote path (either an s3:// url or a directory on local filesystem).\n name (str): Name of this dataset, used to locate dataset in local cache.\n cache_dir (str, optional): Root directory of local filesystem cache.\n cache_verbose (bool): WebDataset caching verbosity.\n shuffle (bool): Whether to shuffle samples.\n shuffle_buffer (int): How many samples to buffer when shuffling.\n preprocess (Callable): What transformations to apply to the samples, as WebDataset iterator(s).\n n_devices (int): Number of devices.\n workers_per_device (int): Number of workers per device.\n batch_size (int): Batch size.\n drop_last (bool): Whether to drop partial last batches.\n \"\"\"\n dataset, meta = _init_webdataset(remote, name, split, cache_dir, cache_verbose)\n if shuffle:\n dataset = dataset.shuffle(shuffle_buffer)\n if preprocess:\n dataset = preprocess(dataset)\n return _size_webdataset(dataset, meta['n_shards'], meta['samples_per_shard'], n_devices, workers_per_device,\n batch_size, drop_last)\n", "path": "composer/datasets/webdataset_utils.py" } ]
diff --git a/composer/datasets/webdataset_utils.py b/composer/datasets/webdataset_utils.py index 6cf872440e..db5b08fa1a 100644 --- a/composer/datasets/webdataset_utils.py +++ b/composer/datasets/webdataset_utils.py @@ -1,3 +1,7 @@ +# Copyright 2021 MosaicML. All Rights Reserved. + +from __future__ import annotations + import json import logging import math
pwndbg__pwndbg-616
bp breaks on pie binaries before run <!-- Before reporting a new issue, make sure that we do not have any duplicates already open. If there is one it might be good to take part in the discussion there. Please make sure you have checked that the issue persists on LATEST pwndbg version. Below is a template for BUG REPORTS. Don't include it if this is a FEATURE REQUEST. --> ### Description In the windbg commands while setting a breakpoint using `bp` the address is converted to int https://github.com/pwndbg/pwndbg/blob/ca17c6dbb0d1bc40ef060331aa48dad0675c5df9/pwndbg/commands/windbg.py#L297 using `pwndbg.inthook.xint`. This verifies if the argument is actually an address but in case of pie binaries it could be possible that the address is loaded later and then the argument is casted to `uint32_t` ### Steps to reproduce ```sh [/tmp] tail hello.c #include <stdio.h> int main(int argc, char **argv) { puts("Hello World"); return 0; } [/tmp] make hello 17:41:43 cc hello.c -o hello [/tmp] gdb -q hello 17:41:47 pwndbg: loaded 177 commands. Type pwndbg [filter] for a list. pwndbg: created $rebase, $ida gdb functions (can be used with print/break) Reading symbols from hello...(no debugging symbols found)...done. pwndbg> bp 0x00005555555546b0 Breakpoint 1 at 0x555546b0 pwndbg> bl Num Type Disp Enb Address What 1 breakpoint keep y 0x00000000555546b0 pwndbg> r Starting program: /tmp/hello Warning: Cannot insert breakpoint 1. Cannot access memory at address 0x555546b0 ``` ### My setup ```sh [pwndbg] git --no-pager log -1 --stat 18:07:21 ☁ dev ☀ commit ca17c6dbb0d1bc40ef060331aa48dad0675c5df9 Author: Alisson Bezerra <[email protected]> Date: Tue Apr 9 05:54:00 2019 -0300 Add xuntil command (#604) pwndbg/commands/peda.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) [pwndbg] lsb_release -a 18:08:01 ☁ dev ☀ No LSB modules are available. Distributor ID: Debian Description: Debian GNU/Linux 9.8 (stretch) Release: 9.8 Codename: stretch [pwndbg] gdb -q 18:10:56 ☁ dev ☀ pwndbg: loaded 178 commands. Type pwndbg [filter] for a list. pwndbg: created $rebase, $ida gdb functions (can be used with print/break) pwndbg> show version GNU gdb (Debian 7.12-6) 7.12.0.20161007-git Copyright (C) 2016 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html> This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Type "show copying" and "show warranty" for details. This GDB was configured as "x86_64-linux-gnu". Type "show configuration" for configuration details. For bug reporting instructions, please see: <http://www.gnu.org/software/gdb/bugs/>. Find the GDB manual and other documentation resources online at: <http://www.gnu.org/software/gdb/documentation/>. For help, type "help". Type "apropos word" to search for commands related to "word". pwndbg> py import sys; print(sys.version) 3.5.3 (default, Sep 27 2018, 17:25:39) [GCC 6.3.0 20170516] pwndbg> ``` <!-- Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case). NOTE: We are currently supporting only Ubuntu installations. It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there). If you would like to change this situation - help us improving pwndbg and supporting other distros! This can be displayed in pwndbg through `version` command. If it is somehow unavailable, use: * `show version` - for gdb * `py import sys; print(sys.version)` - for python * pwndbg version/git commit id -->
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32')\n module.long = lookup_types('long', 'int', 'i32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')\n\n module.int8 = lookup_types('char', 'i8')\n module.int16 = lookup_types('short', 'i16')\n module.int32 = lookup_types('int', 'i32')\n module.int64 = lookup_types('long long', 'long', 'i64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]_objfile\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32')\n module.long = lookup_types('long', 'int', 'i32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')\n\n module.int8 = lookup_types('char', 'i8')\n module.int16 = lookup_types('short', 'i16')\n module.int32 = lookup_types('int', 'i32')\n module.int64 = lookup_types('long long', 'long', 'i64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py" } ]
diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py index 17ffd85dd29..57ec3d5f231 100644 --- a/pwndbg/typeinfo.py +++ b/pwndbg/typeinfo.py @@ -43,6 +43,7 @@ def lookup_types(*types): raise exc [email protected]_objfile @pwndbg.events.start @pwndbg.events.stop def update():
mitmproxy__mitmproxy-2754
Transitive import of mitmproxy.version causes warning Since #1837, we import `.script`, will imports `.flow`, which imports `.version`. This causes the following warning in pytest: ``` test/mitmproxy/test_version.py::test_version /Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125: RuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package 'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in unpredictable behaviour warn(RuntimeWarning(msg)) -- Docs: http://doc.pytest.org/en/latest/warnings.html ``` [Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap) > This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: “Never add a package directory, or any directory inside a package, directly to the Python path”. > The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path). Maybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?
[ { "content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\":\n print(VERSION)\n", "path": "mitmproxy/version.py" } ]
[ { "content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "path": "mitmproxy/version.py" } ]
diff --git a/mitmproxy/version.py b/mitmproxy/version.py index 44ec32d4cb..20a303e84a 100644 --- a/mitmproxy/version.py +++ b/mitmproxy/version.py @@ -60,5 +60,5 @@ def get_version(dev: bool = False, build: bool = False, refresh: bool = False) - return mitmproxy_version -if __name__ == "__main__": +if __name__ == "__main__": # pragma: no cover print(VERSION) diff --git a/test/mitmproxy/test_version.py b/test/mitmproxy/test_version.py index 6e36ffd894..b5b33ba127 100644 --- a/test/mitmproxy/test_version.py +++ b/test/mitmproxy/test_version.py @@ -1,3 +1,4 @@ +import pathlib import runpy import subprocess from unittest import mock @@ -6,7 +7,9 @@ def test_version(capsys): - runpy.run_module('mitmproxy.version', run_name='__main__') + here = pathlib.Path(__file__).absolute().parent + version_file = here / ".." / ".." / "mitmproxy" / "version.py" + runpy.run_path(str(version_file), run_name='__main__') stdout, stderr = capsys.readouterr() assert len(stdout) > 0 assert stdout.strip() == version.VERSION