/DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/sipphone/pjsua_lib/SipPhoneAccountCallBack.py

import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)

import threading

import pjsua as pj

from doorpi import DoorPi
from SipPhoneCallCallBack import SipPhoneCallCallBack as CallCallback


class SipPhoneAccountCallBack(pj.AccountCallback):

    sem = None

    def __init__(self, account=None):
        logger.debug("__init__")
        pj.AccountCallback.__init__(self, account)

        DoorPi().event_handler.register_event('BeforeCallIncoming', __name__)
        DoorPi().event_handler.register_event('OnCallReconnect', __name__)
        DoorPi().event_handler.register_event('AfterCallReconnect', __name__)
        DoorPi().event_handler.register_event('OnCallBusy', __name__)
        DoorPi().event_handler.register_event('AfterCallBusy', __name__)
        DoorPi().event_handler.register_event('OnCallIncoming', __name__)
        DoorPi().event_handler.register_event('AfterCallIncoming', __name__)
        DoorPi().event_handler.register_event('OnCallReject', __name__)
        DoorPi().event_handler.register_event('AfterCallReject', __name__)
        #DoorPi().event_handler.register_event('AfterAccountRegState', __name__)

    def __del__(self):
        self.destroy()

    def destroy(self):
        logger.debug("destroy")
        DoorPi().event_handler.unregister_source(__name__, True)

    def wait(self):
        # Block the calling thread until on_reg_state() sees a final
        # registration response and releases the semaphore.
        self.sem = threading.Semaphore(0)
        self.sem.acquire()

    def on_reg_state(self):
        if self.sem:
            # Any final SIP response (status >= 200) ends the registration attempt.
            if self.account.info().reg_status >= 200:
                self.sem.release()
        #DoorPi().event_handler('AfterAccountRegState', __name__)
        #logger.debug(self.account.info.reg_status)

    def answer_call(self, call):
        DoorPi().sipphone.current_callcallback = CallCallback()
        call.set_callback(DoorPi().sipphone.current_callcallback)
        DoorPi().sipphone.current_call = call
        DoorPi().sipphone.current_call.answer(code=200)

    def on_incoming_call(self, call):
        # SIP-Status-Codes: http://de.wikipedia.org/wiki/SIP-Status-Codes
        # 200 = OK
        # 401 = Unauthorized
        # 403 = Forbidden
        # 486 = Busy Here
        # 494 = Security Agreement Required
        logger.debug("on_incoming_call")
        logger.info("Incoming call from %s", str(call.info().remote_uri))
        DoorPi().event_handler('BeforeCallIncoming', __name__)

        # Signal "180 Ringing" while deciding how to handle the call.
        call.answer(180)

        if DoorPi().sipphone.current_call is not None and DoorPi().sipphone.current_call.is_valid():
            logger.debug("Incoming call while another call is active")
            logger.debug("- incoming.remote_uri: %s", call.info().remote_uri)
            logger.debug("- current.remote_uri : %s", DoorPi().sipphone.current_call.info().remote_uri)
            if call.info().remote_uri == DoorPi().sipphone.current_call.info().remote_uri:
                logger.info("Current call is incoming call - quitting current and connecting to incoming. Maybe connection reset?")
                DoorPi().event_handler('OnCallReconnect', __name__, {'remote_uri': call.info().remote_uri})
                DoorPi().sipphone.current_call.hangup()
                self.answer_call(call)
                DoorPi().event_handler('AfterCallReconnect', __name__)
                return
            else:
                logger.info("Incoming and current call are different - sending busy signal to incoming call")
                DoorPi().event_handler('OnCallBusy', __name__, {'remote_uri': call.info().remote_uri})
                call.answer(code=494, reason="Security Agreement Required")
                DoorPi().event_handler('AfterCallBusy', __name__)
                return

        if DoorPi().sipphone.is_admin_number(call.info().remote_uri):
            logger.debug("Incoming call from trusted admin number %s -> autoanswer", call.info().remote_uri)
            DoorPi().event_handler('OnCallIncoming', __name__, {'remote_uri': call.info().remote_uri})
            self.answer_call(call)
            DoorPi().event_handler('AfterCallIncoming', __name__)
            return
        else:
            logger.debug("Incoming call is not from a trusted admin number %s -> sending busy signal", call.info().remote_uri)
            DoorPi().event_handler('OnCallReject', __name__, {'remote_uri': call.info().remote_uri})
            call.answer(code=494, reason="Security Agreement Required")
            DoorPi().event_handler('AfterCallReject', __name__)
            return

/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/plugins/a11yhelp/dialogs/lang/lv.js

/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang("a11yhelp","lv",{title:"Pieejamības instrukcija",contents:"Palīdzības saturs. Lai aizvērtu ciet šo dialogu nospiediet ESC.",legend:[{name:"Galvenais",items:[{name:"Redaktora rīkjosla",legend:"Nospiediet ${toolbarFocus} lai pārvietotos uz rīkjoslu. Lai pārvietotos uz nākošo vai iepriekšējo rīkjoslas grupu izmantojiet pogu TAB un SHIFT+TAB. Lai pārvietotos uz nākošo vai iepriekšējo rīkjoslas pogu izmantojiet Kreiso vai Labo bultiņu. Nospiediet Atstarpi vai ENTER lai aktivizētu rīkjosla pogu."},
{name:"Redaktora dialoga logs",legend:"Inside a dialog, press TAB to navigate to the next dialog element, press SHIFT+TAB to move to the previous dialog element, press ENTER to submit the dialog, press ESC to cancel the dialog. When a dialog has multiple tabs, the tab list can be reached either with ALT+F10 or with TAB as part of the dialog tabbing order. With tab list focused, move to the next and previous tab with RIGHT and LEFT ARROW, respectively."},{name:"Redaktora satura izvēle",legend:"Nospiediet ${contextMenu} vai APPLICATION KEY lai atvērtu satura izvēlni. Lai pārvietotos uz nākošo izvēlnes opciju izmantojiet pogu TAB vai pogu Bultiņu uz leju. Lai pārvietotos uz iepriekšējo opciju izmantojiet SHIFT+TAB vai pogu Bultiņa uz augšu. Nospiediet SPACE vai ENTER lai izvelētos izvēlnes opciju. Atveriet tekošajā opcija apakšizvēlni ar SAPCE vai ENTER ka ari to var izdarīt ar Labo bultiņu. Lai atgrieztos atpakaļ uz sakuma izvēlni nospiediet ESC vai Kreiso bultiņu. Lai aizvērtu ciet izvēlnes saturu nospiediet ESC."},
{name:"Redaktora saraksta lauks",legend:"Saraksta laukā, lai pārvietotos uz nākošo saraksta elementu nospiediet TAB vai pogu Bultiņa uz leju. Lai pārvietotos uz iepriekšējo saraksta elementu nospiediet SHIFT+TAB vai pogu Bultiņa uz augšu. Nospiediet SPACE vai ENTER lai izvēlētos saraksta opcijas. Nospiediet ESC lai aizvērtu saraksta lauku."},{name:"Redaktora elementa ceļa josla",legend:"Nospiediet ${elementsPathFocus} lai pārvietotos uz elementa ceļa joslu. Lai pārvietotos uz nākošo elementa pogu izmantojiet TAB vai Labo bultiņu. Lai pārvietotos uz iepriekšējo elementa pogu izmantojiet SHIFT+TAB vai Kreiso bultiņu. Nospiediet SPACE vai ENTER lai izvēlētos elementu redaktorā."}]},
{name:"Komandas",items:[{name:"Komanda atcelt darbību",legend:"Nospiediet ${undo}"},{name:"Komanda atkārtot darbību",legend:"Nospiediet ${redo}"},{name:"Treknraksta komanda",legend:"Nospiediet ${bold}"},{name:"Kursīva komanda",legend:"Nospiediet ${italic}"},{name:"Apakšsvītras komanda ",legend:"Nospiediet ${underline}"},{name:"Hipersaites komanda",legend:"Nospiediet ${link}"},{name:"Rīkjoslas aizvēršanas komanda",legend:"Nospiediet ${toolbarCollapse}"},{name:"Piekļūt iepriekšējai fokusa vietas komandai",
legend:"Nospiediet ${accessPreviousSpace} lai piekļūtu tuvākajai nepieejamajai fokusa vietai pirms kursora. Piemēram: diviem blakus esošiem līnijas HR elementiem. Atkārtojiet taustiņu kombināciju lai piekļūtu pie tālākām vietām."},{name:"Piekļūt nākošā fokusa apgabala komandai",legend:"Nospiediet ${accessNextSpace} lai piekļūtu tuvākajai nepieejamajai fokusa vietai pēc kursora. Piemēram: diviem blakus esošiem līnijas HR elementiem. Atkārtojiet taustiņu kombināciju lai piekļūtu pie tālākām vietām."},
{name:"Pieejamības palīdzība",legend:"Nospiediet ${a11yHelp}"},{name:" Paste as plain text",legend:"Press ${pastetext}",legendEdge:"Press ${pastetext}, followed by ${paste}"}]}],tab:"Tab",pause:"Pause",capslock:"Caps Lock",escape:"Escape",pageUp:"Page Up",pageDown:"Page Down",leftArrow:"Left Arrow",upArrow:"Up Arrow",rightArrow:"Right Arrow",downArrow:"Down Arrow",insert:"Insert",leftWindowKey:"Left Windows key",rightWindowKey:"Right Windows key",selectKey:"Select key",numpad0:"Numpad 0",numpad1:"Numpad 1",
numpad2:"Numpad 2",numpad3:"Numpad 3",numpad4:"Numpad 4",numpad5:"Numpad 5",numpad6:"Numpad 6",numpad7:"Numpad 7",numpad8:"Numpad 8",numpad9:"Numpad 9",multiply:"Multiply",add:"Add",subtract:"Subtract",decimalPoint:"Decimal Point",divide:"Divide",f1:"F1",f2:"F2",f3:"F3",f4:"F4",f5:"F5",f6:"F6",f7:"F7",f8:"F8",f9:"F9",f10:"F10",f11:"F11",f12:"F12",numLock:"Num Lock",scrollLock:"Scroll Lock",semiColon:"Semicolon",equalSign:"Equal Sign",comma:"Comma",dash:"Dash",period:"Period",forwardSlash:"Forward Slash",
graveAccent:"Grave Accent",openBracket:"Open Bracket",backSlash:"Backslash",closeBracket:"Close Bracket",singleQuote:"Single Quote"}); | PypiClean |

/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/gl.js

/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['gl']={"editor":"Editor de texto mellorado","editorPanel":"Panel do editor de texto mellorado","common":{"editorHelp":"Prema ALT 0 para obter axuda","browseServer":"Examinar o servidor","url":"URL","protocol":"Protocolo","upload":"Enviar","uploadSubmit":"Enviar ao servidor","image":"Imaxe","flash":"Flash","form":"Formulario","checkbox":"Caixa de selección","radio":"Botón de opción","textField":"Campo de texto","textarea":"Área de texto","hiddenField":"Campo agochado","button":"Botón","select":"Campo de selección","imageButton":"Botón de imaxe","notSet":"<sen estabelecer>","id":"ID","name":"Nome","langDir":"Dirección de escritura do idioma","langDirLtr":"Esquerda a dereita (LTR)","langDirRtl":"Dereita a esquerda (RTL)","langCode":"Código do idioma","longDescr":"Descrición completa do URL","cssClass":"Clases da folla de estilos","advisoryTitle":"Título","cssStyle":"Estilo","ok":"Aceptar","cancel":"Cancelar","close":"Pechar","preview":"Vista previa","resize":"Redimensionar","generalTab":"Xeral","advancedTab":"Avanzado","validateNumberFailed":"Este valor non é un número.","confirmNewPage":"Calquera cambio que non gardara neste contido perderase.\r\nConfirma que quere cargar unha páxina nova?","confirmCancel":"Algunhas das opcións foron cambiadas.\r\nConfirma que quere pechar o diálogo?","options":"Opcións","target":"Destino","targetNew":"Nova xanela (_blank)","targetTop":"Xanela principal (_top)","targetSelf":"Mesma xanela (_self)","targetParent":"Xanela superior (_parent)","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","styles":"Estilo","cssClasses":"Clases da folla de estilos","width":"Largo","height":"Alto","align":"Aliñamento","left":"Esquerda","right":"Dereita","center":"Centro","justify":"Xustificado","alignLeft":"Aliñar á esquerda","alignRight":"Aliñar á dereita","alignCenter":"Aliñar ao centro","alignTop":"Arriba","alignMiddle":"Centro","alignBottom":"Abaixo","alignNone":"Ningún","invalidValue":"Valor incorrecto.","invalidHeight":"O alto debe ser un número.","invalidWidth":"O largo debe ser un número.","invalidLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida correcta (%2).","invalidCssLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","invalidHtmlLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida HTML correcta (px ou %).","invalidInlineStyle":"O valor especificado no estilo en liña debe consistir nunha ou máis tuplas co formato «nome : valor», separadas por punto e coma.","cssLengthTooltip":"Escriba un número para o valor en píxeles ou un número cunha unidade CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","unavailable":"%1<span class=\"cke_accessibility\">, non dispoñíbel</span>","keyboard":{"8":"Ir atrás","13":"Intro","16":"Maiús","17":"Ctrl","18":"Alt","32":"Espazo","35":"Fin","36":"Inicio","46":"Supr","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Orde"},"keyboardShortcut":"Atallo de teclado","optionDefault":"Predeterminado"},"about":{"copy":"Copyright © $1. Todos os dereitos reservados.",
"dlgTitle":"Sobre o CKEditor 4","moreInfo":"Para obter información sobre a licenza, visite o noso sitio web:"},"basicstyles":{"bold":"Negra","italic":"Cursiva","strike":"Riscado","subscript":"Subíndice","superscript":"Superíndice","underline":"Subliñado"},"bidi":{"ltr":"Dirección do texto de esquerda a dereita","rtl":"Dirección do texto de dereita a esquerda"},"blockquote":{"toolbar":"Cita"},"notification":{"closed":"Notificación pechada."},"toolbar":{"toolbarCollapse":"Contraer a barra de ferramentas","toolbarExpand":"Expandir a barra de ferramentas","toolbarGroups":{"document":"Documento","clipboard":"Portapapeis/desfacer","editing":"Edición","forms":"Formularios","basicstyles":"Estilos básicos","paragraph":"Paragrafo","links":"Ligazóns","insert":"Inserir","styles":"Estilos","colors":"Cores","tools":"Ferramentas"},"toolbars":"Barras de ferramentas do editor"},"clipboard":{"copy":"Copiar","copyError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de copia. Use o teclado para iso (Ctrl/Cmd+C).","cut":"Cortar","cutError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de corte. Use o teclado para iso (Ctrl/Cmd+X).","paste":"Pegar","pasteNotification":"Prema %1 para pegar. O seu navegador non admite pegar co botón da barra de ferramentas ou coa opción do menú contextual.","pasteArea":"Zona de pegado","pasteMsg":"Pegue o contido dentro da área de abaixo e prema Aceptar."},"colorbutton":{"auto":"Automático","bgColorTitle":"Cor do fondo","colors":{"000":"Negro","800000":"Marrón escuro","8B4513":"Ocre","2F4F4F":"Pizarra escuro","008080":"Verde azulado","000080":"Azul mariño","4B0082":"Índigo","696969":"Gris escuro","B22222":"Ladrillo","A52A2A":"Marrón","DAA520":"Dourado escuro","006400":"Verde escuro","40E0D0":"Turquesa","0000CD":"Azul medio","800080":"Púrpura","808080":"Gris","F00":"Vermello","FF8C00":"Laranxa escuro","FFD700":"Dourado","008000":"Verde","0FF":"Cian","00F":"Azul","EE82EE":"Violeta","A9A9A9":"Gris medio","FFA07A":"Salmón claro","FFA500":"Laranxa","FFFF00":"Amarelo","00FF00":"Lima","AFEEEE":"Turquesa pálido","ADD8E6":"Azul claro","DDA0DD":"Violeta pálido","D3D3D3":"Verde claro","FFF0F5":"Lavanda vermello","FAEBD7":"Branco antigo","FFFFE0":"Amarelo claro","F0FFF0":"Mel","F0FFFF":"Azul celeste","F0F8FF":"Azul pálido","E6E6FA":"Lavanda","FFF":"Branco","1ABC9C":"Cian forte","2ECC71":"Esmeralda","3498DB":"Azul brillante","9B59B6":"Amatista","4E5F70":"Azul agrisado","F1C40F":"Amarelo vívido","16A085":"Cian escuro","27AE60":"Esmeralda escuro","2980B9":"Azul forte","8E44AD":"Violeta escuro","2C3E50":"Azul desaturado","F39C12":"Laranxa","E67E22":"Cenoria","E74C3C":"Vermello pálido","ECF0F1":"Plata brillante","95A5A6":"Cian agrisado claro","DDD":"Gris claro","D35400":"Cabaza","C0392B":"Vermello forte","BDC3C7":"Plata","7F8C8D":"Cian agrisado","999":"Gris escuro"},"more":"Máis cores...","panelTitle":"Cores","textColorTitle":"Cor do texto"},"colordialog":{"clear":"Limpar","highlight":"Resaltar","options":"Opcións de cor","selected":"Cor seleccionado","title":"Seleccione unha cor"},"templates":{"button":"Modelos","emptyListMsg":"(Non hai modelos definidos)","insertOption":"Substituír o contido actual","options":"Opcións de modelos","selectPromptMsg":"Seleccione o modelo a abrir no editor","title":"Modelos de contido"},"contextmenu":{"options":"Opcións do menú contextual"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied",
"applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"ID","advisoryTitleInputLabel":"Título informativo","cssClassInputLabel":"Clases da folla de estilos","edit":"Editar Div","inlineStyleInputLabel":"Estilo de liña","langDirLTRLabel":"Esquerda a dereita (LTR)","langDirLabel":"Dirección de escritura do idioma","langDirRTLLabel":"Dereita a esquerda (RTL)","languageCodeInputLabel":"Código do idioma","remove":"Retirar Div","styleSelectLabel":"Estilo","title":"Crear un contedor Div","toolbar":"Crear un contedor Div"},"elementspath":{"eleLabel":"Ruta dos elementos","eleTitle":"Elemento %1"},"filetools":{"loadError":"Produciuse un erro durante a lectura do ficheiro.","networkError":"Produciuse un erro na rede durante o envío do ficheiro.","httpError404":"Produciuse un erro HTTP durante o envío do ficheiro (404: Ficheiro non atopado).","httpError403":"Produciuse un erro HTTP durante o envío do ficheiro (403: Acceso denegado).","httpError":"Produciuse un erro HTTP durante o envío do ficheiro (erro de estado: %1).","noUrlError":"Non foi definido o URL para o envío.","responseError":"Resposta incorrecta do servidor."},"find":{"find":"Buscar","findOptions":"Buscar opcións","findWhat":"Texto a buscar:","matchCase":"Coincidir Mai./min.","matchCyclic":"Coincidencia cíclica","matchWord":"Coincidencia coa palabra completa","notFoundMsg":"Non se atopou o texto indicado.","replace":"Substituir","replaceAll":"Substituír todo","replaceSuccessMsg":"%1 concorrencia(s) substituída(s).","replaceWith":"Substituír con:","title":"Buscar e substituír"},"fakeobjects":{"anchor":"Ancoraxe","flash":"Animación «Flash»","hiddenfield":"Campo agochado","iframe":"IFrame","unknown":"Obxecto descoñecido"},"flash":{"access":"Acceso de scripts","accessAlways":"Sempre","accessNever":"Nunca","accessSameDomain":"Mesmo dominio","alignAbsBottom":"Abs Inferior","alignAbsMiddle":"Abs centro","alignBaseline":"Liña de base","alignTextTop":"Tope do texto","bgcolor":"Cor do fondo","chkFull":"Permitir pantalla completa","chkLoop":"Repetir","chkMenu":"Activar o menú do «Flash»","chkPlay":"Reprodución auomática","flashvars":"Opcións do «Flash»","hSpace":"Esp. Horiz.",
"properties":"Propiedades do «Flash»","propertiesTab":"Propiedades","quality":"Calidade","qualityAutoHigh":"Alta, automática","qualityAutoLow":"Baixa, automática","qualityBest":"A mellor","qualityHigh":"Alta","qualityLow":"Baixa","qualityMedium":"Media","scale":"Escalar","scaleAll":"Amosar todo","scaleFit":"Encaixar axustando","scaleNoBorder":"Sen bordo","title":"Propiedades do «Flash»","vSpace":"Esp.Vert.","validateHSpace":"O espazado horizontal debe ser un número.","validateSrc":"O URL non pode estar baleiro.","validateVSpace":"O espazado vertical debe ser un número.","windowMode":"Modo da xanela","windowModeOpaque":"Opaca","windowModeTransparent":"Transparente","windowModeWindow":"Xanela"},"font":{"fontSize":{"label":"Tamaño","voiceLabel":"Tamaño da letra","panelTitle":"Tamaño da letra"},"label":"Tipo de letra","panelTitle":"Nome do tipo de letra","voiceLabel":"Tipo de letra"},"forms":{"button":{"title":"Propiedades do botón","text":"Texto (Valor)","type":"Tipo","typeBtn":"Botón","typeSbm":"Enviar","typeRst":"Restabelever"},"checkboxAndRadio":{"checkboxTitle":"Propiedades da caixa de selección","radioTitle":"Propiedades do botón de opción","value":"Valor","selected":"Seleccionado","required":"Requirido"},"form":{"title":"Propiedades do formulario","menu":"Propiedades do formulario","action":"Acción","method":"Método","encoding":"Codificación"},"hidden":{"title":"Propiedades do campo agochado","name":"Nome","value":"Valor"},"select":{"title":"Propiedades do campo de selección","selectInfo":"Información","opAvail":"Opcións dispoñíbeis","value":"Valor","size":"Tamaño","lines":"liñas","chkMulti":"Permitir múltiplas seleccións","required":"Requirido","opText":"Texto","opValue":"Valor","btnAdd":"Engadir","btnModify":"Modificar","btnUp":"Subir","btnDown":"Baixar","btnSetValue":"Estabelecer como valor seleccionado","btnDelete":"Eliminar"},"textarea":{"title":"Propiedades da área de texto","cols":"Columnas","rows":"Filas"},"textfield":{"title":"Propiedades do campo de texto","name":"Nome","value":"Valor","charWidth":"Largo do carácter","maxChars":"Núm. máximo de caracteres",
"required":"Requirido","type":"Tipo","typeText":"Texto","typePass":"Contrasinal","typeEmail":"Correo","typeSearch":"Buscar","typeTel":"Número de teléfono","typeUrl":"URL"}},"format":{"label":"Formato","panelTitle":"Formato do parágrafo","tag_address":"Enderezo","tag_div":"Normal (DIV)","tag_h1":"Enacabezado 1","tag_h2":"Encabezado 2","tag_h3":"Encabezado 3","tag_h4":"Encabezado 4","tag_h5":"Encabezado 5","tag_h6":"Encabezado 6","tag_p":"Normal","tag_pre":"Formatado"},"horizontalrule":{"toolbar":"Inserir unha liña horizontal"},"iframe":{"border":"Amosar o bordo do marco","noUrl":"Escriba o enderezo do iframe","scrolling":"Activar as barras de desprazamento","title":"Propiedades do iFrame","toolbar":"IFrame"},"image":{"alt":"Texto alternativo","border":"Bordo","btnUpload":"Enviar ao servidor","button2Img":"Quere converter o botón da imaxe seleccionada nunha imaxe sinxela?","hSpace":"Esp.Horiz.","img2Button":"Quere converter a imaxe seleccionada nun botón de imaxe?","infoTab":"Información da imaxe","linkTab":"Ligazón","lockRatio":"Proporcional","menu":"Propiedades da imaxe","resetSize":"Tamaño orixinal","title":"Propiedades da imaxe","titleButton":"Propiedades do botón de imaxe","upload":"Cargar","urlMissing":"Non se atopa o URL da imaxe.","vSpace":"Esp.Vert.","validateBorder":"O bordo debe ser un número.","validateHSpace":"O espazado horizontal debe ser un número.","validateVSpace":"O espazado vertical debe ser un número."},"indent":{"indent":"Aumentar a sangría","outdent":"Reducir a sangría"},"smiley":{"options":"Opcións de emoticonas","title":"Inserir unha emoticona","toolbar":"Emoticona"},"language":{"button":"Estabelezer o idioma","remove":"Retirar o idioma"},"link":{"acccessKey":"Chave de acceso","advanced":"Avanzado","advisoryContentType":"Tipo de contido informativo","advisoryTitle":"Título","anchor":{"toolbar":"Ancoraxe","menu":"Editar a ancoraxe","title":"Propiedades da ancoraxe","name":"Nome da ancoraxe","errorName":"Escriba o nome da ancoraxe","remove":"Retirar a ancoraxe"},"anchorId":"Polo ID do elemento","anchorName":"Polo nome da ancoraxe","charset":"Codificación do recurso ligado","cssClasses":"Clases da folla de estilos","download":"Forzar a descarga","displayText":"Amosar o texto","emailAddress":"Enderezo de correo","emailBody":"Corpo da mensaxe","emailSubject":"Asunto da mensaxe","id":"ID","info":"Información da ligazón","langCode":"Código do idioma","langDir":"Dirección de escritura do idioma","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","menu":"Editar a ligazón","name":"Nome","noAnchors":"(Non hai ancoraxes dispoñíbeis no documento)","noEmail":"Escriba o enderezo de correo","noUrl":"Escriba a ligazón URL","noTel":"Escriba o número de teléfono","other":"<other>","phoneNumber":"Número de teléfono","popupDependent":"Dependente (Netscape)","popupFeatures":"Características da xanela emerxente","popupFullScreen":"Pantalla completa (IE)","popupLeft":"Posición esquerda","popupLocationBar":"Barra de localización","popupMenuBar":"Barra do menú","popupResizable":"Redimensionábel","popupScrollBars":"Barras de desprazamento","popupStatusBar":"Barra de estado","popupToolbar":"Barra de ferramentas","popupTop":"Posición superior","rel":"Relación","selectAnchor":"Seleccionar unha ancoraxe","styles":"Estilo","tabIndex":"Índice de tabulación","target":"Destino","targetFrame":"<marco>","targetFrameName":"Nome do marco de destino","targetPopup":"<xanela emerxente>","targetPopupName":"Nome da xanela emerxente",
"title":"Ligazón","toAnchor":"Ligar coa ancoraxe no testo","toEmail":"Correo","toUrl":"URL","toPhone":"Teléfono","toolbar":"Ligazón","type":"Tipo de ligazón","unlink":"Eliminar a ligazón","upload":"Enviar"},"list":{"bulletedlist":"Inserir/retirar lista viñeteada","numberedlist":"Inserir/retirar lista numerada"},"liststyle":{"bulletedTitle":"Propiedades da lista viñeteada","circle":"Circulo","decimal":"Decimal (1, 2, 3, etc.)","disc":"Disc","lowerAlpha":"Alfabeto en minúsculas (a, b, c, d, e, etc.)","lowerRoman":"Números romanos en minúsculas (i, ii, iii, iv, v, etc.)","none":"Ningún","notset":"<sen estabelecer>","numberedTitle":"Propiedades da lista numerada","square":"Cadrado","start":"Inicio","type":"Tipo","upperAlpha":"Alfabeto en maiúsculas (A, B, C, D, E, etc.)","upperRoman":"Números romanos en maiúsculas (I, II, III, IV, V, etc.)","validateStartNumber":"O número de inicio da lista debe ser un número enteiro."},"magicline":{"title":"Inserir aquí o parágrafo"},"maximize":{"maximize":"Maximizar","minimize":"Minimizar"},"newpage":{"toolbar":"Páxina nova"},"pagebreak":{"alt":"Quebra de páxina","toolbar":"Inserir quebra de páxina"},"pastetext":{"button":"Pegar como texto simple","pasteNotification":"Prema %1 para pegar. O seu navegador non admite pegar co botón da barra de ferramentas ou coa opción do menú contextual.","title":"Pegar como texto simple"},"pastefromword":{"confirmCleanup":"O texto que quere pegar semella ser copiado desde o Word. Quere depuralo antes de pegalo?","error":"Non foi posíbel depurar os datos pegados por mor dun erro interno","title":"Pegar desde Word","toolbar":"Pegar desde Word"},"preview":{"preview":"Vista previa"},"print":{"toolbar":"Imprimir"},"removeformat":{"toolbar":"Retirar o formato"},"save":{"toolbar":"Gardar"},"selectall":{"toolbar":"Seleccionar todo"},"showblocks":{"toolbar":"Amosar os bloques"},"sourcearea":{"toolbar":"Orixe"},"specialchar":{"options":"Opcións de caracteres especiais","title":"Seleccione un carácter especial","toolbar":"Inserir un carácter especial"},"scayt":{"btn_about":"About SCAYT","btn_dictionaries":"Dictionaries","btn_disable":"Disable SCAYT","btn_enable":"Enable SCAYT","btn_langs":"Languages","btn_options":"Options","text_title":"Spell Check As You Type"},"stylescombo":{"label":"Estilos","panelTitle":"Estilos de formatando","panelTitle1":"Estilos de bloque","panelTitle2":"Estilos de liña","panelTitle3":"Estilos de obxecto"},"table":{"border":"Tamaño do bordo","caption":"Título","cell":{"menu":"Cela","insertBefore":"Inserir a cela á esquerda","insertAfter":"Inserir a cela á dereita","deleteCell":"Eliminar celas","merge":"Combinar celas","mergeRight":"Combinar á dereita","mergeDown":"Combinar cara abaixo","splitHorizontal":"Dividir a cela en horizontal","splitVertical":"Dividir a cela en vertical","title":"Propiedades da cela","cellType":"Tipo de cela","rowSpan":"Expandir filas","colSpan":"Expandir columnas","wordWrap":"Axustar ao contido","hAlign":"Aliñación horizontal","vAlign":"Aliñación vertical","alignBaseline":"Liña de base","bgColor":"Cor do fondo","borderColor":"Cor do bordo","data":"Datos","header":"Cabeceira","yes":"Si","no":"Non","invalidWidth":"O largo da cela debe ser un número.","invalidHeight":"O alto da cela debe ser un número.","invalidRowSpan":"A expansión de filas debe ser un número enteiro.","invalidColSpan":"A expansión de columnas debe ser un número enteiro.","chooseColor":"Escoller"},"cellPad":"Marxe interior da cela","cellSpace":"Marxe entre celas",
"column":{"menu":"Columna","insertBefore":"Inserir a columna á esquerda","insertAfter":"Inserir a columna á dereita","deleteColumn":"Borrar Columnas"},"columns":"Columnas","deleteTable":"Borrar Táboa","headers":"Cabeceiras","headersBoth":"Ambas","headersColumn":"Primeira columna","headersNone":"Ningún","headersRow":"Primeira fila","heightUnit":"height unit","invalidBorder":"O tamaño do bordo debe ser un número.","invalidCellPadding":"A marxe interior debe ser un número positivo.","invalidCellSpacing":"A marxe entre celas debe ser un número positivo.","invalidCols":"O número de columnas debe ser un número maior que 0.","invalidHeight":"O alto da táboa debe ser un número.","invalidRows":"O número de filas debe ser un número maior que 0","invalidWidth":"O largo da táboa debe ser un número.","menu":"Propiedades da táboa","row":{"menu":"Fila","insertBefore":"Inserir a fila por riba","insertAfter":"Inserir a fila por baixo","deleteRow":"Eliminar filas"},"rows":"Filas","summary":"Resumo","title":"Propiedades da táboa","toolbar":"Taboa","widthPc":"porcentaxe","widthPx":"píxeles","widthUnit":"unidade do largo"},"undo":{"redo":"Refacer","undo":"Desfacer"},"widget":{"move":"Prema e arrastre para mover","label":"Trebello %1"},"uploadwidget":{"abort":"Envío interrompido polo usuario.","doneOne":"Ficheiro enviado satisfactoriamente.","doneMany":"%1 ficheiros enviados satisfactoriamente.","uploadOne":"Enviando o ficheiro ({percentage}%)...","uploadMany":"Enviando ficheiros, {current} de {max} feito o ({percentage}%)..."},"wsc":{"btnIgnore":"Ignorar","btnIgnoreAll":"Ignorar Todas","btnReplace":"Substituir","btnReplaceAll":"Substituir Todas","btnUndo":"Desfacer","changeTo":"Cambiar a","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"O corrector ortográfico non está instalado. ¿Quere descargalo agora?","manyChanges":"Corrección ortográfica rematada: %1 verbas substituidas","noChanges":"Corrección ortográfica rematada: Non se substituiu nengunha verba","noMispell":"Corrección ortográfica rematada: Non se atoparon erros","noSuggestions":"- Sen candidatos -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Non está no diccionario","oneChange":"Corrección ortográfica rematada: Unha verba substituida","progress":"Corrección ortográfica en progreso...","title":"Spell Checker","toolbar":"Corrección Ortográfica"}};

/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/mode-curly.js

ace.define("ace/mode/doc_comment_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var DocCommentHighlightRules = function() {
this.$rules = {
"start" : [ {
token : "comment.doc.tag",
regex : "@[\\w\\d_]+" // TODO: fix email addresses
}, {
token : "comment.doc.tag",
regex : "\\bTODO\\b"
}, {
defaultToken : "comment.doc"
}]
};
};
oop.inherits(DocCommentHighlightRules, TextHighlightRules);
DocCommentHighlightRules.getStartRule = function(start) {
return {
token : "comment.doc", // doc comment
regex : "\\/\\*(?=\\*)",
next : start
};
};
DocCommentHighlightRules.getEndRule = function (start) {
return {
token : "comment.doc", // closing comment
regex : "\\*\\/",
next : start
};
};
exports.DocCommentHighlightRules = DocCommentHighlightRules;
});
ace.define("ace/mode/javascript_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/doc_comment_highlight_rules","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var DocCommentHighlightRules = require("./doc_comment_highlight_rules").DocCommentHighlightRules;
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var JavaScriptHighlightRules = function() {
var keywordMapper = this.createKeywordMapper({
"variable.language":
"Array|Boolean|Date|Function|Iterator|Number|Object|RegExp|String|Proxy|" + // Constructors
"Namespace|QName|XML|XMLList|" + // E4X
"ArrayBuffer|Float32Array|Float64Array|Int16Array|Int32Array|Int8Array|" +
"Uint16Array|Uint32Array|Uint8Array|Uint8ClampedArray|" +
"Error|EvalError|InternalError|RangeError|ReferenceError|StopIteration|" + // Errors
"SyntaxError|TypeError|URIError|" +
"decodeURI|decodeURIComponent|encodeURI|encodeURIComponent|eval|isFinite|" + // Non-constructor functions
"isNaN|parseFloat|parseInt|" +
"JSON|Math|" + // Other
"this|arguments|prototype|window|document" , // Pseudo
"keyword":
"const|yield|import|get|set|" +
"break|case|catch|continue|default|delete|do|else|finally|for|function|" +
"if|in|instanceof|new|return|switch|throw|try|typeof|let|var|while|with|debugger|" +
"__parent__|__count__|escape|unescape|with|__proto__|" +
"class|enum|extends|super|export|implements|private|public|interface|package|protected|static",
"storage.type":
"const|let|var|function",
"constant.language":
"null|Infinity|NaN|undefined",
"support.function":
"alert",
"constant.language.boolean": "true|false"
}, "identifier");
var kwBeforeRe = "case|do|else|finally|in|instanceof|return|throw|try|typeof|yield|void";
var identifierRe = "[a-zA-Z\\$_\u00a1-\uffff][a-zA-Z\\d\\$_\u00a1-\uffff]*\\b";
var escapedRe = "\\\\(?:x[0-9a-fA-F]{2}|" + // hex
"u[0-9a-fA-F]{4}|" + // unicode
"[0-2][0-7]{0,2}|" + // oct
"3[0-6][0-7]?|" + // oct
"37[0-7]?|" + // oct
"[4-7][0-7]?|" + //oct
".)";
this.$rules = {
"no_regex" : [
{
token : "comment",
regex : "\\/\\/",
next : "line_comment"
},
DocCommentHighlightRules.getStartRule("doc-start"),
{
token : "comment", // multi line comment
regex : /\/\*/,
next : "comment"
}, {
token : "string",
regex : "'(?=.)",
next : "qstring"
}, {
token : "string",
regex : '"(?=.)',
next : "qqstring"
}, {
token : "constant.numeric", // hex
regex : /0[xX][0-9a-fA-F]+\b/
}, {
token : "constant.numeric", // float
regex : /[+-]?\d+(?:(?:\.\d*)?(?:[eE][+-]?\d+)?)?\b/
}, {
token : [
"storage.type", "punctuation.operator", "support.function",
"punctuation.operator", "entity.name.function", "text","keyword.operator"
],
regex : "(" + identifierRe + ")(\\.)(prototype)(\\.)(" + identifierRe +")(\\s*)(=)",
next: "function_arguments"
}, {
token : [
"storage.type", "punctuation.operator", "entity.name.function", "text",
"keyword.operator", "text", "storage.type", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"entity.name.function", "text", "keyword.operator", "text", "storage.type",
"text", "paren.lparen"
],
regex : "(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"storage.type", "punctuation.operator", "entity.name.function", "text",
"keyword.operator", "text",
"storage.type", "text", "entity.name.function", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s+)(\\w+)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"storage.type", "text", "entity.name.function", "text", "paren.lparen"
],
regex : "(function)(\\s+)(" + identifierRe + ")(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"entity.name.function", "text", "punctuation.operator",
"text", "storage.type", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\s*)(:)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"text", "text", "storage.type", "text", "paren.lparen"
],
regex : "(:)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : "keyword",
regex : "(?:" + kwBeforeRe + ")\\b",
next : "start"
}, {
token : ["punctuation.operator", "support.function"],
regex : /(\.)(s(?:h(?:ift|ow(?:Mod(?:elessDialog|alDialog)|Help))|croll(?:X|By(?:Pages|Lines)?|Y|To)?|t(?:op|rike)|i(?:n|zeToContent|debar|gnText)|ort|u(?:p|b(?:str(?:ing)?)?)|pli(?:ce|t)|e(?:nd|t(?:Re(?:sizable|questHeader)|M(?:i(?:nutes|lliseconds)|onth)|Seconds|Ho(?:tKeys|urs)|Year|Cursor|Time(?:out)?|Interval|ZOptions|Date|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Date|FullYear)|FullYear|Active)|arch)|qrt|lice|avePreferences|mall)|h(?:ome|andleEvent)|navigate|c(?:har(?:CodeAt|At)|o(?:s|n(?:cat|textual|firm)|mpile)|eil|lear(?:Timeout|Interval)?|a(?:ptureEvents|ll)|reate(?:StyleSheet|Popup|EventObject))|t(?:o(?:GMTString|S(?:tring|ource)|U(?:TCString|pperCase)|Lo(?:caleString|werCase))|est|a(?:n|int(?:Enabled)?))|i(?:s(?:NaN|Finite)|ndexOf|talics)|d(?:isableExternalCapture|ump|etachEvent)|u(?:n(?:shift|taint|escape|watch)|pdateCommands)|j(?:oin|avaEnabled)|p(?:o(?:p|w)|ush|lugins.refresh|a(?:ddings|rse(?:Int|Float)?)|r(?:int|ompt|eference))|e(?:scape|nableExternalCapture|val|lementFromPoint|x(?:p|ec(?:Script|Command)?))|valueOf|UTC|queryCommand(?:State|Indeterm|Enabled|Value)|f(?:i(?:nd|le(?:ModifiedDate|Size|CreatedDate|UpdatedDate)|xed)|o(?:nt(?:size|color)|rward)|loor|romCharCode)|watch|l(?:ink|o(?:ad|g)|astIndexOf)|a(?:sin|nchor|cos|t(?:tachEvent|ob|an(?:2)?)|pply|lert|b(?:s|ort))|r(?:ou(?:nd|teEvents)|e(?:size(?:By|To)|calc|turnValue|place|verse|l(?:oad|ease(?:Capture|Events)))|andom)|g(?:o|et(?:ResponseHeader|M(?:i(?:nutes|lliseconds)|onth)|Se(?:conds|lection)|Hours|Year|Time(?:zoneOffset)?|Da(?:y|te)|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Da(?:y|te)|FullYear)|FullYear|A(?:ttention|llResponseHeaders)))|m(?:in|ove(?:B(?:y|elow)|To(?:Absolute)?|Above)|ergeAttributes|a(?:tch|rgins|x))|b(?:toa|ig|o(?:ld|rderWidths)|link|ack))\b(?=\()/
}, {
token : ["punctuation.operator", "support.function.dom"],
regex : /(\.)(s(?:ub(?:stringData|mit)|plitText|e(?:t(?:NamedItem|Attribute(?:Node)?)|lect))|has(?:ChildNodes|Feature)|namedItem|c(?:l(?:ick|o(?:se|neNode))|reate(?:C(?:omment|DATASection|aption)|T(?:Head|extNode|Foot)|DocumentFragment|ProcessingInstruction|E(?:ntityReference|lement)|Attribute))|tabIndex|i(?:nsert(?:Row|Before|Cell|Data)|tem)|open|delete(?:Row|C(?:ell|aption)|T(?:Head|Foot)|Data)|focus|write(?:ln)?|a(?:dd|ppend(?:Child|Data))|re(?:set|place(?:Child|Data)|move(?:NamedItem|Child|Attribute(?:Node)?)?)|get(?:NamedItem|Element(?:sBy(?:Name|TagName)|ById)|Attribute(?:Node)?)|blur)\b(?=\()/
}, {
token : ["punctuation.operator", "support.constant"],
regex : /(\.)(s(?:ystemLanguage|cr(?:ipts|ollbars|een(?:X|Y|Top|Left))|t(?:yle(?:Sheets)?|atus(?:Text|bar)?)|ibling(?:Below|Above)|ource|uffixes|e(?:curity(?:Policy)?|l(?:ection|f)))|h(?:istory|ost(?:name)?|as(?:h|Focus))|y|X(?:MLDocument|SLDocument)|n(?:ext|ame(?:space(?:s|URI)|Prop))|M(?:IN_VALUE|AX_VALUE)|c(?:haracterSet|o(?:n(?:structor|trollers)|okieEnabled|lorDepth|mp(?:onents|lete))|urrent|puClass|l(?:i(?:p(?:boardData)?|entInformation)|osed|asses)|alle(?:e|r)|rypto)|t(?:o(?:olbar|p)|ext(?:Transform|Indent|Decoration|Align)|ags)|SQRT(?:1_2|2)|i(?:n(?:ner(?:Height|Width)|put)|ds|gnoreCase)|zIndex|o(?:scpu|n(?:readystatechange|Line)|uter(?:Height|Width)|p(?:sProfile|ener)|ffscreenBuffering)|NEGATIVE_INFINITY|d(?:i(?:splay|alog(?:Height|Top|Width|Left|Arguments)|rectories)|e(?:scription|fault(?:Status|Ch(?:ecked|arset)|View)))|u(?:ser(?:Profile|Language|Agent)|n(?:iqueID|defined)|pdateInterval)|_content|p(?:ixelDepth|ort|ersonalbar|kcs11|l(?:ugins|atform)|a(?:thname|dding(?:Right|Bottom|Top|Left)|rent(?:Window|Layer)?|ge(?:X(?:Offset)?|Y(?:Offset)?))|r(?:o(?:to(?:col|type)|duct(?:Sub)?|mpter)|e(?:vious|fix)))|e(?:n(?:coding|abledPlugin)|x(?:ternal|pando)|mbeds)|v(?:isibility|endor(?:Sub)?|Linkcolor)|URLUnencoded|P(?:I|OSITIVE_INFINITY)|f(?:ilename|o(?:nt(?:Size|Family|Weight)|rmName)|rame(?:s|Element)|gColor)|E|whiteSpace|l(?:i(?:stStyleType|n(?:eHeight|kColor))|o(?:ca(?:tion(?:bar)?|lName)|wsrc)|e(?:ngth|ft(?:Context)?)|a(?:st(?:M(?:odified|atch)|Index|Paren)|yer(?:s|X)|nguage))|a(?:pp(?:MinorVersion|Name|Co(?:deName|re)|Version)|vail(?:Height|Top|Width|Left)|ll|r(?:ity|guments)|Linkcolor|bove)|r(?:ight(?:Context)?|e(?:sponse(?:XML|Text)|adyState))|global|x|m(?:imeTypes|ultiline|enubar|argin(?:Right|Bottom|Top|Left))|L(?:N(?:10|2)|OG(?:10E|2E))|b(?:o(?:ttom|rder(?:Width|RightWidth|BottomWidth|Style|Color|TopWidth|LeftWidth))|ufferDepth|elow|ackground(?:Color|Image)))\b/
}, {
token : ["support.constant"],
regex : /that\b/
}, {
token : ["storage.type", "punctuation.operator", "support.function.firebug"],
regex : /(console)(\.)(warn|info|log|error|time|trace|timeEnd|assert)\b/
}, {
token : keywordMapper,
regex : identifierRe
}, {
token : "keyword.operator",
regex : /--|\+\+|[!$%&*+\-~]|===|==|=|!=|!==|<=|>=|<<=|>>=|>>>=|<>|<|>|!|&&|\|\||\?\:|\*=|%=|\+=|\-=|&=|\^=/,
next : "start"
}, {
token : "punctuation.operator",
regex : /\?|\:|\,|\;|\./,
next : "start"
}, {
token : "paren.lparen",
regex : /[\[({]/,
next : "start"
}, {
token : "paren.rparen",
regex : /[\])}]/
}, {
token : "keyword.operator",
regex : /\/=?/,
next : "start"
}, {
token: "comment",
regex: /^#!.*$/
}
],
"start": [
DocCommentHighlightRules.getStartRule("doc-start"),
{
token : "comment", // multi line comment
regex : "\\/\\*",
next : "comment_regex_allowed"
}, {
token : "comment",
regex : "\\/\\/",
next : "line_comment_regex_allowed"
}, {
token: "string.regexp",
regex: "\\/",
next: "regex"
}, {
token : "text",
regex : "\\s+|^$",
next : "start"
}, {
token: "empty",
regex: "",
next: "no_regex"
}
],
"regex": [
{
token: "regexp.keyword.operator",
regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
}, {
token: "string.regexp",
regex: "/[sxngimy]*",
next: "no_regex"
}, {
token : "invalid",
regex: /\{\d+\b,?\d*\}[+*]|[+*$^?][+*]|[$^][?]|\?{3,}/
}, {
token : "constant.language.escape",
regex: /\(\?[:=!]|\)|\{\d+\b,?\d*\}|[+*]\?|[()$^+*?.]/
}, {
token : "constant.language.delimiter",
regex: /\|/
}, {
token: "constant.language.escape",
regex: /\[\^?/,
next: "regex_character_class"
}, {
token: "empty",
regex: "$",
next: "no_regex"
}, {
defaultToken: "string.regexp"
}
],
"regex_character_class": [
{
token: "regexp.keyword.operator",
regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
}, {
token: "constant.language.escape",
regex: "]",
next: "regex"
}, {
token: "constant.language.escape",
regex: "-"
}, {
token: "empty",
regex: "$",
next: "no_regex"
}, {
defaultToken: "string.regexp.charachterclass"
}
],
"function_arguments": [
{
token: "variable.parameter",
regex: identifierRe
}, {
token: "punctuation.operator",
regex: "[, ]+"
}, {
token: "punctuation.operator",
regex: "$"
}, {
token: "empty",
regex: "",
next: "no_regex"
}
],
"comment_regex_allowed" : [
{token : "comment", regex : "\\*\\/", next : "start"},
{defaultToken : "comment"}
],
"comment" : [
{token : "comment", regex : "\\*\\/", next : "no_regex"},
{defaultToken : "comment"}
],
"line_comment_regex_allowed" : [
{token : "comment", regex : "$|^", next : "start"},
{defaultToken : "comment"}
],
"line_comment" : [
{token : "comment", regex : "$|^", next : "no_regex"},
{defaultToken : "comment"}
],
"qqstring" : [
{
token : "constant.language.escape",
regex : escapedRe
}, {
token : "string",
regex : "\\\\$",
next : "qqstring"
}, {
token : "string",
regex : '"|$',
next : "no_regex"
}, {
defaultToken: "string"
}
],
"qstring" : [
{
token : "constant.language.escape",
regex : escapedRe
}, {
token : "string",
regex : "\\\\$",
next : "qstring"
}, {
token : "string",
regex : "'|$",
next : "no_regex"
}, {
defaultToken: "string"
}
]
};
this.embedRules(DocCommentHighlightRules, "doc-",
[ DocCommentHighlightRules.getEndRule("no_regex") ]);
};
oop.inherits(JavaScriptHighlightRules, TextHighlightRules);
exports.JavaScriptHighlightRules = JavaScriptHighlightRules;
});
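// Illustrative sketch (not part of mode-curly.js): the rules above are plain
// state-machine data; Ace's generic Tokenizer consumes them line by line.
// Assumes the built ace.js is loaded so that ace.require resolves these modules.
function demoTokenizeLine() { // hypothetical helper, never called by the mode itself
    var Tokenizer = ace.require("ace/tokenizer").Tokenizer;
    var JsRules = ace.require("ace/mode/javascript_highlight_rules").JavaScriptHighlightRules;
    var tokenizer = new Tokenizer(new JsRules().getRules());
    // getLineTokens(line, startState) returns {tokens: [{type, value}, ...], state};
    // the returned state is fed back in to tokenize the following line.
    var result = tokenizer.getLineTokens("var x = 1; // demo", "start");
    result.tokens.forEach(function(token) {
        console.log(token.type, JSON.stringify(token.value));
    });
}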
ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"], function(require, exports, module) {
"use strict";
var Range = require("../range").Range;
var MatchingBraceOutdent = function() {};
(function() {
this.checkOutdent = function(line, input) {
if (! /^\s+$/.test(line))
return false;
return /^\s*\}/.test(input);
};
this.autoOutdent = function(doc, row) {
var line = doc.getLine(row);
var match = line.match(/^(\s*\})/);
if (!match) return 0;
var column = match[1].length;
var openBracePos = doc.findMatchingBracket({row: row, column: column});
if (!openBracePos || openBracePos.row == row) return 0;
var indent = this.$getIndent(doc.getLine(openBracePos.row));
doc.replace(new Range(row, 0, row, column-1), indent);
};
this.$getIndent = function(line) {
return line.match(/^\s*/)[0];
};
}).call(MatchingBraceOutdent.prototype);
exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
ace.define("ace/mode/behaviour/cstyle",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
var lang = require("../../lib/lang");
var SAFE_INSERT_IN_TOKENS =
["text", "paren.rparen", "punctuation.operator"];
var SAFE_INSERT_BEFORE_TOKENS =
["text", "paren.rparen", "punctuation.operator", "comment"];
var context;
var contextCache = {};
var initContext = function(editor) {
var id = -1;
if (editor.multiSelect) {
id = editor.selection.id;
if (contextCache.rangeCount != editor.multiSelect.rangeCount)
contextCache = {rangeCount: editor.multiSelect.rangeCount};
}
if (contextCache[id])
return context = contextCache[id];
context = contextCache[id] = {
autoInsertedBrackets: 0,
autoInsertedRow: -1,
autoInsertedLineEnd: "",
maybeInsertedBrackets: 0,
maybeInsertedRow: -1,
maybeInsertedLineStart: "",
maybeInsertedLineEnd: ""
};
};
var CstyleBehaviour = function() {
this.add("braces", "insertion", function(state, action, editor, session, text) {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
if (text == '{') {
initContext(editor);
var selection = editor.getSelectionRange();
var selected = session.doc.getTextRange(selection);
if (selected !== "" && selected !== "{" && editor.getWrapBehavioursEnabled()) {
return {
text: '{' + selected + '}',
selection: false
};
} else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
if (/[\]\}\)]/.test(line[cursor.column]) || editor.inMultiSelectMode) {
CstyleBehaviour.recordAutoInsert(editor, session, "}");
return {
text: '{}',
selection: [1, 1]
};
} else {
CstyleBehaviour.recordMaybeInsert(editor, session, "{");
return {
text: '{',
selection: [1, 1]
};
}
}
} else if (text == '}') {
initContext(editor);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar == '}') {
var matching = session.$findOpeningBracket('}', {column: cursor.column + 1, row: cursor.row});
if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
CstyleBehaviour.popAutoInsertedClosing();
return {
text: '',
selection: [1, 1]
};
}
}
} else if (text == "\n" || text == "\r\n") {
initContext(editor);
var closing = "";
if (CstyleBehaviour.isMaybeInsertedClosing(cursor, line)) {
closing = lang.stringRepeat("}", context.maybeInsertedBrackets);
CstyleBehaviour.clearMaybeInsertedClosing();
}
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar === '}') {
var openBracePos = session.findMatchingBracket({row: cursor.row, column: cursor.column+1}, '}');
if (!openBracePos)
return null;
var next_indent = this.$getIndent(session.getLine(openBracePos.row));
} else if (closing) {
var next_indent = this.$getIndent(line);
} else {
CstyleBehaviour.clearMaybeInsertedClosing();
return;
}
var indent = next_indent + session.getTabString();
return {
text: '\n' + indent + '\n' + next_indent + closing,
selection: [1, indent.length, 1, indent.length]
};
} else {
CstyleBehaviour.clearMaybeInsertedClosing();
}
});
this.add("braces", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && selected == '{') {
initContext(editor);
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.end.column, range.end.column + 1);
if (rightChar == '}') {
range.end.column++;
return range;
} else {
context.maybeInsertedBrackets--;
}
}
});
this.add("parens", "insertion", function(state, action, editor, session, text) {
if (text == '(') {
initContext(editor);
var selection = editor.getSelectionRange();
var selected = session.doc.getTextRange(selection);
if (selected !== "" && editor.getWrapBehavioursEnabled()) {
return {
text: '(' + selected + ')',
selection: false
};
} else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
CstyleBehaviour.recordAutoInsert(editor, session, ")");
return {
text: '()',
selection: [1, 1]
};
}
} else if (text == ')') {
initContext(editor);
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar == ')') {
var matching = session.$findOpeningBracket(')', {column: cursor.column + 1, row: cursor.row});
if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
CstyleBehaviour.popAutoInsertedClosing();
return {
text: '',
selection: [1, 1]
};
}
}
}
});
this.add("parens", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && selected == '(') {
initContext(editor);
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == ')') {
range.end.column++;
return range;
}
}
});
this.add("brackets", "insertion", function(state, action, editor, session, text) {
if (text == '[') {
initContext(editor);
var selection = editor.getSelectionRange();
var selected = session.doc.getTextRange(selection);
if (selected !== "" && editor.getWrapBehavioursEnabled()) {
return {
text: '[' + selected + ']',
selection: false
};
} else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
CstyleBehaviour.recordAutoInsert(editor, session, "]");
return {
text: '[]',
selection: [1, 1]
};
}
} else if (text == ']') {
initContext(editor);
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar == ']') {
var matching = session.$findOpeningBracket(']', {column: cursor.column + 1, row: cursor.row});
if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
CstyleBehaviour.popAutoInsertedClosing();
return {
text: '',
selection: [1, 1]
};
}
}
}
});
this.add("brackets", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && selected == '[') {
initContext(editor);
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == ']') {
range.end.column++;
return range;
}
}
});
this.add("string_dquotes", "insertion", function(state, action, editor, session, text) {
if (text == '"' || text == "'") {
initContext(editor);
var quote = text;
var selection = editor.getSelectionRange();
var selected = session.doc.getTextRange(selection);
if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) {
return {
text: quote + selected + quote,
selection: false
};
} else {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var leftChar = line.substring(cursor.column-1, cursor.column);
if (leftChar == '\\') {
return null;
}
var tokens = session.getTokens(selection.start.row);
var col = 0, token;
var quotepos = -1; // Track whether we're inside an open quote.
for (var x = 0; x < tokens.length; x++) {
token = tokens[x];
if (token.type == "string") {
quotepos = -1;
} else if (quotepos < 0) {
quotepos = token.value.indexOf(quote);
}
if ((token.value.length + col) > selection.start.column) {
break;
}
col += tokens[x].value.length;
}
if (!token || (quotepos < 0 && token.type !== "comment" && (token.type !== "string" || ((selection.start.column !== token.value.length+col-1) && token.value.lastIndexOf(quote) === token.value.length-1)))) {
if (!CstyleBehaviour.isSaneInsertion(editor, session))
return;
return {
text: quote + quote,
selection: [1,1]
};
} else if (token && token.type === "string") {
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar == quote) {
return {
text: '',
selection: [1, 1]
};
}
}
}
}
});
this.add("string_dquotes", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && (selected == '"' || selected == "'")) {
initContext(editor);
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == selected) {
range.end.column++;
return range;
}
}
});
};
CstyleBehaviour.isSaneInsertion = function(editor, session) {
var cursor = editor.getCursorPosition();
var iterator = new TokenIterator(session, cursor.row, cursor.column);
if (!this.$matchTokenType(iterator.getCurrentToken() || "text", SAFE_INSERT_IN_TOKENS)) {
var iterator2 = new TokenIterator(session, cursor.row, cursor.column + 1);
if (!this.$matchTokenType(iterator2.getCurrentToken() || "text", SAFE_INSERT_IN_TOKENS))
return false;
}
iterator.stepForward();
return iterator.getCurrentTokenRow() !== cursor.row ||
this.$matchTokenType(iterator.getCurrentToken() || "text", SAFE_INSERT_BEFORE_TOKENS);
};
CstyleBehaviour.$matchTokenType = function(token, types) {
return types.indexOf(token.type || token) > -1;
};
CstyleBehaviour.recordAutoInsert = function(editor, session, bracket) {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
if (!this.isAutoInsertedClosing(cursor, line, context.autoInsertedLineEnd[0]))
context.autoInsertedBrackets = 0;
context.autoInsertedRow = cursor.row;
context.autoInsertedLineEnd = bracket + line.substr(cursor.column);
context.autoInsertedBrackets++;
};
CstyleBehaviour.recordMaybeInsert = function(editor, session, bracket) {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
if (!this.isMaybeInsertedClosing(cursor, line))
context.maybeInsertedBrackets = 0;
context.maybeInsertedRow = cursor.row;
context.maybeInsertedLineStart = line.substr(0, cursor.column) + bracket;
context.maybeInsertedLineEnd = line.substr(cursor.column);
context.maybeInsertedBrackets++;
};
CstyleBehaviour.isAutoInsertedClosing = function(cursor, line, bracket) {
return context.autoInsertedBrackets > 0 &&
cursor.row === context.autoInsertedRow &&
bracket === context.autoInsertedLineEnd[0] &&
line.substr(cursor.column) === context.autoInsertedLineEnd;
};
CstyleBehaviour.isMaybeInsertedClosing = function(cursor, line) {
return context.maybeInsertedBrackets > 0 &&
cursor.row === context.maybeInsertedRow &&
line.substr(cursor.column) === context.maybeInsertedLineEnd &&
line.substr(0, cursor.column) == context.maybeInsertedLineStart;
};
CstyleBehaviour.popAutoInsertedClosing = function() {
context.autoInsertedLineEnd = context.autoInsertedLineEnd.substr(1);
context.autoInsertedBrackets--;
};
CstyleBehaviour.clearMaybeInsertedClosing = function() {
if (context) {
context.maybeInsertedBrackets = 0;
context.maybeInsertedRow = -1;
}
};
oop.inherits(CstyleBehaviour, Behaviour);
exports.CstyleBehaviour = CstyleBehaviour;
});
ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(commentRegex) {
if (commentRegex) {
this.foldingStartMarker = new RegExp(
this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start)
);
this.foldingStopMarker = new RegExp(
this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end)
);
}
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.foldingStartMarker = /(\{|\[)[^\}\]]*$|^\s*(\/\*)/;
this.foldingStopMarker = /^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/;
this.getFoldWidgetRange = function(session, foldStyle, row, forceMultiline) {
var line = session.getLine(row);
var match = line.match(this.foldingStartMarker);
if (match) {
var i = match.index;
if (match[1])
return this.openingBracketBlock(session, match[1], row, i);
var range = session.getCommentFoldRange(row, i + match[0].length, 1);
if (range && !range.isMultiLine()) {
if (forceMultiline) {
range = this.getSectionRange(session, row);
} else if (foldStyle != "all")
range = null;
}
return range;
}
if (foldStyle === "markbegin")
return;
var match = line.match(this.foldingStopMarker);
if (match) {
var i = match.index + match[0].length;
if (match[1])
return this.closingBracketBlock(session, match[1], row, i);
return session.getCommentFoldRange(row, i, -1);
}
};
this.getSectionRange = function(session, row) {
var line = session.getLine(row);
var startIndent = line.search(/\S/);
var startRow = row;
var startColumn = line.length;
row = row + 1;
var endRow = row;
var maxRow = session.getLength();
while (++row < maxRow) {
line = session.getLine(row);
var indent = line.search(/\S/);
if (indent === -1)
continue;
if (startIndent > indent)
break;
var subRange = this.getFoldWidgetRange(session, "all", row);
if (subRange) {
if (subRange.start.row <= startRow) {
break;
} else if (subRange.isMultiLine()) {
row = subRange.end.row;
} else if (startIndent == indent) {
break;
}
}
endRow = row;
}
return new Range(startRow, startColumn, endRow, session.getLine(endRow).length);
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/javascript",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/javascript_highlight_rules","ace/mode/matching_brace_outdent","ace/range","ace/worker/worker_client","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var CstyleBehaviour = require("./behaviour/cstyle").CstyleBehaviour;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;
var Mode = function() {
this.HighlightRules = JavaScriptHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.$behaviour = new CstyleBehaviour();
this.foldingRules = new CStyleFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "//";
this.blockComment = {start: "/*", end: "*/"};
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
var endState = tokenizedLine.state;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start" || state == "no_regex") {
var match = line.match(/^.*(?:\bcase\b.*\:|[\{\(\[])\s*$/);
if (match) {
indent += tab;
}
} else if (state == "doc-start") {
if (endState == "start" || endState == "no_regex") {
return "";
}
var match = line.match(/^\s*(\/?)\*/);
if (match) {
if (match[1]) {
indent += " ";
}
indent += "* ";
}
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return this.$outdent.checkOutdent(line, input);
};
this.autoOutdent = function(state, doc, row) {
this.$outdent.autoOutdent(doc, row);
};
this.createWorker = function(session) {
var worker = new WorkerClient(["ace"], "ace/mode/javascript_worker", "JavaScriptWorker");
worker.attachToDocument(session.getDocument());
worker.on("jslint", function(results) {
session.setAnnotations(results.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/javascript";
}).call(Mode.prototype);
exports.Mode = Mode;
});
ace.define("ace/mode/css_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var supportType = exports.supportType = "animation-fill-mode|alignment-adjust|alignment-baseline|animation-delay|animation-direction|animation-duration|animation-iteration-count|animation-name|animation-play-state|animation-timing-function|animation|appearance|azimuth|backface-visibility|background-attachment|background-break|background-clip|background-color|background-image|background-origin|background-position|background-repeat|background-size|background|baseline-shift|binding|bleed|bookmark-label|bookmark-level|bookmark-state|bookmark-target|border-bottom|border-bottom-color|border-bottom-left-radius|border-bottom-right-radius|border-bottom-style|border-bottom-width|border-collapse|border-color|border-image|border-image-outset|border-image-repeat|border-image-slice|border-image-source|border-image-width|border-left|border-left-color|border-left-style|border-left-width|border-radius|border-right|border-right-color|border-right-style|border-right-width|border-spacing|border-style|border-top|border-top-color|border-top-left-radius|border-top-right-radius|border-top-style|border-top-width|border-width|border|bottom|box-align|box-decoration-break|box-direction|box-flex-group|box-flex|box-lines|box-ordinal-group|box-orient|box-pack|box-shadow|box-sizing|break-after|break-before|break-inside|caption-side|clear|clip|color-profile|color|column-count|column-fill|column-gap|column-rule|column-rule-color|column-rule-style|column-rule-width|column-span|column-width|columns|content|counter-increment|counter-reset|crop|cue-after|cue-before|cue|cursor|direction|display|dominant-baseline|drop-initial-after-adjust|drop-initial-after-align|drop-initial-before-adjust|drop-initial-before-align|drop-initial-size|drop-initial-value|elevation|empty-cells|fit|fit-position|float-offset|float|font-family|font-size|font-size-adjust|font-stretch|font-style|font-variant|font-weight|font|grid-columns|grid-rows|hanging-punctuation|height|hyphenate-after|hyphenate-before|hyphenate-character|hyphenate-lines|hyphenate-resource|hyphens|icon|image-orientation|image-rendering|image-resolution|inline-box-align|left|letter-spacing|line-height|line-stacking-ruby|line-stacking-shift|line-stacking-strategy|line-stacking|list-style-image|list-style-position|list-style-type|list-style|margin-bottom|margin-left|margin-right|margin-top|margin|mark-after|mark-before|mark|marks|marquee-direction|marquee-play-count|marquee-speed|marquee-style|max-height|max-width|min-height|min-width|move-to|nav-down|nav-index|nav-left|nav-right|nav-up|opacity|orphans|outline-color|outline-offset|outline-style|outline-width|outline|overflow-style|overflow-x|overflow-y|overflow|padding-bottom|padding-left|padding-right|padding-top|padding|page-break-after|page-break-before|page-break-inside|page-policy|page|pause-after|pause-before|pause|perspective-origin|perspective|phonemes|pitch-range|pitch|play-during|pointer-events|position|presentation-level|punctuation-trim|quotes|rendering-intent|resize|rest-after|rest-before|rest|richness|right|rotation-point|rotation|ruby-align|ruby-overhang|ruby-position|ruby-span|size|speak-header|speak-numeral|speak-punctuation|speak|speech-rate|stress|string-set|table-layout|target-name|target-new|target-position|target|text-align-last|text-align|text-decoration|text-emphasis|text-height|text-indent|text-justify|text-outline|text-shadow|text-transform|text-wrap|top|transform-origin|transform-style|transform|transition-delay|transition-duration|transition-property|transition-timing-function|transition|unicode-bidi|vertical-align|visibility|voice-balance|voice-duration|voice-family|voice-pitch-range|voice-pitch|voice-rate|voice-stress|voice-volume|volume|white-space-collapse|white-space|widows|width|word-break|word-spacing|word-wrap|z-index";
var supportFunction = exports.supportFunction = "rgb|rgba|url|attr|counter|counters";
var supportConstant = exports.supportConstant = "absolute|after-edge|after|all-scroll|all|alphabetic|always|antialiased|armenian|auto|avoid-column|avoid-page|avoid|balance|baseline|before-edge|before|below|bidi-override|block-line-height|block|bold|bolder|border-box|both|bottom|box|break-all|break-word|capitalize|caps-height|caption|center|central|char|circle|cjk-ideographic|clone|close-quote|col-resize|collapse|column|consider-shifts|contain|content-box|cover|crosshair|cubic-bezier|dashed|decimal-leading-zero|decimal|default|disabled|disc|disregard-shifts|distribute-all-lines|distribute-letter|distribute-space|distribute|dotted|double|e-resize|ease-in|ease-in-out|ease-out|ease|ellipsis|end|exclude-ruby|fill|fixed|georgian|glyphs|grid-height|groove|hand|hanging|hebrew|help|hidden|hiragana-iroha|hiragana|horizontal|icon|ideograph-alpha|ideograph-numeric|ideograph-parenthesis|ideograph-space|ideographic|inactive|include-ruby|inherit|initial|inline-block|inline-box|inline-line-height|inline-table|inline|inset|inside|inter-ideograph|inter-word|invert|italic|justify|katakana-iroha|katakana|keep-all|last|left|lighter|line-edge|line-through|line|linear|list-item|local|loose|lower-alpha|lower-greek|lower-latin|lower-roman|lowercase|lr-tb|ltr|mathematical|max-height|max-size|medium|menu|message-box|middle|move|n-resize|ne-resize|newspaper|no-change|no-close-quote|no-drop|no-open-quote|no-repeat|none|normal|not-allowed|nowrap|nw-resize|oblique|open-quote|outset|outside|overline|padding-box|page|pointer|pre-line|pre-wrap|pre|preserve-3d|progress|relative|repeat-x|repeat-y|repeat|replaced|reset-size|ridge|right|round|row-resize|rtl|s-resize|scroll|se-resize|separate|slice|small-caps|small-caption|solid|space|square|start|static|status-bar|step-end|step-start|steps|stretch|strict|sub|super|sw-resize|table-caption|table-cell|table-column-group|table-column|table-footer-group|table-header-group|table-row-group|table-row|table|tb-rl|text-after-edge|text-before-edge|text-bottom|text-size|text-top|text|thick|thin|transparent|underline|upper-alpha|upper-latin|upper-roman|uppercase|use-script|vertical-ideographic|vertical-text|visible|w-resize|wait|whitespace|z-index|zero";
var supportConstantColor = exports.supportConstantColor = "aqua|black|blue|fuchsia|gray|green|lime|maroon|navy|olive|orange|purple|red|silver|teal|white|yellow";
var supportConstantFonts = exports.supportConstantFonts = "arial|century|comic|courier|garamond|georgia|helvetica|impact|lucida|symbol|system|tahoma|times|trebuchet|utopia|verdana|webdings|sans-serif|serif|monospace";
var numRe = exports.numRe = "\\-?(?:(?:[0-9]+)|(?:[0-9]*\\.[0-9]+))";
var pseudoElements = exports.pseudoElements = "(\\:+)\\b(after|before|first-letter|first-line|moz-selection|selection)\\b";
var pseudoClasses = exports.pseudoClasses = "(:)\\b(active|checked|disabled|empty|enabled|first-child|first-of-type|focus|hover|indeterminate|invalid|last-child|last-of-type|link|not|nth-child|nth-last-child|nth-last-of-type|nth-of-type|only-child|only-of-type|required|root|target|valid|visited)\\b";
var CssHighlightRules = function() {
var keywordMapper = this.createKeywordMapper({
"support.function": supportFunction,
"support.constant": supportConstant,
"support.type": supportType,
"support.constant.color": supportConstantColor,
"support.constant.fonts": supportConstantFonts
}, "text", true);
this.$rules = {
"start" : [{
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token: "paren.lparen",
regex: "\\{",
push: "ruleset"
}, {
token: "string",
regex: "@.*?{",
push: "media"
}, {
token: "keyword",
regex: "#[a-z0-9-_]+"
}, {
token: "variable",
regex: "\\.[a-z0-9-_]+"
}, {
token: "string",
regex: ":[a-z0-9-_]+"
}, {
token: "constant",
regex: "[a-z0-9-_]+"
}, {
caseInsensitive: true
}],
"media" : [{
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token: "paren.lparen",
regex: "\\{",
push: "ruleset"
}, {
token: "string",
regex: "\\}",
next: "pop"
}, {
token: "keyword",
regex: "#[a-z0-9-_]+"
}, {
token: "variable",
regex: "\\.[a-z0-9-_]+"
}, {
token: "string",
regex: ":[a-z0-9-_]+"
}, {
token: "constant",
regex: "[a-z0-9-_]+"
}, {
caseInsensitive: true
}],
"comment" : [{
token : "comment",
regex : "\\*\\/",
next : "pop"
}, {
defaultToken : "comment"
}],
"ruleset" : [
{
token : "paren.rparen",
regex : "\\}",
next: "pop"
}, {
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token : "string", // single line
regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]'
}, {
token : "string", // single line
regex : "['](?:(?:\\\\.)|(?:[^'\\\\]))*?[']"
}, {
token : ["constant.numeric", "keyword"],
regex : "(" + numRe + ")(ch|cm|deg|em|ex|fr|gd|grad|Hz|in|kHz|mm|ms|pc|pt|px|rad|rem|s|turn|vh|vm|vw|%)"
}, {
token : "constant.numeric",
regex : numRe
}, {
token : "constant.numeric", // hex6 color
regex : "#[a-f0-9]{6}"
}, {
token : "constant.numeric", // hex3 color
regex : "#[a-f0-9]{3}"
}, {
token : ["punctuation", "entity.other.attribute-name.pseudo-element.css"],
regex : pseudoElements
}, {
token : ["punctuation", "entity.other.attribute-name.pseudo-class.css"],
regex : pseudoClasses
}, {
token : ["support.function", "string", "support.function"],
regex : "(url\\()(.*)(\\))"
}, {
token : keywordMapper,
regex : "\\-?[a-zA-Z_][a-zA-Z0-9_\\-]*"
}, {
caseInsensitive: true
}]
};
this.normalizeRules();
};
oop.inherits(CssHighlightRules, TextHighlightRules);
exports.CssHighlightRules = CssHighlightRules;
});
ace.define("ace/mode/behaviour/css",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/mode/behaviour/cstyle","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var CstyleBehaviour = require("./cstyle").CstyleBehaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
var CssBehaviour = function () {
this.inherit(CstyleBehaviour);
this.add("colon", "insertion", function (state, action, editor, session, text) {
if (text === ':') {
var cursor = editor.getCursorPosition();
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (token && token.value.match(/\s+/)) {
token = iterator.stepBackward();
}
if (token && token.type === 'support.type') {
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar === ':') {
return {
text: '',
selection: [1, 1]
}
}
if (!line.substring(cursor.column).match(/^\s*;/)) {
return {
text: ':;',
selection: [1, 1]
}
}
}
}
});
this.add("colon", "deletion", function (state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && selected === ':') {
var cursor = editor.getCursorPosition();
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (token && token.value.match(/\s+/)) {
token = iterator.stepBackward();
}
if (token && token.type === 'support.type') {
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.end.column, range.end.column + 1);
if (rightChar === ';') {
range.end.column ++;
return range;
}
}
}
});
this.add("semicolon", "insertion", function (state, action, editor, session, text) {
if (text === ';') {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar === ';') {
return {
text: '',
selection: [1, 1]
}
}
}
});
};
oop.inherits(CssBehaviour, CstyleBehaviour);
exports.CssBehaviour = CssBehaviour;
});
ace.define("ace/mode/css",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/css_highlight_rules","ace/mode/matching_brace_outdent","ace/worker/worker_client","ace/mode/behaviour/css","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var CssBehaviour = require("./behaviour/css").CssBehaviour;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;
var Mode = function() {
this.HighlightRules = CssHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.$behaviour = new CssBehaviour();
this.foldingRules = new CStyleFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.foldingRules = "cStyle";
this.blockComment = {start: "/*", end: "*/"};
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokens = this.getTokenizer().getLineTokens(line, state).tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
var match = line.match(/^.*\{\s*$/);
if (match) {
indent += tab;
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return this.$outdent.checkOutdent(line, input);
};
this.autoOutdent = function(state, doc, row) {
this.$outdent.autoOutdent(doc, row);
};
this.createWorker = function(session) {
var worker = new WorkerClient(["ace"], "ace/mode/css_worker", "Worker");
worker.attachToDocument(session.getDocument());
worker.on("csslint", function(e) {
session.setAnnotations(e.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/css";
}).call(Mode.prototype);
exports.Mode = Mode;
});
ace.define("ace/mode/xml_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var XmlHighlightRules = function(normalize) {
this.$rules = {
start : [
{token : "string.cdata.xml", regex : "<\\!\\[CDATA\\[", next : "cdata"},
{
token : ["punctuation.xml-decl.xml", "keyword.xml-decl.xml"],
regex : "(<\\?)(xml)(?=[\\s])", next : "xml_decl", caseInsensitive: true
},
{
token : ["punctuation.instruction.xml", "keyword.instruction.xml"],
regex : "(<\\?)([-_a-zA-Z0-9]+)", next : "processing_instruction",
},
{token : "comment.xml", regex : "<\\!--", next : "comment"},
{
token : ["xml-pe.doctype.xml", "xml-pe.doctype.xml"],
regex : "(<\\!)(DOCTYPE)(?=[\\s])", next : "doctype", caseInsensitive: true
},
{include : "tag"},
{token : "text.end-tag-open.xml", regex: "</"},
{token : "text.tag-open.xml", regex: "<"},
{include : "reference"},
{defaultToken : "text.xml"}
],
xml_decl : [{
token : "entity.other.attribute-name.decl-attribute-name.xml",
regex : "(?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+"
}, {
token : "keyword.operator.decl-attribute-equals.xml",
regex : "="
}, {
include: "whitespace"
}, {
include: "string"
}, {
token : "punctuation.xml-decl.xml",
regex : "\\?>",
next : "start"
}],
processing_instruction : [
{token : "punctuation.instruction.xml", regex : "\\?>", next : "start"},
{defaultToken : "instruction.xml"}
],
doctype : [
{include : "whitespace"},
{include : "string"},
{token : "xml-pe.doctype.xml", regex : ">", next : "start"},
{token : "xml-pe.xml", regex : "[-_a-zA-Z0-9:]+"},
{token : "punctuation.int-subset", regex : "\\[", push : "int_subset"}
],
int_subset : [{
token : "text.xml",
regex : "\\s+"
}, {
token: "punctuation.int-subset.xml",
regex: "]",
next: "pop"
}, {
token : ["punctuation.markup-decl.xml", "keyword.markup-decl.xml"],
regex : "(<\\!)([-_a-zA-Z0-9]+)",
push : [{
token : "text",
regex : "\\s+"
},
{
token : "punctuation.markup-decl.xml",
regex : ">",
next : "pop"
},
{include : "string"}]
}],
cdata : [
{token : "string.cdata.xml", regex : "\\]\\]>", next : "start"},
{token : "text.xml", regex : "\\s+"},
{token : "text.xml", regex : "(?:[^\\]]|\\](?!\\]>))+"}
],
comment : [
{token : "comment.xml", regex : "-->", next : "start"},
{defaultToken : "comment.xml"}
],
reference : [{
token : "constant.language.escape.reference.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
attr_reference : [{
token : "constant.language.escape.reference.attribute-value.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
tag : [{
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag.punctuation.end-tag-open.xml", "meta.tag.tag-name.xml"],
regex : "(?:(<)|(</))((?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+)",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"}
]
}],
tag_whitespace : [
{token : "text.tag-whitespace.xml", regex : "\\s+"}
],
whitespace : [
{token : "text.whitespace.xml", regex : "\\s+"}
],
string: [{
token : "string.xml",
regex : "'",
push : [
{token : "string.xml", regex: "'", next: "pop"},
{defaultToken : "string.xml"}
]
}, {
token : "string.xml",
regex : '"',
push : [
{token : "string.xml", regex: '"', next: "pop"},
{defaultToken : "string.xml"}
]
}],
attributes: [{
token : "entity.other.attribute-name.xml",
regex : "(?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+"
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "="
}, {
include: "tag_whitespace"
}, {
include: "attribute_value"
}],
attribute_value: [{
token : "string.attribute-value.xml",
regex : "'",
push : [
{token : "string.attribute-value.xml", regex: "'", next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}, {
token : "string.attribute-value.xml",
regex : '"',
push : [
{token : "string.attribute-value.xml", regex: '"', next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}]
};
if (this.constructor === XmlHighlightRules)
this.normalizeRules();
};
(function() {
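// Added comment: embedTagRules wires an embedded language into the XML/HTML
// tokenizer. It unshifts an opening-tag rule that jumps into the embedded
// mode's prefixed start state (e.g. "css-start" for <style>), registers a
// "<tag>-end" state that clears the whole state stack when the tag closes,
// and uses embedRules() so a matching closing tag (or CDATA markers) escapes
// the embedded rules.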
this.embedTagRules = function(HighlightRules, prefix, tag){
this.$rules.tag.unshift({
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(<)(" + tag + "(?=\\s|>|$))",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : prefix + "start"}
]
});
this.$rules[tag + "-end"] = [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next: "start",
onMatch : function(value, currentState, stack) {
stack.splice(0);
return this.token;
}}
];
this.embedRules(HighlightRules, prefix, [{
token: ["meta.tag.punctuation.end-tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(</)(" + tag + "(?=\\s|>|$))",
next: tag + "-end"
}, {
token: "string.cdata.xml",
regex : "<\\!\\[CDATA\\["
}, {
token: "string.cdata.xml",
regex : "\\]\\]>"
}]);
};
}).call(TextHighlightRules.prototype);
oop.inherits(XmlHighlightRules, TextHighlightRules);
exports.XmlHighlightRules = XmlHighlightRules;
});
ace.define("ace/mode/html_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/css_highlight_rules","ace/mode/javascript_highlight_rules","ace/mode/xml_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules;
var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var XmlHighlightRules = require("./xml_highlight_rules").XmlHighlightRules;
var tagMap = lang.createMap({
a : 'anchor',
button : 'form',
form : 'form',
img : 'image',
input : 'form',
label : 'form',
option : 'form',
script : 'script',
select : 'form',
textarea : 'form',
style : 'style',
table : 'table',
tbody : 'table',
td : 'table',
tfoot : 'table',
th : 'table',
tr : 'table'
});
var HtmlHighlightRules = function() {
XmlHighlightRules.call(this);
this.addRules({
attributes: [{
include : "tag_whitespace"
}, {
token : "entity.other.attribute-name.xml",
regex : "[-_a-zA-Z0-9:]+"
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "=",
push : [{
include: "tag_whitespace"
}, {
token : "string.unquoted.attribute-value.html",
regex : "[^<>='\"`\\s]+",
next : "pop"
}, {
token : "empty",
regex : "",
next : "pop"
}]
}, {
include : "attribute_value"
}],
tag: [{
token : function(start, tag) {
var group = tagMap[tag];
return ["meta.tag.punctuation." + (start == "<" ? "" : "end-") + "tag-open.xml",
"meta.tag" + (group ? "." + group : "") + ".tag-name.xml"];
},
regex : "(</?)([-_a-zA-Z0-9:]+)",
next: "tag_stuff"
}],
tag_stuff: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"}
],
});
this.embedTagRules(CssHighlightRules, "css-", "style");
this.embedTagRules(JavaScriptHighlightRules, "js-", "script");
if (this.constructor === HtmlHighlightRules)
this.normalizeRules();
};
oop.inherits(HtmlHighlightRules, XmlHighlightRules);
exports.HtmlHighlightRules = HtmlHighlightRules;
});
ace.define("ace/mode/behaviour/xml",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
var XmlBehaviour = function () {
this.add("string_dquotes", "insertion", function (state, action, editor, session, text) {
if (text == '"' || text == "'") {
var quote = text;
var selected = session.doc.getTextRange(editor.getSelectionRange());
if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) {
return {
text: quote + selected + quote,
selection: false
};
}
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (rightChar == quote && (is(token, "attribute-value") || is(token, "string"))) {
return {
text: "",
selection: [1, 1]
};
}
if (!token)
token = iterator.stepBackward();
if (!token)
return;
while (is(token, "tag-whitespace") || is(token, "whitespace")) {
token = iterator.stepBackward();
}
var rightSpace = !rightChar || rightChar.match(/\s/);
if (is(token, "attribute-equals") && (rightSpace || rightChar == '>') || (is(token, "decl-attribute-equals") && (rightSpace || rightChar == '?'))) {
return {
text: quote + quote,
selection: [1, 1]
};
}
}
});
this.add("string_dquotes", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && (selected == '"' || selected == "'")) {
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == selected) {
range.end.column++;
return range;
}
}
});
this.add("autoclosing", "insertion", function (state, action, editor, session, text) {
if (text == '>') {
var position = editor.getCursorPosition();
var iterator = new TokenIterator(session, position.row, position.column);
var token = iterator.getCurrentToken() || iterator.stepBackward();
if (!token || !(is(token, "tag-name") || is(token, "tag-whitespace") || is(token, "attribute-name") || is(token, "attribute-equals") || is(token, "attribute-value")))
return;
if (is(token, "reference.attribute-value"))
return;
if (is(token, "attribute-value")) {
var firstChar = token.value.charAt(0);
if (firstChar == '"' || firstChar == "'") {
var lastChar = token.value.charAt(token.value.length - 1);
var tokenEnd = iterator.getCurrentTokenColumn() + token.value.length;
if (tokenEnd > position.column || tokenEnd == position.column && firstChar != lastChar)
return;
}
}
while (!is(token, "tag-name")) {
token = iterator.stepBackward();
}
var tokenRow = iterator.getCurrentTokenRow();
var tokenColumn = iterator.getCurrentTokenColumn();
if (is(iterator.stepBackward(), "end-tag-open"))
return;
var element = token.value;
if (tokenRow == position.row)
element = element.substring(0, position.column - tokenColumn);
if (this.voidElements.hasOwnProperty(element.toLowerCase()))
return;
return {
text: '>' + '</' + element + '>',
selection: [1, 1]
};
}
});
this.add('autoindent', 'insertion', function (state, action, editor, session, text) {
if (text == "\n") {
var cursor = editor.getCursorPosition();
var line = session.getLine(cursor.row);
var rightChars = line.substring(cursor.column, cursor.column + 2);
if (rightChars == '</') {
var next_indent = this.$getIndent(line);
var indent = next_indent + session.getTabString();
return {
text: '\n' + indent + '\n' + next_indent,
selection: [1, indent.length, 1, indent.length]
};
}
}
});
};
oop.inherits(XmlBehaviour, Behaviour);
exports.XmlBehaviour = XmlBehaviour;
});
ace.define("ace/mode/folding/mixed",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(defaultMode, subModes) {
this.defaultMode = defaultMode;
this.subModes = subModes;
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.$getMode = function(state) {
if (typeof state != "string")
state = state[0];
for (var key in this.subModes) {
if (state.indexOf(key) === 0)
return this.subModes[key];
}
return null;
};
this.$tryMode = function(state, session, foldStyle, row) {
var mode = this.$getMode(state);
return (mode ? mode.getFoldWidget(session, foldStyle, row) : "");
};
this.getFoldWidget = function(session, foldStyle, row) {
return (
this.$tryMode(session.getState(row-1), session, foldStyle, row) ||
this.$tryMode(session.getState(row), session, foldStyle, row) ||
this.defaultMode.getFoldWidget(session, foldStyle, row)
);
};
this.getFoldWidgetRange = function(session, foldStyle, row) {
var mode = this.$getMode(session.getState(row-1));
if (!mode || !mode.getFoldWidget(session, foldStyle, row))
mode = this.$getMode(session.getState(row));
if (!mode || !mode.getFoldWidget(session, foldStyle, row))
mode = this.defaultMode;
return mode.getFoldWidgetRange(session, foldStyle, row);
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/folding/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/range","ace/mode/folding/fold_mode","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var lang = require("../../lib/lang");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var TokenIterator = require("../../token_iterator").TokenIterator;
var FoldMode = exports.FoldMode = function(voidElements, optionalEndTags) {
BaseFoldMode.call(this);
this.voidElements = oop.mixin(voidElements || {}, optionalEndTags || {});
};
oop.inherits(FoldMode, BaseFoldMode);
var Tag = function() {
this.tagName = "";
this.closing = false;
this.selfClosing = false;
this.start = {row: 0, column: 0};
this.end = {row: 0, column: 0};
};
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
(function() {
this.getFoldWidget = function(session, foldStyle, row) {
var tag = this._getFirstTagInLine(session, row);
if (!tag)
return "";
if (tag.closing || (!tag.tagName && tag.selfClosing))
return foldStyle == "markbeginend" ? "end" : "";
if (!tag.tagName || tag.selfClosing || this.voidElements.hasOwnProperty(tag.tagName.toLowerCase()))
return "";
if (this._findEndTagInLine(session, row, tag.tagName, tag.end.column))
return "";
return "start";
};
this._getFirstTagInLine = function(session, row) {
var tokens = session.getTokens(row);
var tag = new Tag();
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
if (is(token, "tag-open")) {
tag.end.column = tag.start.column + token.value.length;
tag.closing = is(token, "end-tag-open");
token = tokens[++i];
if (!token)
return null;
tag.tagName = token.value;
tag.end.column += token.value.length;
for (i++; i < tokens.length; i++) {
token = tokens[i];
tag.end.column += token.value.length;
if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
break;
}
}
return tag;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
return tag;
}
tag.start.column += token.value.length;
}
return null;
};
this._findEndTagInLine = function(session, row, tagName, startColumn) {
var tokens = session.getTokens(row);
var column = 0;
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
column += token.value.length;
if (column < startColumn)
continue;
if (is(token, "end-tag-open")) {
token = tokens[i + 1];
if (token && token.value == tagName)
return true;
}
}
return false;
};
this._readTagForward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
iterator.stepForward();
return tag;
}
} while(token = iterator.stepForward());
return null;
};
this._readTagBackward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
iterator.stepBackward();
return tag;
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
}
} while(token = iterator.stepBackward());
return null;
};
this._pop = function(stack, tag) {
while (stack.length) {
var top = stack[stack.length-1];
if (!tag || top.tagName == tag.tagName) {
return stack.pop();
}
else if (this.voidElements.hasOwnProperty(tag.tagName)) {
return;
}
else if (this.voidElements.hasOwnProperty(top.tagName)) {
stack.pop();
continue;
} else {
return null;
}
}
};
this.getFoldWidgetRange = function(session, foldStyle, row) {
var firstTag = this._getFirstTagInLine(session, row);
if (!firstTag)
return null;
var isBackward = firstTag.closing || firstTag.selfClosing;
var stack = [];
var tag;
if (!isBackward) {
var iterator = new TokenIterator(session, row, firstTag.start.column);
var start = {
row: row,
column: firstTag.start.column + firstTag.tagName.length + 2
};
while (tag = this._readTagForward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (tag.closing) {
this._pop(stack, tag);
if (stack.length == 0)
return Range.fromPoints(start, tag.start);
}
else {
stack.push(tag);
}
}
}
else {
var iterator = new TokenIterator(session, row, firstTag.end.column);
var end = {
row: row,
column: firstTag.start.column
};
while (tag = this._readTagBackward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (!tag.closing) {
this._pop(stack, tag);
if (stack.length == 0) {
tag.start.column += tag.tagName.length + 2;
return Range.fromPoints(tag.start, end);
}
}
else {
stack.push(tag);
}
}
}
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/folding/html",["require","exports","module","ace/lib/oop","ace/mode/folding/mixed","ace/mode/folding/xml","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var MixedFoldMode = require("./mixed").FoldMode;
var XmlFoldMode = require("./xml").FoldMode;
var CStyleFoldMode = require("./cstyle").FoldMode;
var FoldMode = exports.FoldMode = function(voidElements, optionalTags) {
MixedFoldMode.call(this, new XmlFoldMode(voidElements, optionalTags), {
"js-": new CStyleFoldMode(),
"css-": new CStyleFoldMode()
});
};
oop.inherits(FoldMode, MixedFoldMode);
});
ace.define("ace/mode/html_completions",["require","exports","module","ace/token_iterator"], function(require, exports, module) {
"use strict";
var TokenIterator = require("../token_iterator").TokenIterator;
var commonAttributes = [
"accesskey",
"class",
"contenteditable",
"contextmenu",
"dir",
"draggable",
"dropzone",
"hidden",
"id",
"inert",
"itemid",
"itemprop",
"itemref",
"itemscope",
"itemtype",
"lang",
"spellcheck",
"style",
"tabindex",
"title",
"translate"
];
var eventAttributes = [
"onabort",
"onblur",
"oncancel",
"oncanplay",
"oncanplaythrough",
"onchange",
"onclick",
"onclose",
"oncontextmenu",
"oncuechange",
"ondblclick",
"ondrag",
"ondragend",
"ondragenter",
"ondragleave",
"ondragover",
"ondragstart",
"ondrop",
"ondurationchange",
"onemptied",
"onended",
"onerror",
"onfocus",
"oninput",
"oninvalid",
"onkeydown",
"onkeypress",
"onkeyup",
"onload",
"onloadeddata",
"onloadedmetadata",
"onloadstart",
"onmousedown",
"onmousemove",
"onmouseout",
"onmouseover",
"onmouseup",
"onmousewheel",
"onpause",
"onplay",
"onplaying",
"onprogress",
"onratechange",
"onreset",
"onscroll",
"onseeked",
"onseeking",
"onselect",
"onshow",
"onstalled",
"onsubmit",
"onsuspend",
"ontimeupdate",
"onvolumechange",
"onwaiting"
];
var globalAttributes = commonAttributes.concat(eventAttributes);
var attributeMap = {
"html": ["manifest"],
"head": [],
"title": [],
"base": ["href", "target"],
"link": ["href", "hreflang", "rel", "media", "type", "sizes"],
"meta": ["http-equiv", "name", "content", "charset"],
"style": ["type", "media", "scoped"],
"script": ["charset", "type", "src", "defer", "async"],
"noscript": ["href"],
"body": ["onafterprint", "onbeforeprint", "onbeforeunload", "onhashchange", "onmessage", "onoffline", "onpopstate", "onredo", "onresize", "onstorage", "onundo", "onunload"],
"section": [],
"nav": [],
"article": ["pubdate"],
"aside": [],
"h1": [],
"h2": [],
"h3": [],
"h4": [],
"h5": [],
"h6": [],
"header": [],
"footer": [],
"address": [],
"main": [],
"p": [],
"hr": [],
"pre": [],
"blockquote": ["cite"],
"ol": ["start", "reversed"],
"ul": [],
"li": ["value"],
"dl": [],
"dt": [],
"dd": [],
"figure": [],
"figcaption": [],
"div": [],
"a": ["href", "target", "ping", "rel", "media", "hreflang", "type"],
"em": [],
"strong": [],
"small": [],
"s": [],
"cite": [],
"q": ["cite"],
"dfn": [],
"abbr": [],
"data": [],
"time": ["datetime"],
"code": [],
"var": [],
"samp": [],
"kbd": [],
"sub": [],
"sup": [],
"i": [],
"b": [],
"u": [],
"mark": [],
"ruby": [],
"rt": [],
"rp": [],
"bdi": [],
"bdo": [],
"span": [],
"br": [],
"wbr": [],
"ins": ["cite", "datetime"],
"del": ["cite", "datetime"],
"img": ["alt", "src", "height", "width", "usemap", "ismap"],
"iframe": ["name", "src", "height", "width", "sandbox", "seamless"],
"embed": ["src", "height", "width", "type"],
"object": ["param", "data", "type", "height" , "width", "usemap", "name", "form", "classid"],
"param": ["name", "value"],
"video": ["src", "autobuffer", "autoplay", "loop", "controls", "width", "height", "poster"],
"audio": ["src", "autobuffer", "autoplay", "loop", "controls"],
"source": ["src", "type", "media"],
"track": ["kind", "src", "srclang", "label", "default"],
"canvas": ["width", "height"],
"map": ["name"],
"area": ["shape", "coords", "href", "hreflang", "alt", "target", "media", "rel", "ping", "type"],
"svg": [],
"math": [],
"table": ["summary"],
"caption": [],
"colgroup": ["span"],
"col": ["span"],
"tbody": [],
"thead": [],
"tfoot": [],
"tr": [],
"td": ["headers", "rowspan", "colspan"],
"th": ["headers", "rowspan", "colspan", "scope"],
"form": ["accept-charset", "action", "autocomplete", "enctype", "method", "name", "novalidate", "target"],
"fieldset": ["disabled", "form", "name"],
"legend": [],
"label": ["form", "for"],
"input": ["type", "accept", "alt", "autocomplete", "checked", "disabled", "form", "formaction", "formenctype", "formmethod", "formnovalidate", "formtarget", "height", "list", "max", "maxlength", "min", "multiple", "pattern", "placeholder", "readonly", "required", "size", "src", "step", "width", "files", "value"],
"button": ["autofocus", "disabled", "form", "formaction", "formenctype", "formmethod", "formnovalidate", "formtarget", "name", "value", "type"],
"select": ["autofocus", "disabled", "form", "multiple", "name", "size"],
"datalist": [],
"optgroup": ["disabled", "label"],
"option": ["disabled", "selected", "label", "value"],
"textarea": ["autofocus", "disabled", "form", "maxlength", "name", "placeholder", "readonly", "required", "rows", "cols", "wrap"],
"keygen": ["autofocus", "challenge", "disabled", "form", "keytype", "name"],
"output": ["for", "form", "name"],
"progress": ["value", "max"],
"meter": ["value", "min", "max", "low", "high", "optimum"],
"details": ["open"],
"summary": [],
"command": ["type", "label", "icon", "disabled", "checked", "radiogroup", "command"],
"menu": ["type", "label"],
"dialog": ["open"]
};
var elements = Object.keys(attributeMap);
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
function findTagName(session, pos) {
var iterator = new TokenIterator(session, pos.row, pos.column);
var token = iterator.getCurrentToken();
while (token && !is(token, "tag-name")){
token = iterator.stepBackward();
}
if (token)
return token.value;
}
var HtmlCompletions = function() {
};
(function() {
this.getCompletions = function(state, session, pos, prefix) {
var token = session.getTokenAt(pos.row, pos.column);
if (!token)
return [];
if (is(token, "tag-name") || is(token, "tag-open") || is(token, "end-tag-open"))
return this.getTagCompletions(state, session, pos, prefix);
if (is(token, "tag-whitespace") || is(token, "attribute-name"))
return this.getAttributeCompletions(state, session, pos, prefix);
return [];
};
this.getTagCompletions = function(state, session, pos, prefix) {
return elements.map(function(element){
return {
value: element,
meta: "tag",
score: Number.MAX_VALUE
};
});
};
this.getAttributeCompletions = function(state, session, pos, prefix) {
var tagName = findTagName(session, pos);
if (!tagName)
return [];
var attributes = globalAttributes;
if (tagName in attributeMap) {
attributes = attributes.concat(attributeMap[tagName]);
}
return attributes.map(function(attribute){
return {
caption: attribute,
snippet: attribute + '="$0"',
meta: "attribute",
score: Number.MAX_VALUE
};
});
};
}).call(HtmlCompletions.prototype);
exports.HtmlCompletions = HtmlCompletions;
});
ace.define("ace/mode/html",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text","ace/mode/javascript","ace/mode/css","ace/mode/html_highlight_rules","ace/mode/behaviour/xml","ace/mode/folding/html","ace/mode/html_completions","ace/worker/worker_client"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var TextMode = require("./text").Mode;
var JavaScriptMode = require("./javascript").Mode;
var CssMode = require("./css").Mode;
var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules;
var XmlBehaviour = require("./behaviour/xml").XmlBehaviour;
var HtmlFoldMode = require("./folding/html").FoldMode;
var HtmlCompletions = require("./html_completions").HtmlCompletions;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var voidElements = ["area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param", "source", "track", "wbr"];
var optionalEndTags = ["li", "dt", "dd", "p", "rt", "rp", "optgroup", "option", "colgroup", "td", "th"];
var Mode = function(options) {
this.fragmentContext = options && options.fragmentContext;
this.HighlightRules = HtmlHighlightRules;
this.$behaviour = new XmlBehaviour();
this.$completer = new HtmlCompletions();
this.createModeDelegates({
"js-": JavaScriptMode,
"css-": CssMode
});
this.foldingRules = new HtmlFoldMode(this.voidElements, lang.arrayToMap(optionalEndTags));
};
oop.inherits(Mode, TextMode);
(function() {
this.blockComment = {start: "<!--", end: "-->"};
this.voidElements = lang.arrayToMap(voidElements);
this.getNextLineIndent = function(state, line, tab) {
return this.$getIndent(line);
};
this.checkOutdent = function(state, line, input) {
return false;
};
this.getCompletions = function(state, session, pos, prefix) {
return this.$completer.getCompletions(state, session, pos, prefix);
};
this.createWorker = function(session) {
if (this.constructor != Mode)
return;
var worker = new WorkerClient(["ace"], "ace/mode/html_worker", "Worker");
worker.attachToDocument(session.getDocument());
if (this.fragmentContext)
worker.call("setOptions", [{context: this.fragmentContext}]);
worker.on("error", function(e) {
session.setAnnotations(e.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/html";
}).call(Mode.prototype);
exports.Mode = Mode;
});
ace.define("ace/mode/curly_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/html_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules;
var CurlyHighlightRules = function() {
HtmlHighlightRules.call(this);
this.$rules["start"].unshift({
token: "variable",
regex: "{{",
push: "curly-start"
});
this.$rules["curly-start"] = [{
token: "variable",
regex: "}}",
next: "pop"
}];
this.normalizeRules();
};
oop.inherits(CurlyHighlightRules, HtmlHighlightRules);
exports.CurlyHighlightRules = CurlyHighlightRules;
});
ace.define("ace/mode/curly",["require","exports","module","ace/lib/oop","ace/mode/html","ace/mode/matching_brace_outdent","ace/mode/html_highlight_rules","ace/mode/folding/html","ace/mode/curly_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var HtmlMode = require("./html").Mode;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules;
var HtmlFoldMode = require("./folding/html").FoldMode;
var CurlyHighlightRules = require("./curly_highlight_rules").CurlyHighlightRules;
var Mode = function() {
HtmlMode.call(this);
this.HighlightRules = CurlyHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.foldingRules = new HtmlFoldMode();
};
oop.inherits(Mode, HtmlMode);
(function() {
this.$id = "ace/mode/curly";
}).call(Mode.prototype);
exports.Mode = Mode;
});
# /EagleWrapper-1.1.1-py3-none-any.whl/eaglewrapper/eagle.py
from typing import Optional, List, Dict, Any
import datetime
from pathlib import Path
import json
import concurrent.futures
import requests
from loguru import logger
class ImageData:
"""
A class to represent image data for adding images to Eagle.
Attributes:
url (str): Required. The URL of the image to be added. Supports http, https, and base64.
name (str): Required. The name of the image to be added.
website (str): The address of the image source.
tags (list): Tags for the image.
annotation (str): The annotation for the image.
modificationTime (int): The creation date of the image. Can be used to alter the image's sorting order in Eagle.
headers (dict): Optional. Customize the HTTP headers properties. Can be used to bypass security on certain websites.
"""
def __init__(self, url: str, name: str, website: str = '', tags: list = None, annotation: str = '', modificationTime: int = None, headers: dict = None):
"""
Initializes the ImageData instance with the given attributes.
Args:
url (str): Required. The URL of the image to be added. Supports http, https, and base64.
name (str): Required. The name of the image to be added.
website (str, optional): The address of the image source. Defaults to an empty string.
tags (list, optional): Tags for the image. Defaults to an empty list.
annotation (str, optional): The annotation for the image. Defaults to an empty string.
modificationTime (int, optional): The creation date of the image. Can be used to alter the image's sorting order in Eagle. Defaults to None.
headers (dict, optional): Optional. Customize the HTTP headers properties. Can be used to bypass security on certain websites. Defaults to an empty dictionary.
Raises:
ValueError: If the URL or name is empty.
"""
if tags is None:
tags = []
if headers is None:
headers = {}
if not url:
raise ValueError("URL cannot be empty.")
if not name:
raise ValueError("Name cannot be empty.")
self.url = url
self.name = name
self.website = website
self.tags = tags
self.annotation = annotation
self.modificationTime = modificationTime
self.headers = headers
def to_dict(self) -> dict:
return self.__dict__
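# A minimal usage sketch (added for illustration; the URL is a placeholder,
# not a real asset):
#
#   payload = ImageData(url='https://example.com/cat.jpg', name='cat',
#                       tags=['animal'], annotation='sample').to_dict()
#   # 'payload' is a plain dict ready to be POSTed to an Eagle API endpoint.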
class Eagle:
def __init__(self, domain='http://localhost', port=41595):
self.domain = domain
self.port = port
self.host = f'{domain}:{port}'
def check_success(self, response) -> bool:
"""
Check if the API response status is 'success'.
Args:
response (Response): The response object from an API call.
Returns:
bool: True if the status is 'success', otherwise False.
"""
status = response.json().get('status')
if status != 'success':
return False
return True
# =================================================
# Helper methods not part of the official Eagle API
# =================================================
    def get_img_info_from_lib_path(self, library_path: str, name_start_filters: list = None, max_workers: int = 4) -> list:
"""
Get all images' information (metadata) from the path of the source library.
        For example, a library might be located at '/path/to/your/test.library'.
Args:
library_path (str): The path of the source library.
name_start_filters (list, optional): A list of name prefixes to filter images. Defaults to an empty list.
max_workers (int, optional): The number of concurrent workers. Defaults to 4.
Returns:
list: A list of dictionaries containing the metadata of the images.
"""
        if name_start_filters is None:
            name_start_filters = []
        imgs_info = []
def load_id(meta_path):
data = {}
with open(meta_path, 'r', encoding='utf-8') as f:
try:
data = json.loads(f.read())
name = data.get('name')
for name_start_filter in name_start_filters:
if not name.startswith(name_start_filter):
return {}
except Exception as e:
logger.warning(f'[Eagle] {e}')
return data
logger.debug('[Eagle] Start getting images INFO...')
st = datetime.datetime.now()
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
get_id_job = {
executor.submit(load_id, meta_path): meta_path for meta_path in Path(library_path).glob('**/metadata.json')
}
for future in concurrent.futures.as_completed(get_id_job):
try:
data = future.result()
                    if data:
imgs_info.append(data)
except Exception as e:
logger.error(e)
ed = datetime.datetime.now()
        logger.debug(f'[Eagle] Getting all images INFO took {(ed - st).total_seconds():.2f} sec(s)')
return imgs_info
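    # Usage sketch (added; the library path and name prefix are placeholders):
    #
    #   eagle = Eagle()
    #   infos = eagle.get_img_info_from_lib_path('/path/to/test.library',
    #                                            name_start_filters=['IMG_'])
    #   names = [info.get('name') for info in infos]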
def set_tag_with_id(self, item_id: int, tags: list) -> dict:
"""
Set tags to an image with its ID by using `update` API.
API DOC: https://api.eagle.cool/item/update
Args:
item_id (int): The ID of the image to be updated.
tags (list): A list of tags to be associated with the image.
Returns:
dict: A dictionary containing the result information of the updated image.
"""
return self.update_item(item_id, tags)
def get_img_list_info(self, max_image_number: int, name_start_filter='') -> list:
"""
Get a list of images' information up to the specified maximum number of images.
API DOC: https://api.eagle.cool/item/list
Args:
max_image_number (int): The maximum number of images to retrieve.
name_start_filter (str, optional): A string to filter images by name prefix. Defaults to an empty string.
Returns:
list: A list of dictionaries containing the information of the retrieved images.
"""
resp = requests.get(f'{self.host}/api/item/list?limit={max_image_number}')
data = resp.json().get('data')
if name_start_filter != '':
data = [d for d in data if d.get('name').startswith(name_start_filter)]
return data
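    # Usage sketch (added): fetch up to 100 items and keep those whose name
    # starts with 'screenshot':
    #
    #   shots = Eagle().get_img_list_info(100, name_start_filter='screenshot')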
# =================================================
# Application methods
# =================================================
def get_application_info(self) -> dict:
"""
Get detailed information on the Eagle App currently running.
API DOC: https://api.eagle.cool/application/info
Returns:
dict: A dictionary containing the detailed information on the Eagle App, such as version, buildVersion, execPath, and platform.
"""
resp = requests.get(f'{self.host}/api/application/info')
return resp.json().get('data')
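    # Usage sketch (added):
    #
    #   info = Eagle().get_application_info()
    #   print(info.get('version'), info.get('platform'))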
# =================================================
# Image (Item) methods
# =================================================
    def add_from_url(self, url: str, name: str, tags=None, website='', annotation='', modification_time: int = None, folder_id: str = None, headers: dict = None) -> bool:
"""
Add a new image or materials from the given URL.
API DOC: https://api.eagle.cool/item/add-from-url
Args:
url (str): The URL of the image or materials to be added.
name (str): The name to be assigned to the item.
tags (list, optional): A list of tags to be associated with the item. Defaults to an empty list.
website (str, optional): The website where the image or materials are from. Defaults to ''.
annotation (str, optional): Any additional annotation or description for the item. Defaults to ''.
modification_time (int, optional): The creation date of the image.
folder_id (str, optional): If this parameter is defined, the image will be added to the corresponding folder.
headers (dict, optional): Customize the HTTP headers properties, this could be used to circumvent the security of certain websites.
Returns:
bool: A flag.
"""
data = ImageData(
url=url,
name=name,
website=website,
tags=tags,
annotation=annotation,
modificationTime=modification_time,
headers=headers
).to_dict()
if folder_id is not None:
data["folderId"] = folder_id
resp = requests.post(f'{self.host}/api/item/addFromURL', json=data)
return self.check_success(resp)
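    # Usage sketch (added; placeholder URL and tags):
    #
    #   ok = Eagle().add_from_url('https://example.com/cat.jpg', 'cat',
    #                             tags=['animal'], website='https://example.com')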
def add_from_urls(self, items: list, folder_id: str = None) -> bool:
"""
Add multiple images from URLs to Eagle.
API DOC: https://api.eagle.cool/item/add-from-urls
Args:
items (list): The array object made up of multiple items. Each item is a dictionary with keys:
- url (str): Required, the URL of the image to be added. Supports http, https, base64.
- name (str): Required, the name of the image to be added.
- website (str, optional): The address of the source of the image.
- annotation (str, optional): The annotation for the image.
- tags (list, optional): Tags for the image.
            - modification_time (int, optional): The creation date of the image.
- headers (dict, optional): Customize the HTTP headers properties.
folder_id (str, optional): If the parameter is defined, images will be added to the corresponding folder.
Returns:
bool: A flag.
"""
items_ = []
for item in items:
            item_ = ImageData(
                url=item['url'],
                name=item['name'],
                website=item.get('website', ''),
                tags=item.get('tags'),
                annotation=item.get('annotation', ''),
                modificationTime=item.get('modification_time'),
                headers=item.get('headers')
            ).to_dict()
items_.append(item_)
data = {"items": items_}
if folder_id is not None:
data["folderId"] = folder_id
resp = requests.post(f'{self.host}/api/item/addFromURLs', json=data)
return self.check_success(resp)
def add_from_path(self, path: str, name: str, website: Optional[str] = None,
annotation: Optional[str] = None, tags: Optional[List[str]] = None,
folder_id: Optional[str] = None) -> bool:
"""
Add a local file to Eagle.
API DOC: https://api.eagle.cool/item/add-from-path
Args:
path (str): Required, the path of the local file.
name (str): Required, the name of the image to be added.
website (str, optional): The address of the source of the image.
annotation (str, optional): The annotation for the image.
tags (List[str], optional): Tags for the image.
folder_id (str, optional): If this parameter is defined, the image will be added to the corresponding folder.
Returns:
bool: A flag.
"""
data = ImageData(
url=path,
name=name,
website=website,
tags=tags,
annotation=annotation
).to_dict()
data['path'] = data['url']
del data['url']
data["folderId"] = folder_id
resp = requests.post(f'{self.host}/api/item/addFromPath', json=data)
return resp.json()
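    # Usage sketch (added; the local path is a placeholder):
    #
    #   ok = Eagle().add_from_path('/tmp/cat.jpg', 'cat', tags=['animal'])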
def add_from_paths(self, items: List[Dict], folder_id: Optional[str] = None) -> bool:
"""
Add multiple local files to Eagle.
API DOC: https://api.eagle.cool/item/add-from-paths
Args:
items (List[Dict]): A list of dictionaries containing the following keys:
- path (str): The path of the local file.
- name (str): The name of the image to be added.
- website (str, optional): The address of the source of the image.
- annotation (str, optional): The annotation for the image.
- tags (List[str], optional): Tags for the image.
folder_id (str, optional): If this parameter is defined, the images will be added to the corresponding folder.
Returns:
bool: A flag.
"""
items_ = []
for item in items:
            item_ = ImageData(
                url=item["path"],
                name=item["name"],
                website=item.get("website", ''),
                tags=item.get("tags"),
                annotation=item.get("annotation", '')
            ).to_dict()
item_['path'] = item_['url']
del item_['url']
items_.append(item_)
data = {"items": items_}
if folder_id is not None:
data["folderId"] = folder_id
resp = requests.post(f'{self.host}/api/item/addFromPaths', json=data)
return self.check_success(resp)
def add_bookmark(self, url: str, name: str, tags: List[str] = None,
thumbnail_base64: str = None, modification_time: int = None,
folder_id: str = None) -> bool:
"""
        Save a link in URL form to Eagle as a bookmark.
API DOC: https://api.eagle.cool/item/add-bookmark
Args:
url (str): The link of the image to be saved. Supports http, https, base64.
name (str): The name of the image to be added.
tags (List[str], optional): Tags for the image.
thumbnail_base64 (str, optional): The thumbnail of the bookmark. Must be in base64 format.
modification_time (int, optional): The creation date of the images. The parameter can be used to alter the
images' sorting order in Eagle.
folder_id (str, optional): If this parameter is defined, the image will be added to the corresponding folder.
Returns:
bool: A flag.
"""
        data = {
            "url": url,
            "name": name,
            "tags": tags or [],
            "base64": thumbnail_base64,
            "modificationTime": modification_time,
            "folderId": folder_id
        }
        # Drop unset optional fields rather than sending JSON nulls to the API
        data = {k: v for k, v in data.items() if v is not None}
resp = requests.post(f'{self.host}/api/item/addBookmark', json=data)
return self.check_success(resp)
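    # Hedged usage sketch (illustrative): saving a link with tags; the thumbnail
    # and timestamp are optional and omitted here.
    #
    #     client.add_bookmark(
    #         url='https://example.com/article',
    #         name='Reference article',
    #         tags=['reading', 'later'],
    #     )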
def get_item_info(self, item_id: str) -> Optional[Dict[str, Any]]:
"""
Get properties of the specified file, including the file name, tags, categorizations, folders, dimensions, etc.
API DOC: https://api.eagle.cool/item/info
Args:
item_id (str): ID of the file.
Returns:
Optional[Dict[str, Any]]: A dictionary containing the properties of the specified file or None if unsuccessful.
"""
params = {'id': item_id}
resp = requests.get(f'{self.host}/api/item/info', params=params)
if not self.check_success(resp):
return None
return resp.json().get('data')
def get_thumbnail_path(self, item_id: str) -> Optional[str]:
"""
Get the path of the thumbnail of the specified file.
API DOC: https://api.eagle.cool/item/thumbnail
Args:
item_id (str): ID of the file.
Returns:
Optional[str]: The thumbnail path of the specified file or None if unsuccessful.
"""
params = {'id': item_id}
resp = requests.get(f'{self.host}/api/item/thumbnail', params=params)
if not self.check_success(resp):
return None
return resp.json().get('data')
    def list_items(self, limit: int = 200, offset: int = 0, order_by: Optional[str] = None,
                   keyword: Optional[str] = None, ext: Optional[str] = None,
                   tags: Optional[List[str]] = None, folders: Optional[List[str]] = None) -> list:
"""
Get items that match the filter condition.
API DOC: https://api.eagle.cool/item/list
Args:
limit (int): The number of items to be displayed. The default number is 200.
        offset (int): Offset into the collection of results returned by the API. Starts at 0.
        order_by (Optional[str]): The sorting order: CREATEDATE, FILESIZE, NAME or RESOLUTION; prefix with a minus sign for descending order, e.g. -FILESIZE.
keyword (Optional[str]): Filter by the keyword.
ext (Optional[str]): Filter by the extension type, e.g.: jpg, png.
tags (Optional[List[str]]): Filter by tags.
folders (Optional[List[str]]): Filter by Folders.
Returns:
            list: A list of dictionaries, one per item matching the filter.
"""
params = {'limit': limit, 'offset': offset}
if order_by:
params['orderBy'] = order_by
if keyword:
params['keyword'] = keyword
if ext:
params['ext'] = ext
if tags:
params['tags'] = ','.join(tags)
if folders:
params['folders'] = ','.join(folders)
resp = requests.get(f'{self.host}/api/item/list', params=params)
return resp.json().get('data')
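    # Hedged usage sketch (illustrative): the 20 most recent PNG items tagged
    # 'icon', newest first.
    #
    #     items = client.list_items(limit=20, order_by='-CREATEDATE', ext='png', tags=['icon'])
    #     names = [item['name'] for item in items]  # 'name' key assumed, as in get_item_info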
def move_to_trash(self, item_ids: List[str]) -> bool:
"""
Move items to trash.
API DOC: https://api.eagle.cool/item/api-item-movetotrash
Args:
item_ids (List[str]): A list of item IDs to be moved to trash.
Returns:
bool: True if the operation was successful, False otherwise.
"""
data = {"itemIds": item_ids}
resp = requests.post(f'{self.host}/api/item/moveToTrash', json=data)
return self.check_success(resp)
def refresh_palette(self, item_id: str) -> bool:
"""
        Re-analyze the colors of the file. When changes to the original file were made,
        you can call this function to refresh the color analysis.
API DOC: https://api.eagle.cool/item/refresh-palette
Args:
item_id (str): The item's ID.
Returns:
bool: True if the operation was successful, False otherwise.
"""
data = {"id": item_id}
resp = requests.post(f'{self.host}/api/item/refreshPalette', json=data)
return self.check_success(resp)
def refresh_thumbnail(self, item_id: str) -> bool:
"""
        Re-generate the thumbnail of the file used for display in the list. When changes
        to the original file were made, you can call this function to re-generate the
        thumbnail; the color analysis is refreshed as well.
API DOC: https://api.eagle.cool/item/refresh-thumbnail
Args:
item_id (str): The item's ID.
Returns:
bool: True if the operation was successful, False otherwise.
"""
data = {"id": item_id}
resp = requests.post(f'{self.host}/api/item/refreshThumbnail', json=data)
return self.check_success(resp)
    def update_item(self, item_id: str, tags: Optional[List[str]] = None, annotation: Optional[str] = None,
                    url: Optional[str] = None, star: Optional[int] = None) -> dict:
"""
Modify data of specified fields of the item.
API DOC: https://api.eagle.cool/item/update
Args:
item_id (str): Required, the ID of the item to be modified.
tags (list): Optional, tags.
annotation (str): Optional, annotations.
url (str): Optional, the source url.
star (int): Optional, ratings.
Returns:
dict: Updated item data.
"""
data = {"id": item_id}
if tags is not None:
data["tags"] = tags
if annotation is not None:
data["annotation"] = annotation
if url is not None:
data["url"] = url
if star is not None:
data["star"] = star
resp = requests.post(f'{self.host}/api/item/update', json=data)
if not self.check_success(resp):
raise Exception("[Eagle] Failed to update the item.")
return resp.json().get("data")
# =================================================
# Folder methods
# =================================================
    def create_folder(self, folder_name: str, parent_id: Optional[str] = None) -> dict:
"""
Create a folder. The created folder will be placed at the bottom of the folder list of the current library.
API DOC: https://api.eagle.cool/folder/create
Args:
folder_name (str): The name of the folder to be created.
            parent_id (str, optional): The ID of the parent folder. If not provided, the created folder will be at the top level. Defaults to None.
Returns:
dict: A dictionary containing the result information of the created folder.
"""
data = {
'folderName': folder_name,
}
if parent_id is not None:
data['parent'] = parent_id
resp = requests.post(f'{self.host}/api/folder/create', json=data)
return resp.json().get('data')
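    # Hedged usage sketch (illustrative): creating a nested folder; the 'id' key
    # of the returned dictionary is assumed to hold the new folder's ID.
    #
    #     parent = client.create_folder('Projects')
    #     child = client.create_folder('Logos', parent_id=parent['id'])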
def rename_folder(self, folder_id: str, new_name: str) -> dict:
"""
Rename the specified folder.
API DOC: https://api.eagle.cool/folder/rename
Args:
folder_id (str): The ID of the folder to be renamed.
new_name (str): The new name for the folder.
Returns:
dict: A dictionary containing the result information of the renamed folder.
"""
data = {
'folderId': folder_id,
'newName': new_name,
}
resp = requests.post(f'{self.host}/api/folder/rename', json=data)
return resp.json().get('data')
    def update_folder(self, folder_id: str, new_name: Optional[str] = None,
                      new_description: Optional[str] = None, new_color: Optional[str] = None) -> dict:
"""
Update the specified folder.
API DOC: https://api.eagle.cool/folder/update
Args:
folder_id (str): The ID of the folder to be updated.
new_name (str, optional): The new name for the folder. Defaults to None.
new_description (str, optional): The new description for the folder. Defaults to None.
new_color (str, optional): The new color for the folder. Valid options are "red", "orange", "green", "yellow", "aqua", "blue", "purple", "pink". Defaults to None.
Returns:
dict: A dictionary containing the result information of the updated folder.
"""
data = {
'folderId': folder_id,
}
if new_name is not None:
data['newName'] = new_name
if new_description is not None:
data['newDescription'] = new_description
if new_color is not None:
data['newColor'] = new_color
resp = requests.post(f'{self.host}/api/folder/update', json=data)
return resp.json().get('data')
    def list_folders(self) -> list:
"""
Get the list of folders of the current library.
API DOC: https://api.eagle.cool/folder/list
Returns:
list: A list of dictionaries containing the folder information.
"""
resp = requests.get(f'{self.host}/api/folder/list')
return resp.json().get('data')
    def get_recent_folders(self) -> list:
"""
Get the list of recently used folders by the user.
API DOC: https://api.eagle.cool/folder/list-recent
Returns:
            list: A list of dictionaries describing the recently used folders.
"""
resp = requests.get(f'{self.host}/api/folder/listRecent')
return resp.json().get('data')
# =================================================
# Library methods
# =================================================
def get_library_info(self) -> dict:
"""
Get detailed information of the currently running library. This function can be used to obtain details
such as All Folders, All Smart Folders, All Tag Groups, Quick Access, etc.
API DOC: https://api.eagle.cool/library/info
Returns:
dict: A dictionary containing detailed information about the currently running library.
"""
resp = requests.get(f'{self.host}/api/library/info')
return resp.json().get('data')
def get_library_history(self) -> list:
"""
Get the list of libraries recently opened by the Application.
API DOC: https://api.eagle.cool/library/history
Returns:
list: A list containing the paths of recently opened libraries.
"""
resp = requests.get(f'{self.host}/api/library/history')
return resp.json().get('data')
def switch_library(self, library_path: str) -> bool:
"""
Switch the library currently opened by Eagle.
API DOC: https://api.eagle.cool/library/switch
Args:
library_path (str): The path of the library to switch to.
Returns:
            bool: True if the request succeeded, False otherwise.
"""
data = {
"libraryPath": library_path
}
resp = requests.post(f'{self.host}/api/library/switch', json=data)
return self.check_success(resp) | PypiClean |
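# --- Hedged usage sketch (not part of the original module) ---
# The client class itself is defined earlier in this module; its constructor is
# assumed here, and a running Eagle application (local API, default port 41595)
# is required for any of the calls to succeed.
#
#     client = EagleClient()                               # hypothetical class name
#     info = client.get_library_info()
#     print(client.get_library_history())                  # recently opened libraries
#     client.switch_library('/path/to/other.library')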
/Braindecode-0.7.tar.gz/Braindecode-0.7/braindecode/datasets/bcicomp.py |
import glob
import os
import os.path as osp
from os import remove
from shutil import unpack_archive
import mne
import numpy as np
from mne.utils import verbose
from scipy.io import loadmat
from braindecode.datasets import BaseDataset, BaseConcatDataset
DATASET_URL = 'https://stacks.stanford.edu/file/druid:zk881ps0522/' \
'BCI_Competion4_dataset4_data_fingerflexions.zip'
class BCICompetitionIVDataset4(BaseConcatDataset):
"""BCI competition IV dataset 4.
Contains ECoG recordings for three patients moving fingers during the experiment.
Targets correspond to the time courses of the flexion of each of five fingers.
See http://www.bbci.de/competition/iv/desc_4.pdf and
http://www.bbci.de/competition/iv/ for the dataset and competition description.
ECoG library containing the dataset: https://searchworks.stanford.edu/view/zk881ps0522
Notes
-----
When using this dataset please cite [1]_ .
Parameters
----------
subject_ids : list(int) | int | None
(list of) int of subject(s) to be loaded. If None, load all available
subjects. Should be in range 1-3.
References
----------
.. [1] Miller, Kai J. "A library of human electrocorticographic data and analyses."
Nature human behaviour 3, no. 11 (2019): 1225-1235. https://doi.org/10.1038/s41562-019-0678-3
"""
possible_subjects = [1, 2, 3]
def __init__(self, subject_ids=None):
data_path = self.download()
if isinstance(subject_ids, int):
subject_ids = [subject_ids]
if subject_ids is None:
subject_ids = self.possible_subjects
self._validate_subjects(subject_ids)
files_list = [f'{data_path}/sub{i}_comp.mat' for i in subject_ids]
datasets = []
for file_path in files_list:
raw_train, raw_test = self._load_data_to_mne(file_path)
desc_train = dict(
subject=file_path.split('/')[-1].split('sub')[1][0],
file_name=file_path.split('/')[-1],
session='train'
)
desc_test = dict(
subject=file_path.split('/')[-1].split('sub')[1][0],
file_name=file_path.split('/')[-1],
session='test'
)
datasets.append(BaseDataset(raw_train, description=desc_train))
datasets.append(BaseDataset(raw_test, description=desc_test))
super().__init__(datasets)
@staticmethod
def download(path=None, force_update=False, verbose=None):
"""Download the dataset.
Parameters
----------
path (None | str) – Location of where to look for the data storing location.
If None, the environment variable or config parameter
MNE_DATASETS_(dataset)_PATH is used. If it doesn’t exist, the “~/mne_data”
directory is used. If the dataset is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update (bool) – Force update of the dataset even if a local copy exists.
verbose (bool, str, int, or None) – If not None, override default verbose level
(see mne.verbose())
        Returns
        -------
        destination : str
            Path to the local folder containing the unpacked dataset files.
        """
signature = 'BCICompetitionIVDataset4'
folder_name = 'BCI_Competion4_dataset4_data_fingerflexions'
# Check if the dataset already exists (unpacked). We have to do that manually
# because we are removing .zip file from disk to save disk space.
        from moabb.datasets.download import get_dataset_path  # keep soft dependency
path = get_dataset_path(signature, path)
key_dest = "MNE-{:s}-data".format(signature.lower())
# We do not use mne _url_to_local_path due to ':' in the url that causes problems on Windows
destination = osp.join(path, key_dest, folder_name)
if len(list(glob.glob(osp.join(destination, '*.mat')))) == 6:
return destination
data_path = _data_dl(DATASET_URL, osp.join(destination, folder_name, signature),
force_update=force_update)
unpack_archive(data_path, osp.dirname(destination))
# removes .zip file that the data was unpacked from
remove(data_path)
return destination
@staticmethod
def _prepare_targets(upsampled_targets, targets_stride):
original_targets = np.full_like(upsampled_targets, np.nan)
original_targets[::targets_stride] = upsampled_targets[::targets_stride]
return original_targets
def _load_data_to_mne(self, file_path):
data = loadmat(file_path)
test_labels = loadmat(file_path.replace('comp.mat', 'testlabels.mat'))
train_data = data['train_data']
test_data = data['test_data']
upsampled_train_targets = data['train_dg']
upsampled_test_targets = test_labels['test_dg']
signal_sfreq = 1000
original_target_sfreq = 25
targets_stride = int(signal_sfreq / original_target_sfreq)
original_targets = self._prepare_targets(upsampled_train_targets, targets_stride)
original_test_targets = self._prepare_targets(upsampled_test_targets, targets_stride)
ch_names = [f'{i}' for i in range(train_data.shape[1])]
ch_names += [f'target_{i}' for i in range(original_targets.shape[1])]
ch_types = ['ecog' for _ in range(train_data.shape[1])]
ch_types += ['misc' for _ in range(original_targets.shape[1])]
info = mne.create_info(sfreq=signal_sfreq, ch_names=ch_names, ch_types=ch_types)
info['temp'] = dict(target_sfreq=original_target_sfreq)
train_data = np.concatenate([train_data, original_targets], axis=1)
test_data = np.concatenate([test_data, original_test_targets], axis=1)
raw_train = mne.io.RawArray(train_data.T, info=info)
raw_test = mne.io.RawArray(test_data.T, info=info)
# TODO: show how to resample targets
return raw_train, raw_test
def _validate_subjects(self, subject_ids):
if isinstance(subject_ids, (list, tuple)):
if not all((subject in self.possible_subjects for subject in subject_ids)):
raise ValueError(
f'Wrong subject_ids parameter. Possible values: {self.possible_subjects}. '
f'Provided {subject_ids}.'
)
else:
raise ValueError(
'Wrong subject_ids format. Expected types: None, list, tuple, int.'
)
@verbose
def _data_dl(url, destination, force_update=False, verbose=None):
# Code taken from moabb due to problem with ':' occurring in path
# On Windows ':' is a forbidden in folder name
# moabb/datasets/download.py
    from pooch import file_hash, retrieve  # keep soft dependency
if not osp.isfile(destination) or force_update:
if osp.isfile(destination):
os.remove(destination)
if not osp.isdir(osp.dirname(destination)):
os.makedirs(osp.dirname(destination))
known_hash = None
else:
known_hash = file_hash(destination)
data_path = retrieve(
url, known_hash, fname=osp.basename(url), path=osp.dirname(destination)
)
return data_path | PypiClean |
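# --- Hedged usage sketch (not part of the original module) ---
# Loading one subject and peeking at the continuous recording. The first call
# downloads the archive, so network access plus the soft dependencies moabb and
# pooch are required.
if __name__ == "__main__":
    dataset = BCICompetitionIVDataset4(subject_ids=1)
    print(dataset.description)            # one train and one test session
    raw = dataset.datasets[0].raw         # mne RawArray: ECoG plus target channels
    print(raw.info['sfreq'], raw.info['temp'])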
/OASYS1-XOPPY-1.2.10.tar.gz/OASYS1-XOPPY-1.2.10/orangecontrib/xoppy/widgets/optics/xfh.py |
import sys
import numpy
from PyQt5.QtWidgets import QApplication
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
from xoppylib.crystals.tools import bragg_calc, bragg_calc2, crystal_fh
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
import xraylib
from dabax.dabax_xraylib import DabaxXraylib
class OWxfh(XoppyWidget):
name = "Fh"
id = "orange.widgets.dataxfh"
description = "Crystal Structure Factors"
icon = "icons/xoppy_xfh.png"
priority = 17
category = ""
keywords = ["xoppy", "xfh"]
ILATTICE = Setting(32)
HMILLER = Setting(1)
KMILLER = Setting(1)
LMILLER = Setting(1)
plot_variable = Setting(0)
I_PLOT = Setting(2)
TEMPER = Setting(1.0)
ENERGY = Setting(8000.0)
ENERGY_END = Setting(18000.0)
NPOINTS = Setting(20)
DUMP_TO_FILE = Setting(0) # No
FILE_NAME = Setting("Fh.dat")
# new crystals #todo: add to menus?
material_constants_library_flag = Setting(2) # 0=xraylib, 1=dabax, 2=xraylib completed by dabax
dx = None # DABAX object
def __init__(self):
super().__init__(show_script_tab=True)
def build_gui(self):
box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-5)
idx = -1
#widget index 3
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "ILATTICE",
label=self.unitLabels()[idx], addSpace=False,
items=self.get_crystal_list(),
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 4
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "HMILLER",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 5
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "KMILLER",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 6
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "LMILLER",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 7
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "plot_variable",
label=self.unitLabels()[idx], addSpace=False,
items=self.plotOptionList()[2:],
valueType=int, orientation="horizontal", labelWidth=150)
self.show_at(self.unitFlags()[idx], box1)
#widget index 8
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "TEMPER",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 9
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ENERGY",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 10
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ENERGY_END",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 11
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "NPOINTS",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
# widget index 12
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "DUMP_TO_FILE",
label=self.unitLabels()[idx], addSpace=True,
items=["No", "Yes"],
orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
# widget index 13
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "FILE_NAME",
label=self.unitLabels()[idx], addSpace=True)
self.show_at(self.unitFlags()[idx], box1)
gui.rubber(self.controlArea)
def unitLabels(self):
return ['Crystal:','h miller index','k miller index','l miller index',
'Plot:','Temperature factor [see help]:',
'From Energy [eV]','To energy [eV]','Number of points',
'Dump to file','File name']
def unitFlags(self):
return ['True','True','True','True','True','True','True','True','True',
"True","self.DUMP_TO_FILE == 1"]
def plotOptionList(self):
return ["Photon energy [eV]",
"Wavelength [A]",
"Bragg angle [deg]",
"Re(f_0)",
"Im(f_0) ",
"Re(FH)",
"Im(FH)",
"Re(FH_BAR)",
"Im(FH_BAR)",
"Re(psi_0)",
"Im(psi_0) ",
"Re(psi_H)",
"Im(psi_H)",
"Re(psi_BAR)",
"Im(psi_BAR)",
"Re(F(h,k,l))",
"Im(F(h,k,l))",
"delta (1-Re(refrac))",
"Re(refrac index)",
"Im(refrac index)",
"absorption coeff",
"s-pol Darwin half-width [microrad]",
"p-pol Darwin half-width [microrad]",
"Sin(Bragg angle)/Lambda",
"psi_over_f"]
def get_help_name(self):
return 'fh'
def check_fields(self):
self.HMILLER = congruence.checkNumber(self.HMILLER, "h miller index")
self.KMILLER = congruence.checkNumber(self.KMILLER, "k miller index")
self.LMILLER = congruence.checkNumber(self.LMILLER, "l miller index")
self.TEMPER = congruence.checkNumber(self.TEMPER, "Temperature factor")
self.ENERGY = congruence.checkPositiveNumber(self.ENERGY, "Energy from")
self.ENERGY_END = congruence.checkStrictlyPositiveNumber(self.ENERGY_END, "Energy to")
congruence.checkLessThan(self.ENERGY, self.ENERGY_END, "Energy from", "Energy to")
self.NPOINTS = congruence.checkStrictlyPositiveNumber(self.NPOINTS, "Number of Points")
def get_crystal_list(self):
crystal_list_xrl = list(xraylib.Crystal_GetCrystalsList())
if self.material_constants_library_flag == 0:
return crystal_list_xrl
if self.dx is None:
self.dx = DabaxXraylib()
crystal_list_dabax = self.dx.Crystal_GetCrystalsList()
if self.material_constants_library_flag == 1:
return crystal_list_dabax
crystal_list_combined = crystal_list_xrl
for crystal in crystal_list_dabax:
if crystal not in crystal_list_combined:
crystal_list_combined.append(crystal)
        return crystal_list_combined
def do_xoppy_calculation(self):
self.I_PLOT = self.plot_variable + 2
return self.xoppy_calc_xfh()
def extract_data_from_xoppy_output(self, calculation_output):
return calculation_output
def get_data_exchange_widget_name(self):
return "XFH"
def getTitles(self):
return ["Calculation Result"]
def getXTitles(self):
return ["Photon energy [eV]"]
def getYTitles(self):
return [self.plotOptionList()[self.I_PLOT]]
def getVariablesToPlot(self):
return [(0, self.I_PLOT)]
def getLogPlot(self):
return[(False, False)]
def xoppy_calc_xfh(self):
#TODO: remove I_ABSORP
ILATTICE = self.ILATTICE
HMILLER = self.HMILLER
KMILLER = self.KMILLER
LMILLER = self.LMILLER
I_PLOT = self.I_PLOT
TEMPER = self.TEMPER
ENERGY = self.ENERGY
ENERGY_END = self.ENERGY_END
NPOINTS = self.NPOINTS
descriptor = self.get_crystal_list()[self.ILATTICE]
if self.material_constants_library_flag == 0:
material_constants_library = xraylib
elif self.material_constants_library_flag == 1:
material_constants_library = self.dx
elif self.material_constants_library_flag == 2:
if descriptor in xraylib.Crystal_GetCrystalsList():
material_constants_library = xraylib
elif descriptor in self.dx.Crystal_GetCrystalsList():
material_constants_library = self.dx
else:
raise Exception("Descriptor not found in material constants database")
# descriptor = material_constants_library.Crystal_GetCrystalsList()[ILATTICE]
print("Using crystal descriptor: ",descriptor)
bragg_dictionary = bragg_calc2(descriptor=descriptor,
hh=HMILLER,
kk=KMILLER,
ll=LMILLER,
temper=TEMPER,
emin=ENERGY,
emax=ENERGY_END,
estep=50.0,
fileout=None,
material_constants_library=material_constants_library)
energy = numpy.linspace(ENERGY,ENERGY_END,NPOINTS)
out = numpy.zeros((25,NPOINTS))
info = ""
for i,ienergy in enumerate(energy):
dic2 = crystal_fh(bragg_dictionary,ienergy)
print("Energy=%g eV FH=(%g,%g)"%(ienergy,dic2["STRUCT"].real,dic2["STRUCT"].imag))
out[0,i] = ienergy
out[1,i] = dic2["WAVELENGTH"]*1e10
out[2,i] = dic2["THETA"]*180/numpy.pi
out[3,i] = dic2["F_0"].real
out[4,i] = dic2["F_0"].imag
out[5,i] = dic2["FH"].real
out[6,i] = dic2["FH"].imag
out[7,i] = dic2["FH_BAR"].real
out[8,i] = dic2["FH_BAR"].imag
out[9,i] = dic2["psi_0"].real
out[10,i] = dic2["psi_0"].imag
out[11,i] = dic2["psi_h"].real
out[12,i] = dic2["psi_h"].imag
out[13,i] = dic2["psi_hbar"].real
out[14,i] = dic2["psi_hbar"].imag
out[15,i] = dic2["STRUCT"].real
out[16,i] = dic2["STRUCT"].imag
out[17,i] = dic2["DELTA_REF"]
out[18,i] = dic2["REFRAC"].real
out[19,i] = dic2["REFRAC"].imag
out[20,i] = dic2["ABSORP"]
out[21,i] = 1e6 * dic2["ssr"] # in microrads
out[22,i] = 1e6 * dic2["spr"] # in microrads
out[23,i] = dic2["RATIO"]
out[24,i] = dic2["psi_over_f"]
info += "#\n#\n#\n"
info += dic2["info"]
if self.DUMP_TO_FILE:
            try:
                with open(self.FILE_NAME, "w") as file:
                    file.write("#F %s\n"%self.FILE_NAME)
                    file.write("\n#S 1 xoppy Fh results\n")
                    file.write("#N %d\n"%(out.shape[0]))
                    tmp = "#L"
                    for item in self.plotOptionList():
                        tmp += " %s"%(item)
                    tmp += "\n"
                    file.write(tmp)
                    for j in range(out.shape[1]):
                        file.write(("%19.12e "*out.shape[0]+"\n")%tuple(out[i,j] for i in range(out.shape[0])))
                print("File written to disk: %s \n"%self.FILE_NAME)
            except Exception:
                raise Exception("Fh: The data could not be dumped onto the specified file!\n")
#
# write python script
#
if isinstance(material_constants_library, DabaxXraylib):
material_constants_library_txt = "DabaxXraylib()"
else:
material_constants_library_txt = "xraylib"
dict_parameters = {
'descriptor': descriptor,
'ILATTICE' : ILATTICE,
'HMILLER' : HMILLER,
'KMILLER' : KMILLER,
'LMILLER' : LMILLER,
'I_PLOT' : self.I_PLOT,
'TEMPER' : TEMPER,
'ENERGY' : ENERGY,
'ENERGY_END' : ENERGY_END,
'NPOINTS' : NPOINTS,
'material_constants_library_txt': material_constants_library_txt,
'xtitle' : self.plotOptionList()[0],
'ytitle' : self.plotOptionList()[self.I_PLOT],
}
self.xoppy_script.set_code(self.script_template().format_map(dict_parameters))
#send exchange
calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())
try:
calculated_data.add_content("xoppy_data", out.T)
calculated_data.add_content("plot_x_col",0)
calculated_data.add_content("plot_y_col", I_PLOT)
except:
pass
try:
calculated_data.add_content("labels",self.plotOptionList())
except:
pass
try:
calculated_data.add_content("info",info)
except:
pass
return calculated_data
def script_template(self):
return """
#
# script to calculate crystal structure factors (created by XOPPY:Fh)
#
import numpy
from xoppylib.crystals.tools import bragg_calc2, crystal_fh
import xraylib
from dabax.dabax_xraylib import DabaxXraylib
#
# run bragg_calc2 (preprocessor)
#
bragg_dictionary = bragg_calc2(
descriptor = "{descriptor}",
hh = {HMILLER},
kk = {KMILLER},
ll = {LMILLER},
temper = {TEMPER},
emin = {ENERGY},
emax = {ENERGY_END},
estep = 50,
ANISO_SEL = 0,
fileout = None,
do_not_prototype = 0, # 0=use site groups (recommended), 1=use all individual sites
verbose = False,
material_constants_library = {material_constants_library_txt},
)
#
# run loop
#
energy = numpy.linspace({ENERGY},{ENERGY_END},{NPOINTS})
out = numpy.zeros((25,{NPOINTS}))
info = ""
for i,ienergy in enumerate(energy):
dic2 = crystal_fh(bragg_dictionary,ienergy)
print("Energy=%g eV FH=(%g,%g)"%(ienergy,dic2["STRUCT"].real,dic2["STRUCT"].imag))
out[0,i] = ienergy
out[1,i] = dic2["WAVELENGTH"]*1e10
out[2,i] = dic2["THETA"]*180/numpy.pi
out[3,i] = dic2["F_0"].real
out[4,i] = dic2["F_0"].imag
out[5,i] = dic2["FH"].real
out[6,i] = dic2["FH"].imag
out[7,i] = dic2["FH_BAR"].real
out[8,i] = dic2["FH_BAR"].imag
out[9,i] = dic2["psi_0"].real
out[10,i] = dic2["psi_0"].imag
out[11,i] = dic2["psi_h"].real
out[12,i] = dic2["psi_h"].imag
out[13,i] = dic2["psi_hbar"].real
out[14,i] = dic2["psi_hbar"].imag
out[15,i] = dic2["STRUCT"].real
out[16,i] = dic2["STRUCT"].imag
out[17,i] = dic2["DELTA_REF"]
out[18,i] = dic2["REFRAC"].real
out[19,i] = dic2["REFRAC"].imag
out[20,i] = dic2["ABSORP"]
out[21,i] = 1e6 * dic2["ssr"] # in microrads
out[22,i] = 1e6 * dic2["spr"] # in microrads
out[23,i] = dic2["RATIO"]
out[24,i] = dic2["psi_over_f"]
info += "#"
info += dic2["info"]
#
# example plot
#
from srxraylib.plot.gol import plot
plot(out[0,:], out[{I_PLOT},:], xtitle="{xtitle}", ytitle="{ytitle}")
#
# end script
#
"""
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWxfh()
w.show()
app.exec()
w.saveSettings() | PypiClean |
/AHRS-0.3.1-py3-none-any.whl/ahrs/filters/oleq.py |
import numpy as np
from ..common.mathfuncs import cosd, sind
class OLEQ:
"""
Optimal Linear Estimator of Quaternion
Parameters
----------
acc : numpy.ndarray, default: None
        N-by-3 array with measurements of acceleration in m/s^2
mag : numpy.ndarray, default: None
N-by-3 array with measurements of magnetic field in mT
magnetic_ref : float or numpy.ndarray
Local magnetic reference.
frame : str, default: 'NED'
Local tangent plane coordinate frame. Valid options are right-handed
``'NED'`` for North-East-Down and ``'ENU'`` for East-North-Up.
Raises
------
ValueError
When dimension of input arrays ``acc`` and ``mag`` are not equal.
Examples
--------
>>> acc_data.shape, mag_data.shape # NumPy arrays with sensor data
((1000, 3), (1000, 3))
>>> from ahrs.filters import OLEQ
>>> orientation = OLEQ(acc=acc_data, mag=mag_data)
>>> orientation.Q.shape # Estimated attitude
(1000, 4)
"""
def __init__(self,
acc: np.ndarray = None,
mag: np.ndarray = None,
weights: np.ndarray = None,
magnetic_ref: np.ndarray = None,
frame: str = 'NED'
):
self.acc = acc
self.mag = mag
self.a = weights if weights is not None else np.ones(2)
self.frame = frame
# Reference measurements
self._set_reference_frames(magnetic_ref, self.frame)
if self.acc is not None and self.mag is not None:
self.Q = self._compute_all()
def _set_reference_frames(self, mref: float, frame: str = 'NED') -> None:
if frame.upper() not in ['NED', 'ENU']:
raise ValueError(f"Invalid frame '{frame}'. Try 'NED' or 'ENU'")
# Magnetic Reference Vector
if mref is None:
# Local magnetic reference of Munich, Germany
from ..common.mathfuncs import MUNICH_LATITUDE, MUNICH_LONGITUDE, MUNICH_HEIGHT
from ..utils.wmm import WMM
wmm = WMM(latitude=MUNICH_LATITUDE, longitude=MUNICH_LONGITUDE, height=MUNICH_HEIGHT)
self.m_ref = np.array([wmm.X, wmm.Y, wmm.Z]) if frame.upper() == 'NED' else np.array([wmm.Y, wmm.X, -wmm.Z])
elif isinstance(mref, (int, float)):
cd, sd = cosd(mref), sind(mref)
self.m_ref = np.array([cd, 0.0, sd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd])
else:
self.m_ref = np.copy(mref)
self.m_ref /= np.linalg.norm(self.m_ref)
# Gravitational Reference Vector
self.a_ref = np.array([0.0, 0.0, -1.0]) if frame.upper() == 'NED' else np.array([0.0, 0.0, 1.0])
def _compute_all(self) -> np.ndarray:
"""Estimate the quaternions given all data.
Attributes ``acc`` and ``mag`` must contain data.
Returns
-------
Q : array
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.mag.shape:
raise ValueError("acc and mag are not the same size")
num_samples = len(self.acc)
Q = np.zeros((num_samples, 4))
for t in range(num_samples):
Q[t] = self.estimate(self.acc[t], self.mag[t])
return Q
def WW(self, Db: np.ndarray, Dr: np.ndarray) -> np.ndarray:
"""W Matrix
.. math::
\\mathbf{W} = D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3
Parameters
----------
Db : numpy.ndarray
Normalized tri-axial observations vector.
Dr : numpy.ndarray
Normalized tri-axial reference vector.
Returns
-------
W_matrix : numpy.ndarray
W Matrix.
"""
bx, by, bz = Db
rx, ry, rz = Dr
M1 = np.array([
[bx, 0.0, bz, -by],
[0.0, bx, by, bz],
[bz, by, -bx, 0.0],
[-by, bz, 0.0, -bx]]) # (eq. 18a)
M2 = np.array([
[by, -bz, 0.0, bx],
[-bz, -by, bx, 0.0],
[0.0, bx, by, bz],
[bx, 0.0, bz, -by]]) # (eq. 18b)
M3 = np.array([
[bz, by, -bx, 0.0],
[by, -bz, 0.0, bx],
[-bx, 0.0, -bz, by],
[0.0, bx, by, bz]]) # (eq. 18c)
return rx*M1 + ry*M2 + rz*M3 # (eq. 20)
def estimate(self, acc: np.ndarray, mag: np.ndarray) -> np.ndarray:
"""Attitude Estimation
Parameters
----------
acc : numpy.ndarray
Sample of tri-axial Accelerometer.
mag : numpy.ndarray
Sample of tri-axial Magnetometer.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
# Normalize measurements (eq. 1)
a_norm = np.linalg.norm(acc)
m_norm = np.linalg.norm(mag)
if not a_norm > 0 or not m_norm > 0: # handle NaN
return None
acc = np.copy(acc)/a_norm
mag = np.copy(mag)/m_norm
sum_aW = self.a[0]*self.WW(acc, self.a_ref) + self.a[1]*self.WW(mag, self.m_ref) # (eq. 31)
R = 0.5*(np.identity(4) + sum_aW) # (eq. 33)
q = np.array([0., 0., 0., 1.]) # "random" quaternion
last_q = np.array([1., 0., 0., 0.])
i = 0
while np.linalg.norm(q-last_q) > 1e-8 and i <= 20:
last_q = q
q = R @ last_q # (eq. 24)
q /= np.linalg.norm(q)
i += 1
return q/np.linalg.norm(q) | PypiClean |
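# --- Hedged usage sketch (not part of the original module) ---
# A single-sample estimate with synthetic readings in the NED frame: at rest the
# accelerometer measures specific force along -Z, and the magnetometer sample is
# chosen equal to the (assumed) local magnetic reference, so the estimated
# attitude should be close to the identity quaternion (up to sign).
if __name__ == "__main__":
    acc = np.array([0.0, 0.0, -9.81])      # m/s^2, NED frame
    mag = np.array([21.0, 0.0, 44.0])      # uT, assumed Munich-like field
    oleq = OLEQ(magnetic_ref=np.array([21.0, 0.0, 44.0]), frame='NED')
    print(oleq.estimate(acc, mag))         # unit quaternion, approximately [1, 0, 0, 0]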
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/sites/sites/nyaa.py |
from urllib.parse import quote
import feedparser
from loguru import logger
from flexget import plugin
from flexget.components.sites.utils import normalize_unicode, torrent_availability
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.tools import parse_filesize
logger = logger.bind(name='nyaa')
CATEGORIES = {
'all': '0_0',
# Anime
'anime': '1_0',
'anime amv': '1_1',
'anime eng': '1_2',
'anime non-eng': '1_3',
'anime raw': '1_4',
# Audio
'audio': '2_0',
'audio lless': '2_1',
'audio lossy': '2_2',
# Literature
'lit': '3_0',
'lit eng': '3_1',
'lit non-eng': '3_2',
'lit raw': '3_3',
# Live Action
'liveact': '4_0',
'liveact eng': '4_1',
'liveact idol': '4_2',
'liveact non-eng': '4_3',
'liveact raw': '4_4',
# Pictures
'pics': '5_0',
'pics graphics': '5_1',
'pics photos': '5_2',
# Software
'software': '6_0',
'software apps': '6_1',
'software games': '6_2',
}
FILTERS = ['all', 'filter remakes', 'trusted only']
class UrlRewriteNyaa:
"""Nyaa urlrewriter and search plugin."""
schema = {
'oneOf': [
{'type': 'string', 'enum': list(CATEGORIES)},
{
'type': 'object',
'properties': {
'category': {'type': 'string', 'enum': list(CATEGORIES)},
'filter': {'type': 'string', 'enum': list(FILTERS)},
},
'additionalProperties': False,
},
]
}
def search(self, task, entry, config):
if not isinstance(config, dict):
config = {'category': config}
config.setdefault('category', 'anime eng')
config.setdefault('filter', 'all')
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
name = normalize_unicode(search_string)
url = 'https://www.nyaa.si/?page=rss&q={}&c={}&f={}'.format(
quote(name.encode('utf-8')),
CATEGORIES[config['category']],
FILTERS.index(config['filter']),
)
logger.debug('requesting: {}', url)
rss = feedparser.parse(url)
status = rss.get('status', False)
if status != 200:
logger.debug('Search result not 200 (OK), received {}', status)
if status >= 400:
continue
ex = rss.get('bozo_exception', False)
if ex:
logger.error('Got bozo_exception (bad feed) on {}', url)
continue
            for item in rss.entries:
                # Use a distinct name so the 'entry' argument is not shadowed
                result = Entry()
                result['title'] = item.title
                result['url'] = item.link
                result['torrent_seeds'] = int(item.nyaa_seeders)
                result['torrent_leeches'] = int(item.nyaa_leechers)
                result['torrent_info_hash'] = item.nyaa_infohash
                result['torrent_availability'] = torrent_availability(
                    result['torrent_seeds'], result['torrent_leeches']
                )
                if item.nyaa_size:
                    result['content_size'] = parse_filesize(item.nyaa_size)
                entries.add(result)
return entries
def url_rewritable(self, task, entry):
return entry['url'].startswith('https://www.nyaa.si/view/')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('view', 'download') + ".torrent"
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteNyaa, 'nyaa', interfaces=['search', 'urlrewriter'], api_ver=2) | PypiClean |
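# --- Hedged usage sketch (not part of the original plugin) ---
# Exercising search() outside a normal FlexGet run; the `task` argument is not
# used by the method, so None is passed. Requires network access to nyaa.si.
if __name__ == "__main__":
    nyaa = UrlRewriteNyaa()
    fake_entry = Entry(title='Ghost in the Shell', url='')
    found = nyaa.search(None, fake_entry, {'category': 'anime eng', 'filter': 'all'})
    for result in sorted(found, key=lambda e: e['torrent_seeds'], reverse=True)[:5]:
        print(result['torrent_seeds'], result['title'])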
/Harakiri-0.2.3-py3-none-any.whl/harakiri/client.py |
from typing import Any, Dict, Optional
import aiohttp
from harakiri.model import (
SkillData,
AllSkillsData,
GalleryPost,
GalleryList,
)
class Client:
@staticmethod
async def get(path: str, params: Optional[Dict[str, Any]] = None):
url = "http://manjiapi.ombe.xyz" + path
async with aiohttp.ClientSession() as session:
async with session.get(url=url, params=params) as resp:
return await resp.json()
async def skill_data(self, num: int) -> SkillData:
"""Get skill data corresponding to the number.
        >>> await client.skill_data(0)
{"status":200,"info": ...
Parameters
- `num`(int): skill number (max: 411)
Returns
- Class `SkillData`
"""
return SkillData(**await self.get(f"/skill/{num}"))
async def all_skills_data(self) -> AllSkillsData:
"""Get all skill datas.
>>> client.add_skills_data()
{"status":200,"skill_list":[ ...
        Parameters
        - None
Returns
- Class `AllSkillData`
"""
return AllSkillsData(**await self.get("/skill/all"))
async def gallery_post(self, num: int) -> GalleryPost:
"""Get post info from yoshimitsu gallery.
        >>> await client.gallery_post(4749)
{"status":200,"content":{"title": ...
Parameters
- `num`(int): yoshimitsu gallery post id
Returns
- Class `GalleryPost`
"""
return GalleryPost(**await self.get(f"/gallery/view/{num}"))
async def gallery_todaytip(self, page: Optional[int] = 1) -> GalleryList:
"""Get post list from yoshimitsu gallery todaytip head.
        >>> await client.gallery_todaytip(1)
{"status":200,"total":49,"lists":[ ...
Parameters
- `page`(int): todaytip head page
Returns
- Class `GalleryList`
"""
return GalleryList(**await self.get(f"/gallery/tt/lists/{page}"))
async def gallery_search(
self,
keyword: str,
search_mode: Optional[str] = "search_subject_memo",
page: Optional[int] = 1,
    ) -> GalleryList:
"""Get search results from the yoshimitsu gallery.
        >>> await client.gallery_search("ㅇyㅇ", "search_name", 1)
{"status":200,"total":22,"lists":[ ...
Parameters
- `keyword`(str): search keyword
- `search_mode`: search mode
- `search_subject_memo`: search by `title + content`
- `search_subject`: search by `title`
- `search_memo`: search by `content`
            - `search_name`: search by `writer`
        - `page`(int): page number of the search results
Returns
- Class `GalleryList`
"""
return GalleryList(
**await self.get(
"/gallery/search",
params={"keyword": keyword, "search_mode": search_mode, "page": page},
)
) | PypiClean |
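# --- Hedged usage sketch (not part of the original module) ---
# All client methods are coroutines, so they must be awaited inside an event
# loop. Requires network access to the manjiapi host configured above.
if __name__ == "__main__":
    import asyncio
    async def main():
        client = Client()
        print(await client.skill_data(0))
    asyncio.run(main())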
/MolScribe-1.1.1.tar.gz/MolScribe-1.1.1/molscribe/augment.py |
import albumentations as A
from albumentations.augmentations.geometric.functional import safe_rotate_enlarged_img_size, _maybe_process_in_chunks, \
keypoint_rotate
import cv2
import math
import random
import numpy as np
def safe_rotate(
img: np.ndarray,
angle: int = 0,
interpolation: int = cv2.INTER_LINEAR,
value: int = None,
border_mode: int = cv2.BORDER_REFLECT_101,
):
old_rows, old_cols = img.shape[:2]
# getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
image_center = (old_cols / 2, old_rows / 2)
# Rows and columns of the rotated image (not cropped)
new_rows, new_cols = safe_rotate_enlarged_img_size(angle=angle, rows=old_rows, cols=old_cols)
# Rotation Matrix
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
# Shift the image to create padding
rotation_mat[0, 2] += new_cols / 2 - image_center[0]
rotation_mat[1, 2] += new_rows / 2 - image_center[1]
# CV2 Transformation function
warp_affine_fn = _maybe_process_in_chunks(
cv2.warpAffine,
M=rotation_mat,
dsize=(new_cols, new_rows),
flags=interpolation,
borderMode=border_mode,
borderValue=value,
)
# rotate image with the new bounds
rotated_img = warp_affine_fn(img)
return rotated_img
def keypoint_safe_rotate(keypoint, angle, rows, cols):
old_rows = rows
old_cols = cols
# Rows and columns of the rotated image (not cropped)
new_rows, new_cols = safe_rotate_enlarged_img_size(angle=angle, rows=old_rows, cols=old_cols)
col_diff = (new_cols - old_cols) / 2
row_diff = (new_rows - old_rows) / 2
# Shift keypoint
shifted_keypoint = (int(keypoint[0] + col_diff), int(keypoint[1] + row_diff), keypoint[2], keypoint[3])
# Rotate keypoint
rotated_keypoint = keypoint_rotate(shifted_keypoint, angle, rows=new_rows, cols=new_cols)
return rotated_keypoint
class SafeRotate(A.SafeRotate):
def __init__(
self,
limit=90,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
value=None,
mask_value=None,
always_apply=False,
p=0.5,
):
super(SafeRotate, self).__init__(
limit=limit,
interpolation=interpolation,
border_mode=border_mode,
value=value,
mask_value=mask_value,
always_apply=always_apply,
p=p)
def apply(self, img, angle=0, interpolation=cv2.INTER_LINEAR, **params):
return safe_rotate(
img=img, value=self.value, angle=angle, interpolation=interpolation, border_mode=self.border_mode)
def apply_to_keypoint(self, keypoint, angle=0, **params):
return keypoint_safe_rotate(keypoint, angle=angle, rows=params["rows"], cols=params["cols"])
class CropWhite(A.DualTransform):
def __init__(self, value=(255, 255, 255), pad=0, p=1.0):
super(CropWhite, self).__init__(p=p)
self.value = value
self.pad = pad
assert pad >= 0
def update_params(self, params, **kwargs):
super().update_params(params, **kwargs)
assert "image" in kwargs
img = kwargs["image"]
height, width, _ = img.shape
x = (img != self.value).sum(axis=2)
if x.sum() == 0:
return params
row_sum = x.sum(axis=1)
top = 0
while row_sum[top] == 0 and top+1 < height:
top += 1
bottom = height
while row_sum[bottom-1] == 0 and bottom-1 > top:
bottom -= 1
col_sum = x.sum(axis=0)
left = 0
while col_sum[left] == 0 and left+1 < width:
left += 1
right = width
while col_sum[right-1] == 0 and right-1 > left:
right -= 1
# crop_top = max(0, top - self.pad)
# crop_bottom = max(0, height - bottom - self.pad)
# crop_left = max(0, left - self.pad)
# crop_right = max(0, width - right - self.pad)
# params.update({"crop_top": crop_top, "crop_bottom": crop_bottom,
# "crop_left": crop_left, "crop_right": crop_right})
params.update({"crop_top": top, "crop_bottom": height - bottom,
"crop_left": left, "crop_right": width - right})
return params
def apply(self, img, crop_top=0, crop_bottom=0, crop_left=0, crop_right=0, **params):
height, width, _ = img.shape
img = img[crop_top:height - crop_bottom, crop_left:width - crop_right]
img = A.augmentations.pad_with_params(
img, self.pad, self.pad, self.pad, self.pad, border_mode=cv2.BORDER_CONSTANT, value=self.value)
return img
def apply_to_keypoint(self, keypoint, crop_top=0, crop_bottom=0, crop_left=0, crop_right=0, **params):
x, y, angle, scale = keypoint[:4]
return x - crop_left + self.pad, y - crop_top + self.pad, angle, scale
def get_transform_init_args_names(self):
return ('value', 'pad')
class PadWhite(A.DualTransform):
def __init__(self, pad_ratio=0.2, p=0.5, value=(255, 255, 255)):
super(PadWhite, self).__init__(p=p)
self.pad_ratio = pad_ratio
self.value = value
def update_params(self, params, **kwargs):
super().update_params(params, **kwargs)
assert "image" in kwargs
img = kwargs["image"]
height, width, _ = img.shape
side = random.randrange(4)
if side == 0:
params['pad_top'] = int(height * self.pad_ratio * random.random())
elif side == 1:
params['pad_bottom'] = int(height * self.pad_ratio * random.random())
elif side == 2:
params['pad_left'] = int(width * self.pad_ratio * random.random())
elif side == 3:
params['pad_right'] = int(width * self.pad_ratio * random.random())
return params
def apply(self, img, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
height, width, _ = img.shape
img = A.augmentations.pad_with_params(
img, pad_top, pad_bottom, pad_left, pad_right, border_mode=cv2.BORDER_CONSTANT, value=self.value)
return img
def apply_to_keypoint(self, keypoint, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
x, y, angle, scale = keypoint[:4]
return x + pad_left, y + pad_top, angle, scale
def get_transform_init_args_names(self):
return ('value', 'pad_ratio')
class SaltAndPepperNoise(A.DualTransform):
def __init__(self, num_dots, value=(0, 0, 0), p=0.5):
super().__init__(p)
self.num_dots = num_dots
self.value = value
    def apply(self, img, **params):
        img = img.copy()  # avoid mutating the caller's array in place
        height, width, _ = img.shape
        num_dots = random.randrange(self.num_dots + 1)
        for i in range(num_dots):
            x = random.randrange(height)
            y = random.randrange(width)
            img[x, y] = self.value
        return img
def apply_to_keypoint(self, keypoint, **params):
return keypoint
def get_transform_init_args_names(self):
return ('value', 'num_dots')
class ResizePad(A.DualTransform):
def __init__(self, height, width, interpolation=cv2.INTER_LINEAR, value=(255, 255, 255)):
super(ResizePad, self).__init__(always_apply=True)
self.height = height
self.width = width
self.interpolation = interpolation
self.value = value
def apply(self, img, interpolation=cv2.INTER_LINEAR, **params):
h, w, _ = img.shape
img = A.augmentations.geometric.functional.resize(
img,
height=min(h, self.height),
width=min(w, self.width),
interpolation=interpolation
)
h, w, _ = img.shape
pad_top = (self.height - h) // 2
pad_bottom = (self.height - h) - pad_top
pad_left = (self.width - w) // 2
pad_right = (self.width - w) - pad_left
img = A.augmentations.pad_with_params(
img,
pad_top,
pad_bottom,
pad_left,
pad_right,
border_mode=cv2.BORDER_CONSTANT,
value=self.value,
)
return img
def normalized_grid_distortion(
img,
num_steps=10,
xsteps=(),
ysteps=(),
*args,
**kwargs
):
height, width = img.shape[:2]
# compensate for smaller last steps in source image.
x_step = width // num_steps
last_x_step = min(width, ((num_steps + 1) * x_step)) - (num_steps * x_step)
xsteps[-1] *= last_x_step / x_step
y_step = height // num_steps
last_y_step = min(height, ((num_steps + 1) * y_step)) - (num_steps * y_step)
ysteps[-1] *= last_y_step / y_step
# now normalize such that distortion never leaves image bounds.
tx = width / math.floor(width / num_steps)
ty = height / math.floor(height / num_steps)
xsteps = np.array(xsteps) * (tx / np.sum(xsteps))
ysteps = np.array(ysteps) * (ty / np.sum(ysteps))
# do actual distortion.
return A.augmentations.functional.grid_distortion(img, num_steps, xsteps, ysteps, *args, **kwargs)
class NormalizedGridDistortion(A.augmentations.transforms.GridDistortion):
def apply(self, img, stepsx=(), stepsy=(), interpolation=cv2.INTER_LINEAR, **params):
return normalized_grid_distortion(img, self.num_steps, stepsx, stepsy, interpolation, self.border_mode,
self.value)
def apply_to_mask(self, img, stepsx=(), stepsy=(), **params):
return normalized_grid_distortion(
img, self.num_steps, stepsx, stepsy, cv2.INTER_NEAREST, self.border_mode, self.mask_value) | PypiClean |
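# --- Hedged usage sketch (not part of the original module) ---
# Composing the custom transforms on a dummy molecule-style image (white
# background, one dark stroke) with keypoint tracking enabled.
if __name__ == "__main__":
    image = np.full((64, 64, 3), 255, dtype=np.uint8)
    image[20:40, 30:32] = 0  # a fake "bond"
    transform = A.Compose(
        [
            SafeRotate(limit=45, border_mode=cv2.BORDER_CONSTANT, value=(255, 255, 255), p=1.0),
            CropWhite(pad=5),
            SaltAndPepperNoise(num_dots=10, value=(0, 0, 0), p=1.0),
        ],
        keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
    )
    out = transform(image=image, keypoints=[(31, 30)])
    print(out['image'].shape, out['keypoints'])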
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/angular-sanitize.min.js | (function(n,h,p){'use strict';function E(a){var d=[];s(d,h.noop).chars(a);return d.join("")}function g(a){var d={};a=a.split(",");var c;for(c=0;c<a.length;c++)d[a[c]]=!0;return d}function F(a,d){function c(a,b,c,l){b=h.lowercase(b);if(t[b])for(;f.last()&&u[f.last()];)e("",f.last());v[b]&&f.last()==b&&e("",b);(l=w[b]||!!l)||f.push(b);var m={};c.replace(G,function(a,b,d,c,e){m[b]=r(d||c||e||"")});d.start&&d.start(b,m,l)}function e(a,b){var c=0,e;if(b=h.lowercase(b))for(c=f.length-1;0<=c&&f[c]!=b;c--);
if(0<=c){for(e=f.length-1;e>=c;e--)d.end&&d.end(f[e]);f.length=c}}"string"!==typeof a&&(a=null===a||"undefined"===typeof a?"":""+a);var b,k,f=[],m=a,l;for(f.last=function(){return f[f.length-1]};a;){l="";k=!0;if(f.last()&&x[f.last()])a=a.replace(new RegExp("(.*)<\\s*\\/\\s*"+f.last()+"[^>]*>","i"),function(a,b){b=b.replace(H,"$1").replace(I,"$1");d.chars&&d.chars(r(b));return""}),e("",f.last());else{if(0===a.indexOf("\x3c!--"))b=a.indexOf("--",4),0<=b&&a.lastIndexOf("--\x3e",b)===b&&(d.comment&&d.comment(a.substring(4,
b)),a=a.substring(b+3),k=!1);else if(y.test(a)){if(b=a.match(y))a=a.replace(b[0],""),k=!1}else if(J.test(a)){if(b=a.match(z))a=a.substring(b[0].length),b[0].replace(z,e),k=!1}else K.test(a)&&((b=a.match(A))?(b[4]&&(a=a.substring(b[0].length),b[0].replace(A,c)),k=!1):(l+="<",a=a.substring(1)));k&&(b=a.indexOf("<"),l+=0>b?a:a.substring(0,b),a=0>b?"":a.substring(b),d.chars&&d.chars(r(l)))}if(a==m)throw L("badparse",a);m=a}e()}function r(a){if(!a)return"";var d=M.exec(a);a=d[1];var c=d[3];if(d=d[2])q.innerHTML=
d.replace(/</g,"<"),d="textContent"in q?q.textContent:q.innerText;return a+d+c}function B(a){return a.replace(/&/g,"&").replace(N,function(a){var c=a.charCodeAt(0);a=a.charCodeAt(1);return"&#"+(1024*(c-55296)+(a-56320)+65536)+";"}).replace(O,function(a){return"&#"+a.charCodeAt(0)+";"}).replace(/</g,"<").replace(/>/g,">")}function s(a,d){var c=!1,e=h.bind(a,a.push);return{start:function(a,k,f){a=h.lowercase(a);!c&&x[a]&&(c=a);c||!0!==C[a]||(e("<"),e(a),h.forEach(k,function(c,f){var k=
h.lowercase(f),g="img"===a&&"src"===k||"background"===k;!0!==P[k]||!0===D[k]&&!d(c,g)||(e(" "),e(f),e('="'),e(B(c)),e('"'))}),e(f?"/>":">"))},end:function(a){a=h.lowercase(a);c||!0!==C[a]||(e("</"),e(a),e(">"));a==c&&(c=!1)},chars:function(a){c||e(B(a))}}}var L=h.$$minErr("$sanitize"),A=/^<((?:[a-zA-Z])[\w:-]*)((?:\s+[\w:-]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)\s*(>?)/,z=/^<\/\s*([\w:-]+)[^>]*>/,G=/([\w:-]+)(?:\s*=\s*(?:(?:"((?:[^"])*)")|(?:'((?:[^'])*)')|([^>\s]+)))?/g,K=/^</,
J=/^<\//,H=/\x3c!--(.*?)--\x3e/g,y=/<!DOCTYPE([^>]*?)>/i,I=/<!\[CDATA\[(.*?)]]\x3e/g,N=/[\uD800-\uDBFF][\uDC00-\uDFFF]/g,O=/([^\#-~| |!])/g,w=g("area,br,col,hr,img,wbr");n=g("colgroup,dd,dt,li,p,tbody,td,tfoot,th,thead,tr");p=g("rp,rt");var v=h.extend({},p,n),t=h.extend({},n,g("address,article,aside,blockquote,caption,center,del,dir,div,dl,figure,figcaption,footer,h1,h2,h3,h4,h5,h6,header,hgroup,hr,ins,map,menu,nav,ol,pre,script,section,table,ul")),u=h.extend({},p,g("a,abbr,acronym,b,bdi,bdo,big,br,cite,code,del,dfn,em,font,i,img,ins,kbd,label,map,mark,q,ruby,rp,rt,s,samp,small,span,strike,strong,sub,sup,time,tt,u,var"));
n=g("animate,animateColor,animateMotion,animateTransform,circle,defs,desc,ellipse,font-face,font-face-name,font-face-src,g,glyph,hkern,image,linearGradient,line,marker,metadata,missing-glyph,mpath,path,polygon,polyline,radialGradient,rect,set,stop,svg,switch,text,title,tspan,use");var x=g("script,style"),C=h.extend({},w,t,u,v,n),D=g("background,cite,href,longdesc,src,usemap,xlink:href");n=g("abbr,align,alt,axis,bgcolor,border,cellpadding,cellspacing,class,clear,color,cols,colspan,compact,coords,dir,face,headers,height,hreflang,hspace,ismap,lang,language,nohref,nowrap,rel,rev,rows,rowspan,rules,scope,scrolling,shape,size,span,start,summary,target,title,type,valign,value,vspace,width");
p=g("accent-height,accumulate,additive,alphabetic,arabic-form,ascent,attributeName,attributeType,baseProfile,bbox,begin,by,calcMode,cap-height,class,color,color-rendering,content,cx,cy,d,dx,dy,descent,display,dur,end,fill,fill-rule,font-family,font-size,font-stretch,font-style,font-variant,font-weight,from,fx,fy,g1,g2,glyph-name,gradientUnits,hanging,height,horiz-adv-x,horiz-origin-x,ideographic,k,keyPoints,keySplines,keyTimes,lang,marker-end,marker-mid,marker-start,markerHeight,markerUnits,markerWidth,mathematical,max,min,offset,opacity,orient,origin,overline-position,overline-thickness,panose-1,path,pathLength,points,preserveAspectRatio,r,refX,refY,repeatCount,repeatDur,requiredExtensions,requiredFeatures,restart,rotate,rx,ry,slope,stemh,stemv,stop-color,stop-opacity,strikethrough-position,strikethrough-thickness,stroke,stroke-dasharray,stroke-dashoffset,stroke-linecap,stroke-linejoin,stroke-miterlimit,stroke-opacity,stroke-width,systemLanguage,target,text-anchor,to,transform,type,u1,u2,underline-position,underline-thickness,unicode,unicode-range,units-per-em,values,version,viewBox,visibility,width,widths,x,x-height,x1,x2,xlink:actuate,xlink:arcrole,xlink:role,xlink:show,xlink:title,xlink:type,xml:base,xml:lang,xml:space,xmlns,xmlns:xlink,y,y1,y2,zoomAndPan");
var P=h.extend({},D,p,n),q=document.createElement("pre"),M=/^(\s*)([\s\S]*?)(\s*)$/;h.module("ngSanitize",[]).provider("$sanitize",function(){this.$get=["$$sanitizeUri",function(a){return function(d){var c=[];F(d,s(c,function(c,b){return!/^unsafe/.test(a(c,b))}));return c.join("")}}]});h.module("ngSanitize").filter("linky",["$sanitize",function(a){var d=/((ftp|https?):\/\/|(mailto:)?[A-Za-z0-9._%+-]+@)\S*[^\s.;,(){}<>"]/,c=/^mailto:/;return function(e,b){function k(a){a&&g.push(E(a))}function f(a,
c){g.push("<a ");h.isDefined(b)&&g.push('target="',b,'" ');g.push('href="',a.replace('"',"""),'">');k(c);g.push("</a>")}if(!e)return e;for(var m,l=e,g=[],n,p;m=l.match(d);)n=m[0],m[2]==m[3]&&(n="mailto:"+n),p=m.index,k(l.substr(0,p)),f(n,m[0].replace(c,"")),l=l.substring(p+m[0].length);k(l);return a(g.join(""))}}])})(window,window.angular);
//# sourceMappingURL=angular-sanitize.min.js.map | PypiClean |
/Flask-Ink-3.1.10.tar.gz/Flask-Ink-3.1.10/flask_ink/static/js/ink.tooltip.js |
Ink.createModule('Ink.UI.Tooltip', '1', ['Ink.UI.Aux_1', 'Ink.Dom.Event_1', 'Ink.Dom.Element_1', 'Ink.Dom.Selector_1', 'Ink.Util.Array_1', 'Ink.Dom.Css_1', 'Ink.Dom.Browser_1'], function (Aux, InkEvent, InkElement, Selector, InkArray, Css) {
'use strict';
/**
* @class Ink.UI.Tooltip
* @constructor
*
* @param {DOMElement|String} target Target element or selector of elements, to display the tooltips on.
* @param {Object} [options]
* @param [options.text=''] Text content for the tooltip.
* @param [options.where='up'] Positioning for the tooltip. Options:
* @param options.where.up/down/left/right Place above, below, to the left of, or to the right of, the target. Show an arrow.
* @param options.where.mousemove Place the tooltip to the bottom and to the right of the mouse when it hovers the element, and follow the mouse as it moves.
* @param options.where.mousefix Place the tooltip to the bottom and to the right of the mouse when it hovers the element, keep the tooltip there motionless.
*
* @param [options.color=''] Color of the tooltip. Options are red, orange, blue, green and black. Default is white.
* @param [options.fade=0.3] Fade time; Duration of the fade in/out effect.
* @param [options.forever=0] Set to 1/true to prevent the tooltip from being erased when the mouse hovers away from the target
* @param [options.timeout=0] Time for the tooltip to live. Useful together with [options.forever].
* @param [options.delay] Time the tooltip waits until it is displayed. Useful to avoid getting the attention of the user unnecessarily
* @param [options.template=null] Element or selector containing HTML to be cloned into the tooltips. Can be a hidden element, because CSS `display` is set to `block`.
* @param [options.templatefield=null] Selector within the template element to choose where the text is inserted into the tooltip. Useful when a wrapper DIV is required.
*
* @param [options.left,top=10] (Nitty-gritty) Spacing from the target to the tooltip, when `where` is `mousemove` or `mousefix`
* @param [options.spacing=8] (Nitty-gritty) Spacing between the tooltip and the target element, when `where` is `up`, `down`, `left`, or `right`
*
* @example
* <ul class="buttons">
* <li class="button" data-tip-text="Create a new document">New</li>
* <li class="button" data-tip-text="Exit the program">Quit</li>
* <li class="button" data-tip-text="Save the document you are working on">Save</li>
* </ul>
*
* [...]
*
* <script>
* Ink.requireModules(['Ink.UI.Tooltip_1'], function (Tooltip) {
* new Tooltip('.button', {where: 'mousefix'});
* });
* </script>
*/
function Tooltip(element, options) {
this._init(element, options || {});
}
function EachTooltip(root, elm) {
this._init(root, elm);
}
var transitionDurationName,
transitionPropertyName,
transitionTimingFunctionName;
(function () { // Feature detection
var test = document.createElement('DIV');
var names = ['transition', 'oTransition', 'msTransition', 'mozTransition',
'webkitTransition'];
for (var i = 0; i < names.length; i++) {
if (typeof test.style[names[i] + 'Duration'] !== 'undefined') {
transitionDurationName = names[i] + 'Duration';
transitionPropertyName = names[i] + 'Property';
transitionTimingFunctionName = names[i] + 'TimingFunction';
break;
}
}
}());
// Body or documentElement
var bodies = document.getElementsByTagName('body');
var body = bodies && bodies.length ? bodies[0] : document.documentElement;
Tooltip.prototype = {
_init: function(element, options) {
var elements;
this.options = Ink.extendObj({
where: 'up',
zIndex: 10000,
left: 10,
top: 10,
spacing: 8,
forever: 0,
color: '',
timeout: 0,
delay: 0,
template: null,
templatefield: null,
fade: 0.3,
text: ''
}, options || {});
if (typeof element === 'string') {
elements = Selector.select(element);
} else if (typeof element === 'object') {
elements = [element];
} else {
throw 'Element expected';
}
this.tooltips = [];
for (var i = 0, len = elements.length; i < len; i++) {
this.tooltips[i] = new EachTooltip(this, elements[i]);
}
},
/**
* Destroys the tooltips created by this instance
*
* @method destroy
*/
destroy: function () {
InkArray.each(this.tooltips, function (tooltip) {
tooltip._destroy();
});
this.tooltips = null;
this.options = null;
}
};
EachTooltip.prototype = {
_oppositeDirections: {
left: 'right',
right: 'left',
up: 'down',
down: 'up'
},
_init: function(root, elm) {
InkEvent.observe(elm, 'mouseover', Ink.bindEvent(this._onMouseOver, this));
InkEvent.observe(elm, 'mouseout', Ink.bindEvent(this._onMouseOut, this));
InkEvent.observe(elm, 'mousemove', Ink.bindEvent(this._onMouseMove, this));
this.root = root;
this.element = elm;
this._delayTimeout = null;
this.tooltip = null;
},
_makeTooltip: function (mousePosition) {
if (!this._getOpt('text')) {
return false;
}
var tooltip = this._createTooltipElement();
if (this.tooltip) {
this._removeTooltip();
}
this.tooltip = tooltip;
this._fadeInTooltipElement(tooltip);
this._placeTooltipElement(tooltip, mousePosition);
InkEvent.observe(tooltip, 'mouseover', Ink.bindEvent(this._onTooltipMouseOver, this));
var timeout = this._getFloatOpt('timeout');
if (timeout) {
setTimeout(Ink.bind(function () {
if (this.tooltip === tooltip) {
this._removeTooltip();
}
}, this), timeout * 1000);
}
},
_createTooltipElement: function () {
var template = this._getOpt('template'), // User template instead of our HTML
templatefield = this._getOpt('templatefield'),
tooltip, // The element we float
field; // Element where we write our message. Child or same as the above
if (template) { // The user told us of a template to use. We copy it.
var temp = document.createElement('DIV');
temp.innerHTML = Aux.elOrSelector(template, 'options.template').outerHTML;
tooltip = temp.firstChild;
if (templatefield) {
field = Selector.select(templatefield, tooltip);
if (field) {
field = field[0];
} else {
throw 'options.templatefield must be a valid selector within options.template';
}
} else {
field = tooltip; // Assume same element if user did not specify a field
}
} else { // We create the default structure
tooltip = document.createElement('DIV');
Css.addClassName(tooltip, 'ink-tooltip');
Css.addClassName(tooltip, this._getOpt('color'));
field = document.createElement('DIV');
Css.addClassName(field, 'content');
tooltip.appendChild(field);
}
InkElement.setTextContent(field, this._getOpt('text'));
tooltip.style.display = 'block';
tooltip.style.position = 'absolute';
tooltip.style.zIndex = this._getIntOpt('zIndex');
return tooltip;
},
_fadeInTooltipElement: function (tooltip) {
var fadeTime = this._getFloatOpt('fade');
if (transitionDurationName && fadeTime) {
tooltip.style.opacity = '0';
tooltip.style[transitionDurationName] = fadeTime + 's';
tooltip.style[transitionPropertyName] = 'opacity';
tooltip.style[transitionTimingFunctionName] = 'ease-in-out';
setTimeout(function () {
tooltip.style.opacity = '1';
}, 0);
}
},
_placeTooltipElement: function (tooltip, mousePosition) {
var where = this._getOpt('where');
if (where === 'mousemove' || where === 'mousefix') {
var mPos = mousePosition;
this._setPos(mPos[0], mPos[1]);
body.appendChild(tooltip);
} else if (where.match(/(up|down|left|right)/)) {
body.appendChild(tooltip);
var targetElementPos = InkElement.offset(this.element);
var tleft = targetElementPos[0],
ttop = targetElementPos[1];
if (tleft instanceof Array) { // Work around a bug in Ink.Dom.Element.offsetLeft which made it return the result of offset() instead. TODO remove this check when fix is merged
ttop = tleft[1];
tleft = tleft[0];
}
var centerh = (InkElement.elementWidth(this.element) / 2) - (InkElement.elementWidth(tooltip) / 2),
centerv = (InkElement.elementHeight(this.element) / 2) - (InkElement.elementHeight(tooltip) / 2);
var spacing = this._getIntOpt('spacing');
var tooltipDims = InkElement.elementDimensions(tooltip);
var elementDims = InkElement.elementDimensions(this.element);
var maxX = InkElement.scrollWidth() + InkElement.viewportWidth();
var maxY = InkElement.scrollHeight() + InkElement.viewportHeight();
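                // If the tooltip would overflow the document on the requested
                // side, flip it to the opposite side before positioning.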
if (where === 'left' && tleft - tooltipDims[0] < 0) {
where = 'right';
} else if (where === 'right' && tleft + tooltipDims[0] > maxX) {
where = 'left';
} else if (where === 'up' && ttop - tooltipDims[1] < 0) {
where = 'down';
} else if (where === 'down' && ttop + tooltipDims[1] > maxY) {
where = 'up';
}
if (where === 'up') {
ttop -= tooltipDims[1];
ttop -= spacing;
tleft += centerh;
} else if (where === 'down') {
ttop += elementDims[1];
ttop += spacing;
tleft += centerh;
} else if (where === 'left') {
tleft -= tooltipDims[0];
tleft -= spacing;
ttop += centerv;
} else if (where === 'right') {
tleft += elementDims[0];
tleft += spacing;
ttop += centerv;
}
var arrow = null;
if (where.match(/(up|down|left|right)/)) {
arrow = document.createElement('SPAN');
Css.addClassName(arrow, 'arrow');
Css.addClassName(arrow, this._oppositeDirections[where]);
tooltip.appendChild(arrow);
}
var scrl = this._getLocalScroll();
var tooltipLeft = tleft - scrl[0];
var tooltipTop = ttop - scrl[1];
var toBottom = (tooltipTop + tooltipDims[1]) - maxY;
var toRight = (tooltipLeft + tooltipDims[0]) - maxX;
var toLeft = 0 - tooltipLeft;
var toTop = 0 - tooltipTop;
if (toBottom > 0) {
if (arrow) { arrow.style.top = (tooltipDims[1] / 2) + toBottom + 'px'; }
tooltipTop -= toBottom;
} else if (toTop > 0) {
if (arrow) { arrow.style.top = (tooltipDims[1] / 2) - toTop + 'px'; }
tooltipTop += toTop;
} else if (toRight > 0) {
if (arrow) { arrow.style.left = (tooltipDims[0] / 2) + toRight + 'px'; }
tooltipLeft -= toRight;
} else if (toLeft > 0) {
if (arrow) { arrow.style.left = (tooltipDims[0] / 2) - toLeft + 'px'; }
tooltipLeft += toLeft;
}
tooltip.style.left = tooltipLeft + 'px';
tooltip.style.top = tooltipTop + 'px';
}
},
_removeTooltip: function() {
var tooltip = this.tooltip;
if (!tooltip) {return;}
var remove = Ink.bind(InkElement.remove, {}, tooltip);
if (this._getOpt('where') !== 'mousemove' && transitionDurationName) {
tooltip.style.opacity = 0;
                // remove() will operate on the correct tooltip, even though this.tooltip will be null by then
setTimeout(remove, this._getFloatOpt('fade') * 1000);
} else {
remove();
}
this.tooltip = null;
},
_getOpt: function (option) {
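        // Per-element data attributes override instance options,
        // e.g. <li data-tip-text="..." data-tip-where="down">.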
var dataAttrVal = InkElement.data(this.element)[InkElement._camelCase('tip-' + option)];
        if (dataAttrVal /* either null or "" may signify the absence of this attribute */) {
return dataAttrVal;
}
var instanceOption = this.root.options[option];
if (typeof instanceOption !== 'undefined') {
return instanceOption;
}
},
_getIntOpt: function (option) {
return parseInt(this._getOpt(option), 10);
},
_getFloatOpt: function (option) {
        return parseFloat(this._getOpt(option));
},
_destroy: function () {
if (this.tooltip) {
InkElement.remove(this.tooltip);
}
this.root = null; // Cyclic reference = memory leaks
this.element = null;
this.tooltip = null;
},
_onMouseOver: function(e) {
        // on IE < 10 you can't access the mouse event, not even a tick after it fired
var mousePosition = this._getMousePosition(e);
var delay = this._getFloatOpt('delay');
if (delay) {
this._delayTimeout = setTimeout(Ink.bind(function () {
if (!this.tooltip) {
this._makeTooltip(mousePosition);
}
this._delayTimeout = null;
}, this), delay * 1000);
} else {
this._makeTooltip(mousePosition);
}
},
_onMouseMove: function(e) {
if (this._getOpt('where') === 'mousemove' && this.tooltip) {
var mPos = this._getMousePosition(e);
this._setPos(mPos[0], mPos[1]);
}
},
_onMouseOut: function () {
if (!this._getIntOpt('forever')) {
this._removeTooltip();
}
if (this._delayTimeout) {
clearTimeout(this._delayTimeout);
this._delayTimeout = null;
}
},
_onTooltipMouseOver: function () {
if (this.tooltip) { // If tooltip is already being removed, this has no effect
this._removeTooltip();
}
},
_setPos: function(left, top) {
left += this._getIntOpt('left');
top += this._getIntOpt('top');
var pageDims = this._getPageXY();
if (this.tooltip) {
var elmDims = [InkElement.elementWidth(this.tooltip), InkElement.elementHeight(this.tooltip)];
var scrollDim = this._getScroll();
if((elmDims[0] + left - scrollDim[0]) >= (pageDims[0] - 20)) {
left = (left - elmDims[0] - this._getIntOpt('left') - 10);
}
if((elmDims[1] + top - scrollDim[1]) >= (pageDims[1] - 20)) {
top = (top - elmDims[1] - this._getIntOpt('top') - 10);
}
this.tooltip.style.left = left + 'px';
this.tooltip.style.top = top + 'px';
}
},
_getPageXY: function() {
var cWidth = 0;
var cHeight = 0;
if( typeof( window.innerWidth ) === 'number' ) {
cWidth = window.innerWidth;
cHeight = window.innerHeight;
} else if( document.documentElement && ( document.documentElement.clientWidth || document.documentElement.clientHeight ) ) {
cWidth = document.documentElement.clientWidth;
cHeight = document.documentElement.clientHeight;
} else if( document.body && ( document.body.clientWidth || document.body.clientHeight ) ) {
cWidth = document.body.clientWidth;
cHeight = document.body.clientHeight;
}
return [parseInt(cWidth, 10), parseInt(cHeight, 10)];
},
_getScroll: function() {
var dd = document.documentElement, db = document.body;
if (dd && (dd.scrollLeft || dd.scrollTop)) {
return [dd.scrollLeft, dd.scrollTop];
} else if (db) {
return [db.scrollLeft, db.scrollTop];
} else {
return [0, 0];
}
},
_getLocalScroll: function () {
var cumScroll = [0, 0];
var cursor = this.element.parentNode;
var left, top;
while (cursor && cursor !== document.documentElement && cursor !== document.body) {
left = cursor.scrollLeft;
top = cursor.scrollTop;
if (left) {
cumScroll[0] += left;
}
if (top) {
cumScroll[1] += top;
}
cursor = cursor.parentNode;
}
return cumScroll;
},
_getMousePosition: function(e) {
return [parseInt(InkEvent.pointerX(e), 10), parseInt(InkEvent.pointerY(e), 10)];
}
};
return Tooltip;
});
# /Jinja-1.2.tar.gz/Jinja-1.2/jinja/parser.py
from jinja import nodes
from jinja.datastructure import StateTest
from jinja.exceptions import TemplateSyntaxError
from jinja.utils import set
__all__ = ['Parser']
# general callback functions for the parser
end_of_block = StateTest.expect_token('block_end',
msg='expected end of block tag')
end_of_variable = StateTest.expect_token('variable_end',
msg='expected end of variable')
end_of_comment = StateTest.expect_token('comment_end',
msg='expected end of comment')
# internal tag callbacks
switch_for = StateTest.expect_token('else', 'endfor')
end_of_for = StateTest.expect_token('endfor')
switch_if = StateTest.expect_token('else', 'elif', 'endif')
end_of_if = StateTest.expect_token('endif')
end_of_filter = StateTest.expect_token('endfilter')
end_of_macro = StateTest.expect_token('endmacro')
end_of_call = StateTest.expect_token('endcall')
end_of_block_tag = StateTest.expect_token('endblock')
end_of_trans = StateTest.expect_token('endtrans')
# this ends a tuple
tuple_edge_tokens = set(['rparen', 'block_end', 'variable_end', 'in',
'recursive'])
class Parser(object):
"""
The template parser class.
Transforms sourcecode into an abstract syntax tree.
"""
def __init__(self, environment, source, filename=None):
self.environment = environment
if isinstance(source, str):
source = source.decode(environment.template_charset, 'ignore')
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
self.source = source
self.filename = filename
self.closed = False
#: set for blocks in order to keep them unique
self.blocks = set()
#: mapping of directives that require special treatment
self.directives = {
# "fake" directives that just trigger errors
'raw': self.parse_raw_directive,
'extends': self.parse_extends_directive,
# real directives
'for': self.parse_for_loop,
'if': self.parse_if_condition,
'cycle': self.parse_cycle_directive,
'call': self.parse_call_directive,
'set': self.parse_set_directive,
'filter': self.parse_filter_directive,
'print': self.parse_print_directive,
'macro': self.parse_macro_directive,
'block': self.parse_block_directive,
'include': self.parse_include_directive,
'trans': self.parse_trans_directive
}
#: set of directives that are only available in a certain
#: context.
self.context_directives = set([
'elif', 'else', 'endblock', 'endfilter', 'endfor', 'endif',
'endmacro', 'endraw', 'endtrans', 'pluralize'
])
#: get the `no_variable_block` flag
self.no_variable_block = self.environment.lexer.no_variable_block
self.stream = environment.lexer.tokenize(source, filename)
def parse_raw_directive(self):
"""
Handle fake raw directive. (real raw directives are handled by
the lexer. But if there are arguments to raw or the end tag
is missing the parser tries to resolve this directive. In that
case present the user a useful error message.
"""
if self.stream:
raise TemplateSyntaxError('raw directive does not support '
'any arguments.', self.stream.lineno,
self.filename)
raise TemplateSyntaxError('missing end tag for raw directive.',
self.stream.lineno, self.filename)
def parse_extends_directive(self):
"""
Handle the extends directive used for inheritance.
"""
raise TemplateSyntaxError('mispositioned extends tag. extends must '
'be the first tag of a template.',
self.stream.lineno, self.filename)
def parse_for_loop(self):
"""
Handle a for directive and return a ForLoop node
"""
token = self.stream.expect('for')
item = self.parse_tuple_expression(simplified=True)
if not item.allows_assignments():
raise TemplateSyntaxError('cannot assign to expression',
token.lineno, self.filename)
self.stream.expect('in')
seq = self.parse_tuple_expression()
if self.stream.current.type == 'recursive':
self.stream.next()
recursive = True
else:
recursive = False
self.stream.expect('block_end')
body = self.subparse(switch_for)
# do we have an else section?
if self.stream.current.type == 'else':
self.stream.next()
self.stream.expect('block_end')
else_ = self.subparse(end_of_for, True)
else:
self.stream.next()
else_ = None
self.stream.expect('block_end')
return nodes.ForLoop(item, seq, body, else_, recursive,
token.lineno, self.filename)
def parse_if_condition(self):
"""
Handle if/else blocks.
"""
token = self.stream.expect('if')
expr = self.parse_expression()
self.stream.expect('block_end')
tests = [(expr, self.subparse(switch_if))]
else_ = None
# do we have an else section?
while True:
if self.stream.current.type == 'else':
self.stream.next()
self.stream.expect('block_end')
else_ = self.subparse(end_of_if, True)
elif self.stream.current.type == 'elif':
self.stream.next()
expr = self.parse_expression()
self.stream.expect('block_end')
tests.append((expr, self.subparse(switch_if)))
continue
else:
self.stream.next()
break
self.stream.expect('block_end')
return nodes.IfCondition(tests, else_, token.lineno, self.filename)
def parse_cycle_directive(self):
"""
Handle {% cycle foo, bar, baz %}.
"""
token = self.stream.expect('cycle')
expr = self.parse_tuple_expression()
self.stream.expect('block_end')
return nodes.Cycle(expr, token.lineno, self.filename)
def parse_set_directive(self):
"""
Handle {% set foo = 'value of foo' %}.
"""
token = self.stream.expect('set')
name = self.stream.expect('name')
self.test_name(name.value)
self.stream.expect('assign')
value = self.parse_expression()
if self.stream.current.type == 'bang':
self.stream.next()
scope_local = False
else:
scope_local = True
self.stream.expect('block_end')
return nodes.Set(name.value, value, scope_local,
token.lineno, self.filename)
def parse_filter_directive(self):
"""
Handle {% filter foo|bar %} directives.
"""
token = self.stream.expect('filter')
filters = []
while self.stream.current.type != 'block_end':
if filters:
self.stream.expect('pipe')
token = self.stream.expect('name')
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
filters.append((token.value, args))
self.stream.expect('block_end')
body = self.subparse(end_of_filter, True)
self.stream.expect('block_end')
return nodes.Filter(body, filters, token.lineno, self.filename)
def parse_print_directive(self):
"""
Handle {% print foo %}.
"""
token = self.stream.expect('print')
expr = self.parse_tuple_expression()
node = nodes.Print(expr, token.lineno, self.filename)
self.stream.expect('block_end')
return node
def parse_macro_directive(self):
"""
Handle {% macro foo bar, baz %} as well as
{% macro foo(bar, baz) %}.
"""
token = self.stream.expect('macro')
macro_name = self.stream.expect('name')
self.test_name(macro_name.value)
if self.stream.current.type == 'lparen':
self.stream.next()
needle_token = 'rparen'
else:
needle_token = 'block_end'
args = []
while self.stream.current.type != needle_token:
if args:
self.stream.expect('comma')
name = self.stream.expect('name').value
self.test_name(name)
if self.stream.current.type == 'assign':
self.stream.next()
default = self.parse_expression()
else:
default = None
args.append((name, default))
self.stream.next()
if needle_token == 'rparen':
self.stream.expect('block_end')
body = self.subparse(end_of_macro, True)
self.stream.expect('block_end')
return nodes.Macro(macro_name.value, args, body, token.lineno,
self.filename)
def parse_call_directive(self):
"""
Handle {% call foo() %}...{% endcall %}
"""
token = self.stream.expect('call')
expr = self.parse_call_expression()
self.stream.expect('block_end')
body = self.subparse(end_of_call, True)
self.stream.expect('block_end')
return nodes.Call(expr, body, token.lineno, self.filename)
def parse_block_directive(self):
"""
Handle block directives used for inheritance.
"""
token = self.stream.expect('block')
name = self.stream.expect('name').value
# check if this block does not exist by now.
if name in self.blocks:
raise TemplateSyntaxError('block %r defined twice' %
name, token.lineno,
self.filename)
self.blocks.add(name)
if self.stream.current.type != 'block_end':
lineno = self.stream.lineno
expr = self.parse_tuple_expression()
node = nodes.Print(expr, lineno, self.filename)
body = nodes.NodeList([node], lineno, self.filename)
self.stream.expect('block_end')
else:
# otherwise parse the body and attach it to the block
self.stream.expect('block_end')
body = self.subparse(end_of_block_tag, True)
self.stream.expect('block_end')
return nodes.Block(name, body, token.lineno, self.filename)
def parse_include_directive(self):
"""
Handle the include directive used for template inclusion.
"""
token = self.stream.expect('include')
template = self.stream.expect('string').value
self.stream.expect('block_end')
return nodes.Include(template, token.lineno, self.filename)
def parse_trans_directive(self):
"""
Handle translatable sections.
"""
trans_token = self.stream.expect('trans')
# string based translations {% trans "foo" %}
if self.stream.current.type == 'string':
text = self.stream.expect('string')
self.stream.expect('block_end')
return nodes.Trans(text.value, None, None, None,
trans_token.lineno, self.filename)
# block based translations
replacements = {}
plural_var = None
while self.stream.current.type != 'block_end':
if replacements:
self.stream.expect('comma')
name = self.stream.expect('name')
if self.stream.current.type == 'assign':
self.stream.next()
value = self.parse_expression()
else:
value = nodes.NameExpression(name.value, name.lineno,
self.filename)
if name.value in replacements:
raise TemplateSyntaxError('translation variable %r '
'is defined twice' % name.value,
name.lineno, self.filename)
replacements[name.value] = value
if plural_var is None:
plural_var = name.value
self.stream.expect('block_end')
def process_variable():
var_name = self.stream.expect('name')
if var_name.value not in replacements:
raise TemplateSyntaxError('unregistered translation variable'
" '%s'." % var_name.value,
var_name.lineno, self.filename)
buf.append('%%(%s)s' % var_name.value)
buf = singular = []
plural = None
while True:
token = self.stream.current
if token.type == 'data':
buf.append(token.value.replace('%', '%%'))
self.stream.next()
elif token.type == 'variable_begin':
self.stream.next()
process_variable()
self.stream.expect('variable_end')
elif token.type == 'block_begin':
self.stream.next()
if plural is None and self.stream.current.type == 'pluralize':
self.stream.next()
if self.stream.current.type == 'name':
plural_var = self.stream.expect('name').value
plural = buf = []
elif self.stream.current.type == 'endtrans':
self.stream.next()
self.stream.expect('block_end')
break
else:
if self.no_variable_block:
process_variable()
else:
raise TemplateSyntaxError('blocks are not allowed '
'in trans tags',
self.stream.lineno,
self.filename)
self.stream.expect('block_end')
else:
assert False, 'something very strange happened'
singular = u''.join(singular)
if plural is not None:
plural = u''.join(plural)
return nodes.Trans(singular, plural, plural_var, replacements,
trans_token.lineno, self.filename)
def parse_expression(self):
"""
Parse one expression from the stream.
"""
return self.parse_conditional_expression()
def parse_subscribed_expression(self):
"""
        Like parse_expression but parses slices too. Because this
        parsing function requires a border, the two tokens rbracket
        and comma mark the end of the expression in some situations.
"""
lineno = self.stream.lineno
if self.stream.current.type == 'colon':
self.stream.next()
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
self.stream.next()
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
self.stream.next()
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.SliceExpression(*(args + [lineno, self.filename]))
def parse_conditional_expression(self):
"""
Parse a conditional expression (foo if bar else baz)
"""
lineno = self.stream.lineno
expr1 = self.parse_or_expression()
while self.stream.current.type == 'if':
self.stream.next()
expr2 = self.parse_or_expression()
self.stream.expect('else')
expr3 = self.parse_conditional_expression()
expr1 = nodes.ConditionalExpression(expr2, expr1, expr3,
lineno, self.filename)
lineno = self.stream.lineno
return expr1
def parse_or_expression(self):
"""
Parse something like {{ foo or bar }}.
"""
lineno = self.stream.lineno
left = self.parse_and_expression()
while self.stream.current.type == 'or':
self.stream.next()
right = self.parse_and_expression()
left = nodes.OrExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_and_expression(self):
"""
Parse something like {{ foo and bar }}.
"""
lineno = self.stream.lineno
left = self.parse_compare_expression()
while self.stream.current.type == 'and':
self.stream.next()
right = self.parse_compare_expression()
left = nodes.AndExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_compare_expression(self):
"""
Parse something like {{ foo == bar }}.
"""
known_operators = set(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq', 'in'])
lineno = self.stream.lineno
expr = self.parse_add_expression()
ops = []
while True:
if self.stream.current.type in known_operators:
op = self.stream.current.type
self.stream.next()
ops.append([op, self.parse_add_expression()])
elif self.stream.current.type == 'not' and \
self.stream.look().type == 'in':
self.stream.skip(2)
ops.append(['not in', self.parse_add_expression()])
else:
break
if not ops:
return expr
return nodes.CompareExpression(expr, ops, lineno, self.filename)
def parse_add_expression(self):
"""
Parse something like {{ foo + bar }}.
"""
lineno = self.stream.lineno
left = self.parse_sub_expression()
while self.stream.current.type == 'add':
self.stream.next()
right = self.parse_sub_expression()
left = nodes.AddExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_sub_expression(self):
"""
Parse something like {{ foo - bar }}.
"""
lineno = self.stream.lineno
left = self.parse_concat_expression()
while self.stream.current.type == 'sub':
self.stream.next()
right = self.parse_concat_expression()
left = nodes.SubExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_concat_expression(self):
"""
Parse something like {{ foo ~ bar }}.
"""
lineno = self.stream.lineno
args = [self.parse_mul_expression()]
while self.stream.current.type == 'tilde':
self.stream.next()
args.append(self.parse_mul_expression())
if len(args) == 1:
return args[0]
return nodes.ConcatExpression(args, lineno, self.filename)
def parse_mul_expression(self):
"""
Parse something like {{ foo * bar }}.
"""
lineno = self.stream.lineno
left = self.parse_div_expression()
while self.stream.current.type == 'mul':
self.stream.next()
right = self.parse_div_expression()
left = nodes.MulExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_div_expression(self):
"""
Parse something like {{ foo / bar }}.
"""
lineno = self.stream.lineno
left = self.parse_floor_div_expression()
while self.stream.current.type == 'div':
self.stream.next()
right = self.parse_floor_div_expression()
left = nodes.DivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_floor_div_expression(self):
"""
Parse something like {{ foo // bar }}.
"""
lineno = self.stream.lineno
left = self.parse_mod_expression()
while self.stream.current.type == 'floordiv':
self.stream.next()
right = self.parse_mod_expression()
left = nodes.FloorDivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_mod_expression(self):
"""
Parse something like {{ foo % bar }}.
"""
lineno = self.stream.lineno
left = self.parse_pow_expression()
while self.stream.current.type == 'mod':
self.stream.next()
right = self.parse_pow_expression()
left = nodes.ModExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_pow_expression(self):
"""
Parse something like {{ foo ** bar }}.
"""
lineno = self.stream.lineno
left = self.parse_unary_expression()
while self.stream.current.type == 'pow':
self.stream.next()
right = self.parse_unary_expression()
left = nodes.PowExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_unary_expression(self):
"""
Parse all kinds of unary expressions.
"""
if self.stream.current.type == 'not':
return self.parse_not_expression()
elif self.stream.current.type == 'sub':
return self.parse_neg_expression()
elif self.stream.current.type == 'add':
return self.parse_pos_expression()
return self.parse_primary_expression()
def parse_not_expression(self):
"""
Parse something like {{ not foo }}.
"""
token = self.stream.expect('not')
node = self.parse_unary_expression()
return nodes.NotExpression(node, token.lineno, self.filename)
def parse_neg_expression(self):
"""
Parse something like {{ -foo }}.
"""
token = self.stream.expect('sub')
node = self.parse_unary_expression()
return nodes.NegExpression(node, token.lineno, self.filename)
def parse_pos_expression(self):
"""
Parse something like {{ +foo }}.
"""
token = self.stream.expect('add')
node = self.parse_unary_expression()
return nodes.PosExpression(node, token.lineno, self.filename)
def parse_primary_expression(self, parse_postfix=True):
"""
Parse a primary expression such as a name or literal.
"""
current = self.stream.current
if current.type == 'name':
if current.value in ('true', 'false'):
node = self.parse_bool_expression()
elif current.value == 'none':
node = self.parse_none_expression()
elif current.value == 'undefined':
node = self.parse_undefined_expression()
elif current.value == '_':
node = self.parse_gettext_call()
else:
node = self.parse_name_expression()
elif current.type in ('integer', 'float'):
node = self.parse_number_expression()
elif current.type == 'string':
node = self.parse_string_expression()
elif current.type == 'regex':
node = self.parse_regex_expression()
elif current.type == 'lparen':
node = self.parse_paren_expression()
elif current.type == 'lbracket':
node = self.parse_list_expression()
elif current.type == 'lbrace':
node = self.parse_dict_expression()
elif current.type == 'at':
node = self.parse_set_expression()
else:
raise TemplateSyntaxError("unexpected token '%s'" %
self.stream.current,
self.stream.current.lineno,
self.filename)
if parse_postfix:
node = self.parse_postfix_expression(node)
return node
def parse_tuple_expression(self, enforce=False, simplified=False):
"""
Parse multiple expressions into a tuple. This can also return
just one expression which is not a tuple. If you want to enforce
a tuple, pass it enforce=True.
"""
lineno = self.stream.lineno
if simplified:
parse = self.parse_primary_expression
else:
parse = self.parse_expression
args = []
is_tuple = False
while True:
if args:
self.stream.expect('comma')
if self.stream.current.type in tuple_edge_tokens:
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
if not is_tuple and args:
if enforce:
raise TemplateSyntaxError('tuple expected', lineno,
self.filename)
return args[0]
return nodes.TupleExpression(args, lineno, self.filename)
def parse_bool_expression(self):
"""
Parse a boolean literal.
"""
token = self.stream.expect('name')
if token.value == 'true':
value = True
elif token.value == 'false':
value = False
else:
raise TemplateSyntaxError("expected boolean literal",
token.lineno, self.filename)
return nodes.ConstantExpression(value, token.lineno, self.filename)
def parse_none_expression(self):
"""
Parse a none literal.
"""
token = self.stream.expect('name', 'none')
return nodes.ConstantExpression(None, token.lineno, self.filename)
def parse_undefined_expression(self):
"""
Parse an undefined literal.
"""
token = self.stream.expect('name', 'undefined')
return nodes.UndefinedExpression(token.lineno, self.filename)
def parse_gettext_call(self):
"""
        Parse {{ _('foo') }}.
"""
# XXX: check if only one argument was passed and if
# it is a string literal. Maybe that should become a special
# expression anyway.
token = self.stream.expect('name', '_')
node = nodes.NameExpression(token.value, token.lineno, self.filename)
return self.parse_call_expression(node)
def parse_name_expression(self):
"""
Parse any name.
"""
token = self.stream.expect('name')
self.test_name(token.value)
return nodes.NameExpression(token.value, token.lineno, self.filename)
def parse_number_expression(self):
"""
Parse a number literal.
"""
token = self.stream.current
if token.type not in ('integer', 'float'):
raise TemplateSyntaxError('integer or float literal expected',
token.lineno, self.filename)
self.stream.next()
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_string_expression(self):
"""
Parse a string literal.
"""
token = self.stream.expect('string')
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_regex_expression(self):
"""
Parse a regex literal.
"""
token = self.stream.expect('regex')
return nodes.RegexExpression(token.value, token.lineno, self.filename)
def parse_paren_expression(self):
"""
        Parse a parenthesized expression.
"""
self.stream.expect('lparen')
try:
return self.parse_tuple_expression()
finally:
self.stream.expect('rparen')
def parse_list_expression(self):
"""
Parse something like {{ [1, 2, "three"] }}
"""
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.ListExpression(items, token.lineno, self.filename)
def parse_dict_expression(self):
"""
Parse something like {{ {1: 2, 3: 4} }}
"""
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append((key, value))
self.stream.expect('rbrace')
return nodes.DictExpression(items, token.lineno, self.filename)
def parse_set_expression(self):
"""
Parse something like {{ @(1, 2, 3) }}.
"""
token = self.stream.expect('at')
self.stream.expect('lparen')
items = []
while self.stream.current.type != 'rparen':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rparen':
break
items.append(self.parse_expression())
self.stream.expect('rparen')
return nodes.SetExpression(items, token.lineno, self.filename)
def parse_postfix_expression(self, node):
"""
Parse a postfix expression such as a filter statement or a
function call.
"""
while True:
current = self.stream.current.type
if current == 'dot' or current == 'lbracket':
node = self.parse_subscript_expression(node)
elif current == 'lparen':
node = self.parse_call_expression(node)
elif current == 'pipe':
node = self.parse_filter_expression(node)
elif current == 'is':
node = self.parse_test_expression(node)
else:
break
return node
def parse_subscript_expression(self, node):
"""
Parse a subscript statement. Gets attributes and items from an
object.
"""
lineno = self.stream.lineno
if self.stream.current.type == 'dot':
self.stream.next()
token = self.stream.current
if token.type in ('name', 'integer'):
arg = nodes.ConstantExpression(token.value, token.lineno,
self.filename)
else:
raise TemplateSyntaxError('expected name or number',
token.lineno, self.filename)
self.stream.next()
elif self.stream.current.type == 'lbracket':
self.stream.next()
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed_expression())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.TupleExpression(args, lineno, self.filename)
else:
            raise TemplateSyntaxError('expected subscript expression',
                                      lineno, self.filename)
return nodes.SubscriptExpression(node, arg, lineno, self.filename)
def parse_call_expression(self, node=None):
"""
Parse a call.
"""
if node is None:
node = self.parse_primary_expression(parse_postfix=False)
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = None
dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
raise TemplateSyntaxError('invalid syntax for function '
'call expression', token.lineno,
self.filename)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
self.stream.next()
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
self.stream.next()
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
kwargs.append((key, self.parse_expression()))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
return nodes.CallExpression(node, args, kwargs, dyn_args,
dyn_kwargs, token.lineno,
self.filename)
def parse_filter_expression(self, node):
"""
Parse filter calls.
"""
lineno = self.stream.lineno
filters = []
while self.stream.current.type == 'pipe':
self.stream.next()
token = self.stream.expect('name')
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
filters.append((token.value, args))
return nodes.FilterExpression(node, filters, lineno, self.filename)
def parse_test_expression(self, node):
"""
Parse test calls.
"""
token = self.stream.expect('is')
if self.stream.current.type == 'not':
self.stream.next()
negated = True
else:
negated = False
name = self.stream.expect('name').value
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace', 'regex'):
args.append(self.parse_expression())
node = nodes.TestExpression(node, name, args, token.lineno,
self.filename)
if negated:
node = nodes.NotExpression(node, token.lineno, self.filename)
return node
def test_name(self, name):
"""
Test if a name is not a special constant
"""
if name in ('true', 'false', 'none', 'undefined', '_'):
raise TemplateSyntaxError('expected name not special constant',
self.stream.lineno, self.filename)
def subparse(self, test, drop_needle=False):
"""
Helper function used to parse the sourcecode until the test
function which is passed a tuple in the form (lineno, token, data)
returns True. In that case the current token is pushed back to
the stream and the generator ends.
The test function is only called for the first token after a
block tag. Variable tags are *not* aliases for {% print %} in
that case.
If drop_needle is True the needle_token is removed from the
stream.
"""
if self.closed:
raise RuntimeError('parser is closed')
result = []
buffer = []
next = self.stream.next
lineno = self.stream.lineno
def assemble_list():
push_buffer()
return nodes.NodeList(result, lineno, self.filename)
def push_variable():
buffer.append((True, self.parse_tuple_expression()))
def push_data():
buffer.append((False, self.stream.expect('data')))
def push_buffer():
if not buffer:
return
template = []
variables = []
for is_var, data in buffer:
if is_var:
template.append('%s')
variables.append(data)
else:
template.append(data.value.replace('%', '%%'))
result.append(nodes.Text(u''.join(template), variables,
buffer[0][1].lineno, self.filename))
del buffer[:]
def push_node(node):
push_buffer()
result.append(node)
while self.stream:
token_type = self.stream.current.type
if token_type == 'variable_begin':
next()
push_variable()
self.stream.expect('variable_end')
elif token_type == 'raw_begin':
next()
push_data()
self.stream.expect('raw_end')
elif token_type == 'block_begin':
next()
if test is not None and test(self.stream.current):
if drop_needle:
next()
return assemble_list()
handler = self.directives.get(self.stream.current.type)
if handler is None:
if self.no_variable_block:
push_variable()
self.stream.expect('block_end')
elif self.stream.current.type in self.context_directives:
raise TemplateSyntaxError('unexpected directive %r.' %
self.stream.current.type,
lineno, self.filename)
else:
name = self.stream.current.value
raise TemplateSyntaxError('unknown directive %r.' %
name, lineno, self.filename)
else:
node = handler()
if node is not None:
push_node(node)
elif token_type == 'data':
push_data()
# this should be unreachable code
else:
assert False, "unexpected token %r" % self.stream.current
if test is not None:
msg = isinstance(test, StateTest) and ': ' + test.msg or ''
raise TemplateSyntaxError('unexpected end of stream' + msg,
self.stream.lineno, self.filename)
return assemble_list()
def sanitize_tree(self, body, extends):
self._sanitize_tree([body], [body], extends, body)
return body
def _sanitize_tree(self, nodelist, stack, extends, body):
"""
This is not a closure because python leaks memory if it is. It's used
by `parse()` to make sure blocks do not trigger unexpected behavior.
"""
for node in nodelist:
if extends is not None and \
node.__class__ is nodes.Block and \
stack[-1] is not body:
for n in stack:
if n.__class__ is nodes.Block:
break
else:
raise TemplateSyntaxError('misplaced block %r, '
'blocks in child '
'templates must be '
'either top level or '
'located in a block '
'tag.' % node.name,
node.lineno,
self.filename)
stack.append(node)
self._sanitize_tree(node.get_child_nodes(), stack, extends, body)
stack.pop()
def parse(self):
"""
Parse the template and return a Template node. This also does some
post processing sanitizing and parses for an extends tag.
"""
if self.closed:
raise RuntimeError('parser is closed')
try:
# get the leading whitespace, if we are not in a child
# template we push that back to the stream later.
leading_whitespace = self.stream.read_whitespace()
# parse an optional extends which *must* be the first node
# of a template.
if self.stream.current.type == 'block_begin' and \
self.stream.look().type == 'extends':
self.stream.skip(2)
extends = self.stream.expect('string').value
self.stream.expect('block_end')
else:
extends = None
if leading_whitespace:
self.stream.shift(leading_whitespace)
body = self.sanitize_tree(self.subparse(None), extends)
return nodes.Template(extends, body, 1, self.filename)
finally:
self.close()
def close(self):
"""Clean up soon."""
self.closed = True
self.stream = self.directives = self.stream = self.blocks = \
self.environment = None | PypiClean |
# /MSM_PELE-1.1.1-py3-none-any.whl/AdaptivePELE/AdaptivePELE/freeEnergies/cluster.py
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import glob
import pyemma.coordinates as coor
from pyemma.coordinates.clustering import AssignCenters
import scipy
class Cluster:
def __init__(self, numClusters, trajectoryFolder, trajectoryBasename, stride=1, alwaysCluster=True):
"""
        alwaysCluster: cluster regardless of whether discretized/clusterCenters.dat already exists
"""
self.discretizedFolder = "discretized"
self.clusterCentersFile = os.path.join(self.discretizedFolder, "clusterCenters.dat")
self.clusterCenters = np.array([])
self.dTrajTemplateName = os.path.join(self.discretizedFolder, "%s.disctraj")
self.clusteringFile = "clustering_object.pkl"
self.stride = stride
self.trajFilenames = []
self.dtrajs = []
self.alwaysCluster = alwaysCluster
self.numClusters = numClusters
self.trajectoryFolder = trajectoryFolder
self.trajectoryBasename = trajectoryBasename
self.x = []
def cluster(self, trajectories):
""" Cluster the trajectories into numClusters clusters using kmeans
algorithm.
Returns a KmeansClusteringObject
"""
return coor.cluster_kmeans(data=trajectories, k=self.numClusters, max_iter=500, stride=self.stride)
def assignNewTrajectories(self, trajs):
        # wrap the clusterCentersFile argument in a str call so it passes
        # pyemma's AssignCenters isinstance(str) check
assign = AssignCenters(str(self.clusterCentersFile))
dTrajs = assign.assign(trajs)
return dTrajs
def clusterTrajectories(self):
print("Loading trajectories...")
self.x, self.trajFilenames = loadTrajFiles(self.trajectoryFolder, self.trajectoryBasename)
# cluster & assign
if self.alwaysCluster or not os.path.exists(self.clusterCentersFile):
print("Clustering data...")
cl = self.cluster(self.x) # cl: pyemma's clusteringObject
makeFolder(self.discretizedFolder)
self.clusterCenters = cl.clustercenters
self._writeClusterCenters(self.clusterCenters, self.clusterCentersFile)
print("Assigning data...")
self.dtrajs = cl.dtrajs[:]
else:
print("Assigning data (clustering exists)...")
self.clusterCenters = np.loadtxt(self.clusterCentersFile)
self.dtrajs = self.assignNewTrajectories(self.x)
print("Writing clustering data...")
self._writeDtrajs(self.trajFilenames, self.dtrajs, self.dTrajTemplateName)
def eliminateLowPopulatedClusters(self, clusterCountsThreshold, tau=None):
if self.dtrajs == []:
print("Call clusterTrajectories() first!")
return
dtrajs = np.array(self.dtrajs).copy()
        if tau:
            dtrajs = np.concatenate(dtrajs[:, :-tau])
        else:
            dtrajs = np.concatenate(dtrajs)
dummy = np.zeros(dtrajs.size)
data = np.ones(dtrajs.size)
        # using a sparse matrix is fast and succinct
counts = np.ravel(scipy.sparse.coo_matrix((data, (dtrajs, dummy)), shape=(self.numClusters, 1)).toarray())
clustersToDelete = np.argwhere(counts < clusterCountsThreshold)
if clustersToDelete.shape[0] > 0:
print("Removing %d clusters due to a small number of counts (less than %d)" % (clustersToDelete.shape[0], clusterCountsThreshold))
self.clusterCenters = np.delete(self.clusterCenters, clustersToDelete, axis=0)
self._writeClusterCenters(self.clusterCenters, self.clusterCentersFile)
print("Reassigning trajectories")
self.dtrajs = self.assignNewTrajectories(self.x)
def _writeClusterCenters(self, clusterCenters, outputFilename):
np.savetxt(outputFilename, clusterCenters, fmt=b"%.5f")
def _writeDtrajs(self, filenames, dtrajs, filenameTemplate="%s.disctraj"):
for filename, dtraj in zip(filenames, dtrajs):
fname = os.path.split(filename)[-1][:-4]
dtrajfname = filenameTemplate % (fname)
np.savetxt(dtrajfname, dtraj, fmt=b"%d")
# Standalone functions
def loadTrajFiles(trajectoryFolder, trajectory_basename):
trajectoryBasename = os.path.join(trajectoryFolder, trajectory_basename)
# load traj
files = glob.glob(trajectoryBasename)
x = len(files)*[0]
for i, f in enumerate(files):
currentX = np.loadtxt(f, ndmin=2)[:, 1:]
x[i] = currentX
if not x:
raise ValueError("Didn't find any trajectory files in the specified path!!!")
return x, files
def makeFolder(outputDir):
if not os.path.exists(outputDir):
        os.makedirs(outputDir)
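# Hypothetical usage sketch (folder, file pattern and threshold are made up
# for illustration):
#     c = Cluster(100, "projections", "traj_*.dat", stride=5)
#     c.clusterTrajectories()
#     c.eliminateLowPopulatedClusters(clusterCountsThreshold=5)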
<!-- /GaugeRnR-0.6.0.tar.gz/GaugeRnR-0.6.0/README.md -->
# Gauge R&R
[](https://github.com/owodunni/GaugeRnR)
[](https://pypi.org/project/GaugeRnR/)
[](https://github.com/owodunni/GaugeRnR/blob/master/LICENSE)
## Table of Contents
1. [Install](#Install)
2. [CLI](#CLI)
3. [Example](#Example)
4. [Statistics](#Statistics)
## Install
From PyPi:
``` vim
pip install GaugeRnR
```
From source:
``` console
pip install -e .
```
Development dependencies:
``` vim
pip install -r pip/requirements-dev.txt
```
## CLI
The package can be used to generate reports from CLI:
```vim
GaugeRnR -f data/data_mXop.csv -s 3,5,11 -o outDir
```
This generates an HTML report that is stored in the outDir folder.
Setting the axes parameter is useful if the data is not structured correctly:
```vim
GaugeRnR -f data/data_opXm.csv -s 5,7,11 -a 2,1,0 -o outDir
```
To calculate linearity and bias ground truth is required:
```vim
GaugeRnR -f data/data_demoGRnR.csv -s 3,10,3 -a 0,2,1 -g 40,42,30,43,29,45,27.5,42,26,35 -o outDir
```
For more help run:
```vim
GaugeRnR -h
```
```
GaugeRnR.
The input data should be structeted
in a 3d array n[i,j,k] where
i = operator, j = part, k = measurement
Stored to file this data would look:
m1 m2 m3
3.29; 3.41; 3.64 # p1 | o1
2.44; 2.32; 2.42 # p2
3.08; 3.25; 3.07 # p1 | o2
2.53; 1.78; 2.32 # p2
3.04; 2.89; 2.85 # p1 | o3
1.62; 1.87; 2.04 # p2
More info: https://github.com/owodunni/GaugeRnR
Usage:
GaugeRnR -f FILE -s STRUCTURE [-a <AXES>] [-d <DELIMITER>] [-o <FOLDER>] [-g <PARTS>]
GaugeRnR -h | --help
GaugeRnR -v | --version
Examples:
GaugeRnR -f data.csv -s5,7,11 -o report
GaugeRnR -f data/data_mXop.csv -s 3,5,11 -o outDir
GaugeRnR -f data/data_opXm.csv -s 5,7,11 -a 2,1,0 -o outDir
GaugeRnR -f data/data_demoGRnR.csv -s 3,10,3 -a 0,2,1 -g 40,42,30,43,29,45,27.5,42,26,35 -o outDir
Options:
-f --file=FILE Load input data.
-s --structure=STRUCTURE Data structure.
Order should be operators, parts, measurements.
-a --axes=<AXES> Order of data axes [default: 0,1,2].
  -d --delimiter=<DELIMITER>  Data field delimiter [default: ;].
-o --output=<FOLDER> Report output directory
-g --groundTruth=<PARTS> Ground Truth data for parts
-h --help Show this screen.
-v --version Show version.
```
## Example
The package can be used in the following way:
``` python
from gaugeRnR import GaugeRnR
import numpy as np
# The input should be structured in a 3d
# numpy array n[i,j,k] where
# i = operator, j = part, k = measurement
# Example:
# m1 m2 m3
data = np.array( #
[[[3.29, 3.41, 3.64], # p1 | o1
[2.44, 2.32, 2.42], # p2
[4.34, 4.17, 4.27], # p3
[3.47, 3.5, 3.64], # p4
[2.2, 2.08, 2.16]], # p5
[[3.08, 3.25, 3.07], # p1 | o2
[2.53, 1.78, 2.32], # p2
[4.19, 3.94, 4.34], # p3
[3.01, 4.03, 3.2], # p4
[2.44, 1.8, 1.72]], # p5
[[3.04, 2.89, 2.85], # p1 | o3
[1.62, 1.87, 2.04], # p2
[3.88, 4.09, 3.67], # p3
[3.14, 3.2, 3.11], # p4
[1.54, 1.93, 1.55]]]) # p5
g = GaugeRnR(data)
g.calculate()
print(g.summary())
```
This will result in the following table:
| Sources of Variance | DF | SS | MS | Var (σ²) | Std (σ) | F-value | P-value |
|-----------------------|------|--------|-------|------------|-----------|-----------|-----------|
| Operator | 2 | 1.63 | 0.815 | 0.054 | 0.232 | 100.322 | 0.000 |
| Part | 4 | 28.909 | 7.227 | 0.802 | 0.896 | 889.458 | 0.000 |
| Operator by Part | 8 | 0.065 | 0.008 | 0 | 0 | 0.142 | 0.996 |
| Measurment | 30 | 1.712 | 0.057 | 0.057 | 0.239 | | |
| Total | 44 | 32.317 | 0.734 | 0.913 | 0.956 | | |
To access the result from the Gauge RnR data directly:
``` python
from gaugeRnR import GaugeRnR, Component, Result
.
.
.
g = GaugeRnR(data)
result = g.calculate()
F = result[Result.F]
>>> print(F[Component.OPERATOR])
100.322
```
For more examples of how to use this library take a look at the [unit tests](https://github.com/owodunni/GaugeRnR/tree/master/tests)!
## Statistics
The package can generate the following statistics:
* GaugeRnR
Gauge R&R, which stands for gage repeatability and reproducibility, is a statistical tool that measures the amount of variation in the measurement system arising from the measurement device and the people taking the measurement.
Unfortunately, all measurement data contains a certain percentage of variation. The variation is the difference between the true values and the observed values and represents the amount of measurement error. In addition to measurement error, there is the actual product or process variation. When we combine measurement error with product or process variation, the resulting value represents the total variation. To assure that our measurement data is accurate, we must determine whether the amount of variation is acceptable.
If the p-value is less than 0.05, it means that the source of variation has a significant impact on the results; a quick way to screen for this is sketched after the reference links below.
For more information take a look at:
* [anova-gage-rr-part-1](https://www.spcforexcel.com/knowledge/measurement-systems-analysis/anova-gage-rr-part-1)
* [anova-gage-rr-part-2](https://www.spcforexcel.com/knowledge/measurement-systems-analysis/anova-gage-rr-part-2)
* [Introduction to Statistical Quality Control 6th Edition](https://www.amazon.com/Introduction-Statistical-Quality-Control-Montgomery/dp/0470169923)
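  As a quick, package-independent sketch, the p-values printed in the summary table above can be screened for significance like this (the numbers are copied from that table; the helper itself is hypothetical):

``` python
# p-values copied from the summary table above
p_values = {"Operator": 0.000, "Part": 0.000, "Operator by Part": 0.996}
significant = [source for source, p in p_values.items() if p < 0.05]
print(significant)  # -> ['Operator', 'Part']
```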
* Mean, Standard Deviation and bar chart plots.
  To get a better feel for our measurement data we can plot it together with a bar chart and show some characteristic statistics of the data.
* Normality test
  For Gauge R&R to work it is important that our data, in particular the part measurements, is normally distributed. If we don't have enough data, it might not be. We can test whether the data is normally distributed using a Shapiro-Wilk test; small values of W are evidence of departure from normality. A P-value smaller than 0.05 indicates that the data is not Gaussian.
For more information take a look at:
* [Engineering statistics handbook](https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm)
* [Normality tests in python](https://machinelearningmastery.com/a-gentle-introduction-to-normality-tests-in-python/)
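  As a minimal sketch of such a check using SciPy (independent of this package's own API), one operator's readings from the example data above could be tested like this:

``` python
import numpy as np
from scipy.stats import shapiro

# Readings for part 1 taken from the example data above
measurements = np.array([3.29, 3.41, 3.64, 3.08, 3.25, 3.07, 3.04, 2.89, 2.85])

stat, p = shapiro(measurements)  # Shapiro-Wilk W statistic and its p-value
if p < 0.05:
    print("W=%.3f, p=%.3f: evidence against normality" % (stat, p))
else:
    print("W=%.3f, p=%.3f: no evidence against normality" % (stat, p))
```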
* Linearity and Bias - requires ground truth data
Bias and linearity assess the accuracy of a gage.
* Bias examines the difference between the observed average measurement and a reference value.
Bias indicates how accurate the gage is when compared to a reference value.
* Linearity examines how accurate your measurements are through the expected range of the
measurements. Linearity indicates whether the gage has the same accuracy across all reference values.
  A P-value smaller than 0.05 indicates that a linear equation fits the data well.
For more information take a look at:
* [Measurement System Analysis](http://reliawiki.org/index.php/Measurement_System_Analysis?fbclid=IwAR2uptrlw9MyMaOVLXCOE89GDvN8hNb0qfxgxfxZs7msewQ7ijzqfnGp8oc)
| PypiClean |
# /BoxKit-2023.6.7.tar.gz/BoxKit-2023.6.7/boxkit/library/_region.py
class Region:  # pylint: disable=too-few-public-methods, disable=too-many-instance-attributes
"""Base class for a Region."""
type_ = "base"
def __init__(self, blocklist, **attributes):
"""Initialize the Region object and allocate the data.
Parameters
----------
blocklist : list of objects
attributes : dictionary
{ 'xmin' : low bound in x dir
'ymin' : low bound in y dir
'zmin' : low bound in z dir
'xmax' : high bound in x dir
'ymax' : high bound in y dir
'zmax' : high bound in z dir}
"""
super().__init__()
self.xmin, self.ymin, self.zmin = [-1e10, -1e10, -1e10]
self.xmax, self.ymax, self.zmax = [1e10, 1e10, 1e10]
self.xcenter, self.ycenter, self.zcenter = [0.0, 0.0, 0.0]
self._set_attributes(attributes)
self._map_blocklist(blocklist)
def __repr__(self):
"""Return a representation of the object."""
return (
"Region:\n"
+ f" - type : {type(self)}\n"
+ f" - bound(z-y-x) : [{self.zmin}, {self.zmax}] x "
+ f"[{self.ymin}, {self.ymax}] x "
+ f"[{self.xmin}, {self.xmax}]\n"
)
def _set_attributes(self, attributes):
"""`
Private method for intialization
"""
for key, value in attributes.items():
if hasattr(self, key):
setattr(self, key, value)
else:
raise ValueError(
"[boxkit.library.create.Region] "
+ f'Attribute "{key}" not present in class Region'
)
self.xcenter = (self.xmin + self.xmax) / 2.0
self.ycenter = (self.ymin + self.ymax) / 2.0
self.zcenter = (self.zmin + self.zmax) / 2.0
def _map_blocklist(self, blocklist):
"""
Private method for initialization
"""
self.blocklist = []
if not blocklist:
return
self.blocklist = [block for block in blocklist if self._in_collision(block)]
self._update_bounds()
def _in_collision(self, block):
"""
Check if a block is in collision with the region
"""
xcollision = (
abs(self.xcenter - block.xcenter)
- (self.xmax - self.xmin) / 2.0
- (block.xmax - block.xmin) / 2.0
<= 0.0
)
ycollision = (
abs(self.ycenter - block.ycenter)
- (self.ymax - self.ymin) / 2.0
- (block.ymax - block.ymin) / 2.0
<= 0.0
)
zcollision = (
abs(self.zcenter - block.zcenter)
- (self.zmax - self.zmin) / 2.0
- (block.zmax - block.zmin) / 2.0
<= 0.0
)
incollision = xcollision and ycollision and zcollision
return incollision
def _update_bounds(self):
"""
Update block bounds using the blocklist
"""
if not self.blocklist:
raise ValueError(
"[boxkit.library.create.Region] "
+ "is empty and outside scope of Blocks\n"
)
self.xmin, self.ymin, self.zmin = [
self.blocklist[0].xmin,
self.blocklist[0].ymin,
self.blocklist[0].zmin,
]
self.xmax, self.ymax, self.zmax = [
self.blocklist[0].xmax,
self.blocklist[0].ymax,
self.blocklist[0].zmax,
]
for block in self.blocklist:
self.xmin = min(self.xmin, block.xmin)
self.ymin = min(self.ymin, block.ymin)
self.zmin = min(self.zmin, block.zmin)
self.xmax = max(self.xmax, block.xmax)
self.ymax = max(self.ymax, block.ymax)
self.zmax = max(self.zmax, block.zmax)
self.xcenter = (self.xmin + self.xmax) / 2.0
self.ycenter = (self.ymin + self.ymax) / 2.0
        self.zcenter = (self.zmin + self.zmax) / 2.0
<!-- /Golmorich-1.2.0.1-py3-none-any.whl/Golmorich-1.2.0.1.dist-info/LICENSE.md -->
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
| PypiClean |
/FastNLP-1.0.1.tar.gz/FastNLP-1.0.1/fastNLP/envs/set_env_on_import.py | import os
import sys
from .env import *
import datetime
__all__ = []
def remove_local_rank_in_argv():
"""
    When a script is launched through torch.distributed.launch without the --use_env flag, PyTorch injects the
    rank as a --local_rank command-line argument, which forces the user code to provide a parser for it. Here we
    copy the value into the LOCAL_RANK environment variable and drop the argument from sys.argv to prevent
    parsing errors later on.
:return:
"""
index = -1
for i, v in enumerate(sys.argv):
if v.startswith('--local_rank='):
os.environ['LOCAL_RANK'] = v.split('=')[1]
index = i
break
if index != -1:
sys.argv.pop(index)
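# A minimal sketch of the behaviour above (hypothetical helper, not part of fastNLP);
# it simulates the argv that ``torch.distributed.launch`` produces without ``--use_env``.
def _demo_remove_local_rank():
    sys.argv = ['train.py', '--local_rank=3', '--lr', '0.01']
    remove_local_rank_in_argv()
    assert os.environ['LOCAL_RANK'] == '3'
    assert sys.argv == ['train.py', '--lr', '0.01']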
def set_env_on_import_torch():
if 'WORLD_SIZE' in os.environ and 'LOCAL_RANK' in os.environ and 'RANK' in os.environ:
os.environ[FASTNLP_GLOBAL_RANK] = os.environ['RANK']
if int(os.environ.get(FASTNLP_REMOVE_LOCAL_RANK, 1)):
remove_local_rank_in_argv()
if 'WORLD_SIZE' in os.environ and 'LOCAL_RANK' in os.environ and 'RANK' in os.environ and \
FASTNLP_DISTRIBUTED_CHECK not in os.environ:
os.environ[FASTNLP_BACKEND_LAUNCH] = '1'
# TODO: paddle may need to set this as well
def set_env_on_import_paddle():
if "PADDLE_TRAINERS_NUM" in os.environ and "PADDLE_TRAINER_ID" in os.environ \
and "PADDLE_RANK_IN_NODE" in os.environ:
# Environment variables of a distributed setup were detected
os.environ[FASTNLP_GLOBAL_RANK] = os.environ["PADDLE_TRAINER_ID"]
# If the process was not launched by fastNLP itself
if FASTNLP_DISTRIBUTED_CHECK not in os.environ:
os.environ[FASTNLP_BACKEND_LAUNCH] = "1"
# TODO: jittor may need to set this as well
def set_env_on_import_jittor():
# todo: FASTNLP_GLOBAL_RANK and FASTNLP_BACKEND_LAUNCH still need to be set here
if 'log_silent' not in os.environ:
os.environ['log_silent'] = '1'
def set_env_on_import_oneflow():
if 'GLOG_log_dir' in os.environ:
os.environ[FASTNLP_GLOBAL_RANK] = os.environ['RANK']
if int(os.environ.get(FASTNLP_REMOVE_LOCAL_RANK, 1)):
remove_local_rank_in_argv()
if 'GLOG_log_dir' in os.environ and FASTNLP_DISTRIBUTED_CHECK not in os.environ:
os.environ[FASTNLP_BACKEND_LAUNCH] = '1'
def set_env_on_import():
"""
    Set the environment variables that fastNLP relies on.
:return:
"""
# Framework-specific environment setup
if "oneflow" not in sys.modules:
set_env_on_import_torch()
set_env_on_import_paddle()
set_env_on_import_jittor()
set_env_on_import_oneflow()
# Variables used internally by fastNLP
if FASTNLP_LAUNCH_TIME not in os.environ:
cur_time = f"{datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S_%f')}"
os.environ[FASTNLP_LAUNCH_TIME] = cur_time
# Set the corresponding default value
if FASTNLP_LOG_LEVEL not in os.environ:
os.environ[FASTNLP_LOG_LEVEL] = 'AUTO' | PypiClean |
/MNN_FMA-1.0.1-cp27-cp27m-manylinux2010_x86_64.whl/MNN/tools/mnn_fb/ReductionParam.py |
# namespace: MNN
import flatbuffers
class ReductionParam(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsReductionParam(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ReductionParam()
x.Init(buf, n + offset)
return x
# ReductionParam
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ReductionParam
def Operation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# ReductionParam
def Dim(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# ReductionParam
def DimAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# ReductionParam
def DimLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ReductionParam
def Coeff(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# ReductionParam
def KeepDims(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# ReductionParam
def DType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 1
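# Building sketch (hypothetical values) using the module-level helper functions below:
#   builder = flatbuffers.Builder(0)
#   ReductionParamStart(builder)
#   ReductionParamAddOperation(builder, 0)
#   ReductionParamAddKeepDims(builder, True)
#   offset = ReductionParamEnd(builder)
#   builder.Finish(offset)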
def ReductionParamStart(builder): builder.StartObject(5)
def ReductionParamAddOperation(builder, operation): builder.PrependInt8Slot(0, operation, 0)
def ReductionParamAddDim(builder, dim): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(dim), 0)
def ReductionParamStartDimVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ReductionParamAddCoeff(builder, coeff): builder.PrependFloat32Slot(2, coeff, 0.0)
def ReductionParamAddKeepDims(builder, keepDims): builder.PrependBoolSlot(3, keepDims, 0)
def ReductionParamAddDType(builder, dType): builder.PrependInt32Slot(4, dType, 1)
def ReductionParamEnd(builder): return builder.EndObject() | PypiClean |
/GraphQL_core_next-1.1.1-py3-none-any.whl/graphql/language/__init__.py | from .source import Source
from .location import get_location, SourceLocation
from .print_location import print_location, print_source_location
from .token_kind import TokenKind
from .lexer import Lexer
from .parser import parse, parse_type, parse_value
from .printer import print_ast
from .visitor import (
visit,
Visitor,
ParallelVisitor,
TypeInfoVisitor,
BREAK,
SKIP,
REMOVE,
IDLE,
)
from .ast import (
Location,
Token,
Node,
# Each kind of AST node
NameNode,
DocumentNode,
DefinitionNode,
ExecutableDefinitionNode,
OperationDefinitionNode,
OperationType,
VariableDefinitionNode,
VariableNode,
SelectionSetNode,
SelectionNode,
FieldNode,
ArgumentNode,
FragmentSpreadNode,
InlineFragmentNode,
FragmentDefinitionNode,
ValueNode,
IntValueNode,
FloatValueNode,
StringValueNode,
BooleanValueNode,
NullValueNode,
EnumValueNode,
ListValueNode,
ObjectValueNode,
ObjectFieldNode,
DirectiveNode,
TypeNode,
NamedTypeNode,
ListTypeNode,
NonNullTypeNode,
TypeSystemDefinitionNode,
SchemaDefinitionNode,
OperationTypeDefinitionNode,
TypeDefinitionNode,
ScalarTypeDefinitionNode,
ObjectTypeDefinitionNode,
FieldDefinitionNode,
InputValueDefinitionNode,
InterfaceTypeDefinitionNode,
UnionTypeDefinitionNode,
EnumTypeDefinitionNode,
EnumValueDefinitionNode,
InputObjectTypeDefinitionNode,
DirectiveDefinitionNode,
TypeSystemExtensionNode,
SchemaExtensionNode,
TypeExtensionNode,
ScalarTypeExtensionNode,
ObjectTypeExtensionNode,
InterfaceTypeExtensionNode,
UnionTypeExtensionNode,
EnumTypeExtensionNode,
InputObjectTypeExtensionNode,
)
from .predicates import (
is_definition_node,
is_executable_definition_node,
is_selection_node,
is_value_node,
is_type_node,
is_type_system_definition_node,
is_type_definition_node,
is_type_system_extension_node,
is_type_extension_node,
)
from .directive_locations import DirectiveLocation
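# Quick round-trip sketch with the helpers re-exported below (query string is illustrative):
#   document = parse("{ hello }")      # -> a DocumentNode
#   sdl = print_ast(document)          # -> the query pretty-printed back to a string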
__all__ = [
"get_location",
"SourceLocation",
"print_location",
"print_source_location",
"TokenKind",
"Lexer",
"parse",
"parse_value",
"parse_type",
"print_ast",
"Source",
"visit",
"Visitor",
"ParallelVisitor",
"TypeInfoVisitor",
"BREAK",
"SKIP",
"REMOVE",
"IDLE",
"Location",
"Token",
"DirectiveLocation",
"Node",
"NameNode",
"DocumentNode",
"DefinitionNode",
"ExecutableDefinitionNode",
"OperationDefinitionNode",
"OperationType",
"VariableDefinitionNode",
"VariableNode",
"SelectionSetNode",
"SelectionNode",
"FieldNode",
"ArgumentNode",
"FragmentSpreadNode",
"InlineFragmentNode",
"FragmentDefinitionNode",
"ValueNode",
"IntValueNode",
"FloatValueNode",
"StringValueNode",
"BooleanValueNode",
"NullValueNode",
"EnumValueNode",
"ListValueNode",
"ObjectValueNode",
"ObjectFieldNode",
"DirectiveNode",
"TypeNode",
"NamedTypeNode",
"ListTypeNode",
"NonNullTypeNode",
"TypeSystemDefinitionNode",
"SchemaDefinitionNode",
"OperationTypeDefinitionNode",
"TypeDefinitionNode",
"ScalarTypeDefinitionNode",
"ObjectTypeDefinitionNode",
"FieldDefinitionNode",
"InputValueDefinitionNode",
"InterfaceTypeDefinitionNode",
"UnionTypeDefinitionNode",
"EnumTypeDefinitionNode",
"EnumValueDefinitionNode",
"InputObjectTypeDefinitionNode",
"DirectiveDefinitionNode",
"TypeSystemExtensionNode",
"SchemaExtensionNode",
"TypeExtensionNode",
"ScalarTypeExtensionNode",
"ObjectTypeExtensionNode",
"InterfaceTypeExtensionNode",
"UnionTypeExtensionNode",
"EnumTypeExtensionNode",
"InputObjectTypeExtensionNode",
"is_definition_node",
"is_executable_definition_node",
"is_selection_node",
"is_value_node",
"is_type_node",
"is_type_system_definition_node",
"is_type_definition_node",
"is_type_system_extension_node",
"is_type_extension_node",
] | PypiClean |
/MeetupAPI-1.5.8-py3-none-any.whl/meetup_api/meetup_functions/events.py | import time
from meetup_api.log import Log
class MeetupEvents():
def __init__(self,
meetup_class,
results_per_page=200,
pages='all',
maximum_num_events=10000,
fields=['group_key_photo', 'series', 'simple_html_description', 'rsvp_sample']):
# Events
# https://www.meetup.com/meetup_api/docs/:urlname/events/#list
import requests
self.logs = ['self.__init__']
self.started = round(time.time())
self.log('events()')
self.value = []
self.offset = 0
self.response_json = ['']
if pages == 'all':
pages = 10000
while pages >= self.offset and len(self.response_json) > 0:
self.response = requests.get('https://api.meetup.com/'+meetup_class.group+'/events',
params={
'fields': fields,
'photo-host': 'public',
'page': results_per_page,
'offset': self.offset
})
self.offset += 1
self.response_json = self.response.json()
if 'errors' in self.response_json and self.response_json['errors'][0]['code'] == 'group_error':
self.log("-> ERROR: Group name doesn't exist")
else:
self.value += [
{
'str_name_en_US': meetup_class.str_name_en_US(event),
'int_UNIXtime_event_start': meetup_class.int_UNIXtime_event_start(event),
'int_UNIXtime_event_end': meetup_class.int_UNIXtime_event_end(event),
'int_minutes_duration': meetup_class.int_minutes_duration(event),
'url_featured_photo': meetup_class.url_featured_photo(event),
'text_description_en_US': meetup_class.text_description_en_US(event),
'str_location': meetup_class.str_location(event),
'one_space': meetup_class.one_space(event) if hasattr(meetup_class, 'one_space') else None,
'one_guilde': meetup_class.one_guilde(event) if hasattr(meetup_class, 'one_guilde') else None,
'str_series_id': meetup_class.str_series_id(event),
'int_series_startUNIX': meetup_class.int_series_startUNIX(event),
'int_series_endUNIX': meetup_class.int_series_endUNIX(event),
'text_series_timing': meetup_class.text_series_timing(event),
'url_meetup_event': meetup_class.url_meetup_event(event),
'int_UNIXtime_created': meetup_class.int_UNIXtime_created(event),
'int_UNIXtime_updated': meetup_class.int_UNIXtime_updated(event),
'str_timezone': meetup_class.str_timezone(event)
} for event in self.response_json
]
if len(self.value) >= maximum_num_events:
self.log('Collected maximum number of events.')
break
self.log('Collected {} events...'.format(len(self.value)))
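    # Usage sketch (hypothetical): ``MeetupEvents(meetup).value`` yields the collected
    # events; ``meetup`` must expose ``group`` plus the mapping helpers used above
    # (str_name_en_US, int_UNIXtime_event_start, one_space, ...).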
def log(self, text):
import os
self.logs.append(text)
Log().print('{}'.format(text), os.path.basename(__file__), self.started) | PypiClean |
/BrickBreakerGame-0.2.6.tar.gz/BrickBreakerGame-0.2.6/BrickBreaker/brick_breaker.py | import pygame
from BrickBreaker import *
from BrickBreaker.Scenes import *
from BrickBreaker.Shared import *
class BrickBreaker:
def __init__(self):
self._lives = 5
self._score = 0
self._bonus = 1
self._level = Level(self)
self._level.load_random()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
self._balls = [
Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self)
]
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.mixer.init()
pygame.init()
pygame.display.set_caption("Brick Breaker")
self._clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(GameConstants.SCREEN_SIZE)
pygame.mouse.set_visible(False)
self._scenes = (
PlayingGameScene(self),
HighscoreScene(self),
MainMenuScene(self),
GameOverScene(self),
WinScene(self),
ControlsScene(self),
GameRulesScene(self),
)
self._current_scene = 2
self._sounds = (
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_A_STANDARD_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_SPEED_UP_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_LIFE_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_BALL_HITTING_A_WALL_OR_A_PAD),
pygame.mixer.Sound(GameConstants.SOUND_FILE_GAME_OVER),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_BALL_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_BONUS_SIZE_BRICK),
)
def start(self):
while True:
self._clock.tick(60)
self.screen.fill((0, 0, 0))
_current_scene = self._scenes[self._current_scene]
_current_scene.handle_events(pygame.event.get())
_current_scene.render()
pygame.display.update()
def change_scene(self, scene):
self._current_scene = scene
def get_level(self):
return self._level
def get_bonus(self):
return self._bonus
def increment_bonus(self):
self._bonus += 1
def reset_bonus(self):
self._bonus = 1
def double_pad(self):
keyboard = self._pad.get_keyboard_status()
mouse = self._pad.get_mouse_status()
self._pad = DoublePad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.DOUBLE_PAD_SIZE[1]),
pygame.image.load(GameConstants.DOUBLE_PAD_IMAGE))
if keyboard:
self._pad.activate_keyboard()
if mouse:
self._pad.activate_mouse()
def reset_pad(self):
keyboard = self._pad.get_keyboard_status()
mouse = self._pad.get_mouse_status()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
if keyboard:
self._pad.activate_keyboard()
if mouse:
self._pad.activate_mouse()
def get_pad(self):
return self._pad
def get_score(self):
return self._score
def increase_score(self, score):
self._score += score * self._bonus
def increase_score_by_1k(self, score=1000):
self._score += score * self._bonus
def get_lives(self):
return self._lives
def get_balls(self):
return self._balls
def add_one_ball(self):
self._balls.append(Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self))
def play_sound(self, sound_clip):
sound = self._sounds[sound_clip]
sound.stop()
sound.play()
def reduce_life_by_one(self):
self._lives -= 1
def add_one_life(self):
self._lives += 1
def reset(self):
self._lives = 5
self._score = 0
self._bonus = 1
self._level = Level(self)
self._level.load_random()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
def main():
BrickBreaker().start()
if __name__ == '__main__':
BrickBreaker().start() | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/numerical/isolines/isolines_numpy.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from numpy import meshgrid
from numpy import linspace
from numpy import amax
from numpy import amin
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
__all__ = [
"scalarfield_contours_numpy",
]
# def trimesh_descent(trimesh):
# """"""
# vertices, faces = trimesh.to_vertices_and_faces()
# V = array(vertices)
# F = array(faces)
# G = grad(V, F)
# sfield = V[:, 2].reshape((-1, 1))
# vfield = - G.dot(sfield)
# return vfield.reshape((-1, 3), order='F').tolist()
# ==============================================================================
# contours
# ==============================================================================
def scalarfield_contours_numpy(xy, s, levels=50, density=100):
r"""Compute the contour lines of a scalarfield.
Parameters
----------
xy : array-like
The xy-coordinates at which the scalar field is defined.
s : array-like
The values of the scalar field.
levels : int, optional
The number of contour lines to compute.
Default is ``50``.
density : int, optional
The resolution of the interpolation grid; the field is resampled onto a
``2 * density`` by ``2 * density`` grid before contouring.
Default is ``100``.
Returns
-------
tuple
A tuple of a list of levels and a list of contour geometry.
The list of levels contains the values of the scalarfield at each of
the contours. The second item in the tuple is a list of contour lines.
Each contour line is a list of paths, and each path is a list polygons.
Notes
-----
The computation of the contour lines is based on the `contours function`_
available through matplotlib.
Examples
--------
.. code-block:: python
import compas
from compas.datastructures import Mesh
from compas.geometry import centroid_points
from compas.geometry import distance_point_point
from compas.numerical import scalarfield_contours_numpy
mesh = Mesh.from_obj(compas.get('faces.obj'))
points = [mesh.vertex_coordinates(key) for key in mesh.vertices()]
centroid = centroid_points(points)
distances = [distance_point_point(point, centroid) for point in points]
xy = [point[0:2] for point in points]
levels, contours = scalarfield_contours_numpy(xy, distances)
for i in range(len(contours)):
level = levels[i]
contour = contours[i]
print(level)
for path in contour:
for polygon in path:
print(polygon)
.. _contours function: http://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html#matplotlib.axes.Axes.contour
"""
xy = asarray(xy)
s = asarray(s)
x = xy[:, 0]
y = xy[:, 1]
X, Y = meshgrid(linspace(amin(x), amax(x), 2 * density), linspace(amin(y), amax(y), 2 * density))
S = griddata((x, y), s, (X, Y), method="cubic")
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
c = ax.contour(X, Y, S, levels)
contours = [0] * len(c.collections)
levels = c.levels
for i, coll in enumerate(iter(c.collections)):
paths = coll.get_paths()
contours[i] = [0] * len(paths)
for j, path in enumerate(iter(paths)):
polygons = path.to_polygons()
contours[i][j] = [0] * len(polygons)
for k, polygon in enumerate(iter(polygons)):
contours[i][j][k] = polygon
plt.close(fig)
return levels, contours | PypiClean |
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/sharepoint/sharing/links/info.py | from office365.runtime.client_value import ClientValue
from office365.runtime.client_value_collection import ClientValueCollection
from office365.sharepoint.sharing.invitation.link import LinkInvitation
from office365.sharepoint.sharing.principal import Principal
class SharingLinkInfo(ClientValue):
def __init__(self, allows_anonymous_access=None, application_id=None, created=None, created_by=Principal(),
password_protected=None, invitations=None, redeemed_users=None,
last_modified_by=Principal(), password_last_modified_by=Principal(),
url=None):
"""
Specifies the information about the tokenized sharing link.
:param bool allows_anonymous_access: Indicates whether the tokenized sharing link allows anonymous access.
:param str application_id:
:param str created: The UTC date/time string with complete representation for calendar date and time of day
which represents the time and date of creation of the tokenized sharing link.
:param Principal created_by: Indicates the principal who created the tokenized sharing link, or null if the
created by value is not recorded.
:param bool password_protected:
:param list[LinkInvitation] invitations: This value contains the current membership list for principals
that have been Invited to the tokenized sharing link.
:param list[LinkInvitation] redeemed_users:
:param Principal last_modified_by: Indicates the principal who last modified the tokenized sharing link.
This value MUST be null if the last modified by value is not recorded.
:param Principal password_last_modified_by:
:param str url: The URL of the tokenized sharing link.
"""
super(SharingLinkInfo, self).__init__()
self.AllowsAnonymousAccess = allows_anonymous_access
self.ApplicationId = application_id
self.Created = created
self.CreatedBy = created_by
self.PasswordProtected = password_protected
self.Invitations = ClientValueCollection(LinkInvitation, invitations)
self.RedeemedUsers = ClientValueCollection(LinkInvitation, redeemed_users)
self.LastModifiedBy = last_modified_by
self.PasswordLastModifiedBy = password_last_modified_by
self.Url = url
def __str__(self):
return self.Url
def __repr__(self):
return self.Url
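    # Usage sketch (illustrative values) -- clients normally receive this value
    # deserialized from a sharing REST response rather than constructing it by hand:
    #   info = SharingLinkInfo(allows_anonymous_access=True, url="https://contoso.sharepoint.com/:f:/g/abc123")
    #   str(info)  # -> the sharing link URL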
@property
def entity_type_name(self):
return "SP.Sharing.SharingLinkInfo" | PypiClean |
/Django-Photofile-0.5.0.zip/Django-Photofile-0.5.0/photofile/metadata.py | import os
import time
import datetime
import re
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from PIL import IptcImagePlugin
try:
import pyexiv2
PYEXIV2_SUPPORT = True
except ImportError:
PYEXIV2_SUPPORT = False
# Credits http://eran.sandler.co.il/2011/05/20/extract-gps-latitude-and-longitude-data-from-exif-using-python-imaging-library-pil/
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _fractToSimple(frac):
if not frac:
return None
try:
f,n = frac
return round(float(f) / float(n), 3)
except Exception, e:
return None
def _convert_to_degress(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
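# Worked example (illustrative values): EXIF stores a coordinate as three rationals
# (degrees, minutes, seconds), e.g. ((40, 1), (26, 1), (4614, 100)); the helper above
# converts this to 40 + 26/60.0 + 46.14/3600.0 = 40.44615 decimal degrees.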
def get_keywords(filename):
if not PYEXIV2_SUPPORT:
metadata = get_exif(filename)
return metadata.get('keywords', [])
metadata = pyexiv2.metadata.ImageMetadata(filename)
metadata.read()
key = 'Iptc.Application2.Keywords' # XMP keywords?
try:
return [s.strip() for s in metadata[key].raw_value]
except KeyError:
return []
def set_keywords(filename, keywords):
if not PYEXIV2_SUPPORT or not keywords:
return
metadata = pyexiv2.metadata.ImageMetadata(filename)
metadata.read()
key = 'Iptc.Application2.Keywords' # XMP keywords?
try:
old_keywords = [s.strip() for s in metadata[key].raw_value]
except KeyError:
old_keywords = []
for keyword in old_keywords:
if not keyword in keywords:
keywords.append(keyword)
metadata[key] = keywords
metadata.write()
# http://www.blog.pythonlibrary.org/2010/03/28/getting-photo-metadata-exif-using-python/
def get_exif(fn):
ret = {}
i = Image.open(fn)
info = i._getexif() or {}  # _getexif() returns None when the image carries no EXIF block
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
#print "TAG", decoded, value
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
ret[decoded] = gps_data
lat = None
lon = None
gps_latitude = _get_if_exist(gps_data, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_data, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_data, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_data, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
ret['latitude'] = lat
ret['longitude'] = lon
altitude = _get_if_exist(gps_data, 'GPSAltitude')
if altitude:
altitude = _fractToSimple(altitude)
ret['altitude'] = altitude
#print "altitude", ret['altitude']
else:
ret[decoded] = value
try:
iptc = IptcImagePlugin.getiptcinfo(i)
ret['headline'] = iptc[(2,105)]
ret['caption'] = iptc[(2,120)]
ret['copyright'] = iptc[(2,116)]
ret['keywords'] = iptc[(2,25)]
except:
ret['headline'] = None
ret['caption'] = None
ret['copyright'] = None
ret['keywords'] = []
return ret
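# Usage sketch: ``get_exif('photo.jpg')`` (hypothetical path) returns the decoded EXIF
# tags plus the derived 'latitude'/'longitude'/'altitude' values and the IPTC
# 'headline', 'caption', 'copyright' and 'keywords' fields filled in above.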
def ExtractMakeAndModel(data):
"Extracts make and model from data produced by pyexiv2."
make = data.get('Exif.Image.Make', None)
model = data.get('Exif.Image.Model', None)
if make and model:
return make.value, model.value
return None, None
def ExtractLocationData(data):
try: location_name = ', '.join(data['Iptc.Application2.LocationName'].values)
except: location_name = None
try: city = ', '.join(data['Iptc.Application2.City'].values)
except: city = None
try: province_state = ', '.join(data['Iptc.Application2.ProvinceState'].values)
except: province_state = None
try: country_code = ', '.join(data['Iptc.Application2.CountryCode'].values)
except: country_code = None
try: country_name = ', '.join(data['Iptc.Application2.CountryName'].values)
except KeyError: country_name = None
return location_name, city, province_state, country_code, country_name
def ExtractGPSInfo(data):
"Extracts longitude, latitude and altitude from data produced by pyexiv2."
# http://linfiniti.com/2009/06/reading-geotagging-data-from-blackberry-camera-images/
myLonDirection = None
myLonDegrees = None
myLonMinutes = None
myLatDirection = None
myLatDegrees = None
myLatMinutes = None
try:
# Will be either 'E' or 'W'
myLonDirection = data['Exif.GPSInfo.GPSLongitudeRef']
# Will return a rational number like : '27/1'
myLonDegrees = data['Exif.GPSInfo.GPSLongitude'][0]
# Will return a rational number like : '53295/1000'
myLonMinutes = data['Exif.GPSInfo.GPSLongitude'][1]
# Will be either 'N' or 'S'
myLatDirection = data['Exif.GPSInfo.GPSLatitudeRef']
# Will return a rational number like : '27/1'
myLatDegrees = data['Exif.GPSInfo.GPSLatitude'][0]
# Will return a rational number like : '56101/1000'
myLatMinutes = data['Exif.GPSInfo.GPSLatitude'][1]
except:
return (None, None, None)
# Get the degree and minute values
myRegexp = re.compile( '^[0-9]*' )
myLonDegreesFloat = float(myRegexp.search( str(myLonDegrees) ).group())
myLatDegreesFloat = float(myRegexp.search( str(myLatDegrees) ).group())
myLonMinutesFloat = float(myRegexp.search( str(myLonMinutes) ).group())
myLatMinutesFloat = float(myRegexp.search( str(myLatMinutes) ).group())
# Divide the values by the divisor
myRegexp = re.compile( '[0-9]*$' )
myLon = myLonDegreesFloat / float(myRegexp.search( str(myLonDegrees) ).group())
myLat = myLatDegreesFloat / float(myRegexp.search( str(myLatDegrees) ).group())
myLonMin = myLonMinutesFloat / float(myRegexp.search( str(myLonMinutes) ).group())
myLatMin = myLatMinutesFloat / float(myRegexp.search( str(myLatMinutes) ).group())
# We now have degrees and decimal minutes, so convert to decimal degrees...
myLon = myLon + (myLonMin / 60)
myLat = myLat + (myLatMin / 60)
# Use a negative sign as needed
if myLonDirection == 'W': myLon = 0 - myLon
if myLatDirection == 'S': myLat = 0 - myLat
try:
altitude = data['Exif.GPSInfo.GPSAltitude'][0]
except:
altitude = None
#print myLon, myLat, altitude
return (myLon, myLat, altitude)
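# Worked example for ExtractGPSInfo (values assumed for illustration): with
# 'Exif.GPSInfo.GPSLongitude' == ('27/1', '53295/1000') and ref 'E', the
# rationals give degrees = 27/1 = 27.0 and minutes = 53295/1000 = 53.295,
# so myLon = 27.0 + 53.295/60 = 27.88825 decimal degrees (a 'W' or 'S' ref
# would negate the value).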
def exiv2ToDict(metadata):
result = {}
keys = metadata.exif_keys
keys.extend(metadata.iptc_keys)
keys.extend(metadata.xmp_keys)
for key in keys:
result[key] = metadata[key]
return result
def prettyPrintexiv2(metadata):
result = {}
for k,v in metadata.items():
if hasattr(v, 'values'):
result[k] = ' '.join(v.values)
else:
result[k] = str(v.value)
return result
def ExtractPhotoMetadata(filename):
"""
"""
if not PYEXIV2_SUPPORT:
return {}
metadata = pyexiv2.ImageMetadata(filename)
metadata.read()
result = exiv2ToDict(metadata)
result['width'], result['height'] = metadata.dimensions
return result
metadata_fields = {
'camera_model': None,
'manufacturer': None,
'orientation': None,
'exposure_time': None,
'fnumber': None,
'exposure_program': None,
'iso_speed': None,
'metering_mode': None,
'light_source': None,
'flash_used': None,
'focal_length': None,
'longitude': None,
'latitude': None,
'altitude': None,
'exposure_mode': None,
'whitebalance': None,
'focal_length_in_35mm': None,
'width': None,
'height': None,
'keywords': None,
'headline': None,
'caption': None,
'copyright': None,
'software': None,
}
def get_metadata(filename):
result = metadata_fields.copy()
try:
metadata = get_exif(filename)
#import pprint
#pprint.pprint(metadata)
#for k,v in metadata.items():
# print "TAG", k, v, str(v)
# EXIF
tm = time.strptime(metadata.get('DateTime', metadata.get('DateTimeOriginal', metadata.get('DateTimeDigitized'))),"%Y:%m:%d %H:%M:%S")
result['exif_date'] = datetime.datetime.fromtimestamp(time.mktime(tm))
result['camera_model'] = metadata.get("Model", None)
result['orientation'] = metadata.get("Orientation", None)
result['exposure_time'] = _fractToSimple(metadata.get("ExposureTime", metadata.get('ShutterSpeedValue', 0)))
result['fnumber'] = _fractToSimple(metadata.get("FNumber", -1.0))
result['exposure_program'] = metadata.get("ExposureProgram", None)
result['iso_speed'] = metadata.get("ISOSpeedRatings", None)
result['metering_mode'] = metadata.get("MeteringMode", None)
result['light_source'] = metadata.get("LightSource", None)
result['flash_used'] = metadata.get("Flash", None)
result['focal_length'] = _fractToSimple(metadata.get("FocalLength", -1.0))
result['width'] = metadata.get('ExifImageWidth', None)
result['height'] = metadata.get('ExifImageHeight', None)
result['software'] = metadata.get('Software')
result['manufacturer'] = metadata.get('Make')
# IPTC
result['keywords'] = metadata.get('Keywords', None)
result['headline'] = metadata.get('Headline', metadata.get('By-line'))
result['caption'] = metadata.get('Caption', metadata.get('ImageDescription'))
result['copyright'] = metadata.get('Copyright', None)
# GPS tags
result['longitude'] = metadata.get('longitude')
result['latitude'] = metadata.get('latitude')
result['altitude'] = metadata.get('altitude')
except Exception, e:
print "Error using PIL: %s for file %s." % (e, filename)
if PYEXIV2_SUPPORT:
try:
metadata = ExtractPhotoMetadata(filename)
# EXIF
try:
result['exif_date'] = metadata['Exif.Image.DateTime'].value
except:
result['exif_date'] = datetime.datetime.fromtimestamp(os.stat(filename).st_ctime)
result['camera_model'] = metadata.get("Exif.Image.Model", None)
result['orientation'] = metadata.get("Exif.Image.Orientation", None)
if result['orientation']:
result['orientation'] = result['orientation'].value
result['exposure_time'] = metadata.get("Exif.Photo.ExposureTime", None)
result['fnumber'] = metadata.get("Exif.Photo.FNumber", None)
result['exposure_program'] = metadata.get("Exif.Photo.ExposureProgram", None)
result['iso_speed'] = metadata.get("Exif.Photo.ISOSpeedRatings", None)
result['metering_mode'] = metadata.get("Exif.Photo.MeteringMode", None)
result['light_source'] = metadata.get("Exif.Photo.LightSource", None)
result['flash_used'] = metadata.get("Exif.Photo.Flash", None)
result['focal_length'] = metadata.get("Exif.Photo.FocalLength", None)
longitude, latitude, altitude = ExtractGPSInfo(metadata)
result['longitude'] = longitude
result['latitude'] = latitude
result['altitude'] = altitude
result['exposure_mode'] = metadata.get("Exif.Photo.ExposureMode", None)
result['whitebalance'] = metadata.get("Exif.Photo.WhiteBalance", None)
result['focal_length_in_35mm'] = metadata.get("Exif.Photo.FocalLengthIn35mmFilm", None)
result['width'] = metadata.get('ExifImageWidth', None)
result['height'] = metadata.get('ExifImageHeight', None)
# IPTC
except Exception, e:
print "Error using PYEXIV: %s for file %s." % (e, filename)
if not 'exif_date' in result:
result['exif_date'] = datetime.datetime.fromtimestamp(os.stat(filename).st_ctime)
    return result
/M2CryptoWin32-0.21.1-3.tar.gz/M2CryptoWin32-0.21.1-3/M2Crypto/ASN1.py
import time, datetime
import BIO
import m2
MBSTRING_FLAG = 0x1000
MBSTRING_ASC = MBSTRING_FLAG | 1
MBSTRING_BMP = MBSTRING_FLAG | 2
class ASN1_Integer:
m2_asn1_integer_free = m2.asn1_integer_free
def __init__(self, asn1int, _pyfree=0):
self.asn1int = asn1int
self._pyfree = _pyfree
def __cmp__(self, other):
return m2.asn1_integer_cmp(self.asn1int, other.asn1int)
def __del__(self):
if self._pyfree:
self.m2_asn1_integer_free(self.asn1int)
class ASN1_String:
m2_asn1_string_free = m2.asn1_string_free
def __init__(self, asn1str, _pyfree=0):
self.asn1str = asn1str
self._pyfree = _pyfree
def __str__(self):
buf = BIO.MemoryBuffer()
m2.asn1_string_print( buf.bio_ptr(), self.asn1str )
return buf.read_all()
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_asn1_string_free(self.asn1str)
def _ptr(self):
return self.asn1str
def as_text(self, flags=0):
buf = BIO.MemoryBuffer()
m2.asn1_string_print_ex( buf.bio_ptr(), self.asn1str, flags)
return buf.read_all()
class ASN1_Object:
m2_asn1_object_free = m2.asn1_object_free
def __init__(self, asn1obj, _pyfree=0):
self.asn1obj = asn1obj
self._pyfree = _pyfree
def __del__(self):
if self._pyfree:
self.m2_asn1_object_free(self.asn1obj)
def _ptr(self):
return self.asn1obj
class _UTC(datetime.tzinfo):
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
def utcoffset(self, dt):
return datetime.timedelta(0)
def __repr__(self):
return "<Timezone: %s>" % self.tzname(None)
UTC = _UTC()
class LocalTimezone(datetime.tzinfo):
""" Localtimezone from datetime manual """
def __init__(self):
self._stdoffset = datetime.timedelta(seconds = -time.timezone)
if time.daylight:
self._dstoffset = datetime.timedelta(seconds = -time.altzone)
else:
self._dstoffset = self._stdoffset
self._dstdiff = self._dstoffset - self._stdoffset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dstoffset
else:
return self._stdoffset
def dst(self, dt):
if self._isdst(dt):
return self._dstdiff
else:
return datetime.timedelta(0)
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
class ASN1_UTCTIME:
_ssl_months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug",
"Sep", "Oct", "Nov", "Dec"]
m2_asn1_utctime_free = m2.asn1_utctime_free
def __init__(self, asn1_utctime=None, _pyfree=0):
if asn1_utctime is not None:
            assert m2.asn1_utctime_type_check(asn1_utctime), "'asn1_utctime' type error"
self.asn1_utctime = asn1_utctime
self._pyfree = _pyfree
else:
self.asn1_utctime = m2.asn1_utctime_new ()
self._pyfree = 1
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_asn1_utctime_free(self.asn1_utctime)
def __str__(self):
        assert m2.asn1_utctime_type_check(self.asn1_utctime), "'asn1_utctime' type error"
buf = BIO.MemoryBuffer()
m2.asn1_utctime_print( buf.bio_ptr(), self.asn1_utctime )
return buf.read_all()
def _ptr(self):
        assert m2.asn1_utctime_type_check(self.asn1_utctime), "'asn1_utctime' type error"
return self.asn1_utctime
def set_string (self, string):
"""
Set time from UTC string.
"""
        assert m2.asn1_utctime_type_check(self.asn1_utctime), "'asn1_utctime' type error"
return m2.asn1_utctime_set_string( self.asn1_utctime, string )
def set_time (self, time):
"""
Set time from seconds since epoch (long).
"""
        assert m2.asn1_utctime_type_check(self.asn1_utctime), "'asn1_utctime' type error"
return m2.asn1_utctime_set( self.asn1_utctime, time )
def get_datetime(self):
date = str(self)
timezone = None
if ' ' not in date:
raise ValueError("Invalid date: %s" % date)
month, rest = date.split(' ', 1)
if month not in self._ssl_months:
raise ValueError("Invalid date %s: Invalid month: %s" % (date, m))
if rest.endswith(' GMT'):
timezone = UTC
rest = rest[:-4]
tm = list(time.strptime(rest, "%d %H:%M:%S %Y"))[:6]
tm[1] = self._ssl_months.index(month) + 1
tm.append(0)
tm.append(timezone)
return datetime.datetime(*tm)
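    # Illustrative round-trip (string format assumed from the parsing above,
    # matching OpenSSL's printed form):
    #   str(utc)           -> "Feb  2 12:00:00 2016 GMT"
    #   utc.get_datetime() -> datetime.datetime(2016, 2, 2, 12, 0, tzinfo=UTC)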
def set_datetime(self, date):
local = LocalTimezone()
if date.tzinfo is None:
date = date.replace(tzinfo=local)
date = date.astimezone(local)
        return self.set_time(int(time.mktime(date.timetuple())))
/Mopidy-Plex-0.1.0b.tar.gz/Mopidy-Plex-0.1.0b/README.rst
****************************
Mopidy-Plex
****************************
.. image:: https://img.shields.io/pypi/v/Mopidy-Plex.svg?style=flat
:target: https://pypi.python.org/pypi/Mopidy-Plex/
:alt: Latest PyPI version
.. image:: https://img.shields.io/pypi/dm/Mopidy-Plex.svg?style=flat
:target: https://pypi.python.org/pypi/Mopidy-Plex/
:alt: Number of PyPI downloads
.. image:: https://img.shields.io/travis/havardgulldahl/mopidy_plex/master.svg?style=flat
:target: https://travis-ci.org/havardgulldahl/mopidy_plex
:alt: Travis CI build status
.. image:: https://img.shields.io/coveralls/havardgulldahl/mopidy_plex/master.svg?style=flat
:target: https://coveralls.io/r/havardgulldahl/mopidy_plex
:alt: Test coverage
Mopidy extension for playing audio from a Plex server
Installation
============
Install by running::
pip install Mopidy-Plex
Or, if available, install the Debian/Ubuntu package from `apt.mopidy.com
<http://apt.mopidy.com/>`_.
And you need the `python-plexapi` module as well::
pip install plexapi
Extra setup hassle
-------------------
**Please note** that you need the `python-plexapi` package *with audio/music support*!
As of 2016-02-02, that functionality is not yet upstream, so you need to install it from
https://github.com/havardgulldahl/python-plexapi for now.
Configuration
=============
Before starting Mopidy, you must add configuration for
Mopidy-Plex to your Mopidy configuration file::
[plex]
enabled = true
server = http://192.168.0.105:32400
Project resources
=================
- `Source code <https://github.com/havardgulldahl/mopidy-plex>`_
- `Issue tracker <https://github.com/havardgulldahl/mopidy-plex/issues>`_
Credits
=======
- Original author: `havardgulldahl <https://github.com/havardgulldahl>`__
- Current maintainer: `havardgulldahl <https://github.com/havardgulldahl>`__
- `Contributors <https://github.com/havardgulldahl/mopidy-plex/graphs/contributors>`_
Changelog
=========
v0.1.0 (UNRELEASED)
----------------------------------------
v0.1.0b (2016-02-02)
----------------------------------------
- Initial beta release.
- Listing and searching Plex Server content works.
- Playing audio works.
- Please `file bugs <https://github.com/havardgulldahl/mopidy-plex/issues>`_.
| PypiClean |
/HPCCSystemsECLDOc-2.0.0.tar.gz/HPCCSystemsECLDOc-2.0.0/ecldoc/Formats/TXT/genTXT.py
import os
import re
from lxml import etree
from ecldoc.Utils import write_to_file
from ecldoc.Utils import joinpath, dirname
###################################################################
from ecldoc.Constants import TEMPLATE_DIR
TXT_TEMPLATE_DIR = joinpath(TEMPLATE_DIR, 'txt')
###################################################################
import jinja2
txt_jinja_env = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.abspath('/'))
)
def indent_doc(s, level, width=4) :
indention = (u' ' * (width-1) + u'| ') * level
rv = (u'\n' + indention).join(s.splitlines())
rv = indention + rv
return rv
txt_jinja_env.filters['indent_doc'] = indent_doc
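# Example: indent_doc("a\nb", level=1) returns "   | a\n   | b" -- each line
# is prefixed with (width-1) spaces plus "| ", repeated `level` times.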
##################################################################
import textwrap
CPL = 100
def _break(text, CPL_E) :
CPL_E = int(CPL_E)
break_text = textwrap.wrap(text, CPL_E)
if len(break_text) == 0 :
break_text = ['']
return break_text
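# Example: _break("hello world", 6) -> ['hello', 'world'], while
# _break("", 10) -> [''] (the guard guarantees at least one output line).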
##################################################################
from ecldoc.parseDoc import getTags
from ecldoc.Taglets import taglets
from .tagTXT import tag_renders
class ParseTXT(object) :
'''
Main class to generate TEXT Documentation for given ecl file
from its XML Repr
'''
def __init__(self, generator, ecl_file) :
self.xml_file = joinpath(generator.xml_root, ecl_file + '.xml')
self.txt_file = joinpath(generator.txt_root, ecl_file + '.txt')
self.template = generator.content_template
self.options = generator.options
os.makedirs(dirname(self.txt_file), exist_ok=True)
def parse(self) :
root = etree.parse(self.xml_file).getroot()
src = root.find('Source')
self.src = src
self.doc = src.find('Documentation')
for child in root.iter() :
attribs = child.attrib
### Convert links from XML FOrmat to TXT Format
if 'target' in attribs :
attribs['target'] = re.sub(r'\$\$_ECLDOC-FORM_\$\$', 'txt', attribs['target'])
attribs['target'] = re.sub(r'\.xml$', '.txt', attribs['target'])
self.parseSource()
render = self.template.render(src=src, defn_tree=self.defn_tree)
write_to_file(self.txt_file, render)
def docstring(self) :
text = ''
if self.doc is not None :
content = self.doc.find('firstline')
if content is not None :
text = content.text
return text
def parseSource(self) :
self.defn_tree = []
for defn in self.src.findall('Definition') :
self.parseDefinition(defn, self.defn_tree)
def parseDefinition(self, defn, defn_tree) :
headers = self.parseSign(defn)
doc = self.parseDocs(defn)
defn_dict = { 'headers' : headers, 'doc' : doc, 'defns' : [] }
for childdefn in defn.findall('Definition') :
self.parseDefinition(childdefn, defn_dict['defns'])
defn_tree.append(defn_dict)
def parseSign(self, defn) :
defn_type = defn.attrib['type']
sign = defn.find('Signature').text
hlen = int(defn.find('Signature').attrib['hlen'])
if defn.attrib['inherittype'] != 'local' :
sign += ' ||| ' + defn.attrib['inherittype'].upper()
heading = defn_type.upper() + ' : '
spaces = len(heading)
sign_break = _break(sign, CPL - spaces)
type_break = [heading] + ([' '*(spaces + hlen)] * (len(sign_break) - 1))
headers = [(a + b) for a,b in zip(type_break, sign_break)]
return headers
def parseDocs(self, defn) :
renders = {}
tags = getTags(defn.find('Documentation'))
always = ['param', 'field', 'return', 'parent', 'content', 'firstline']
common_tags = list(set(tags.keys()) | set(always))
for tag in common_tags :
if tag not in taglets or tag not in tag_renders :
if 'generaltag' in tag_renders :
render = tag_renders['generaltag'](
taglets['generaltag'](doc=tags[tag], defn=defn, tagname=tag))
renders[tag] = render
continue
render = tag_renders[tag](taglets[tag](doc=tags[tag], defn=defn, tagname=tag))
renders[tag] = render
return renders
############################################################################
class GenTXT(object) :
'''
Generate TXT Documentation for all ecl files from XML Format
'''
def __init__(self, input_root, output_root, ecl_file_tree, options) :
self.input_root = input_root
self.output_root = output_root
self.txt_root = joinpath(output_root, 'txt')
self.xml_root = joinpath(output_root, 'xml')
self.template_dir = TXT_TEMPLATE_DIR
self.content_template = txt_jinja_env.get_template(joinpath(self.template_dir, 'content.tpl.txt'))
self.toc_template = txt_jinja_env.get_template(joinpath(self.template_dir, 'toc.tpl.txt'))
self.ecl_file_tree = ecl_file_tree
self.options = options
def gen(self, key, node, content_root) :
'''
        Recursively walk the source tree dictionary.
        If current_node is a file: parse it using ParseTXT.
        Else if current_node is a directory: recurse and generate pkg.toc.txt
        for that dir (optionally including bundle info if present).
:param key: string | the name of current_node in path tree
:param node: dict | the parent tree of current_node
:param content_root: string | real path to current node in txt doc dir
'''
current_node = node[key]
if type(current_node) != dict:
if key == 'bundle.ecl' :
return None
parser = ParseTXT(self, current_node)
parser.parse()
file = { 'name' : key, 'target' : key + '.txt', 'type' : 'file', 'doc' : parser.docstring() }
return file
else :
file = { 'name' : key,'target': joinpath(key, 'pkg.toc.txt'), 'type': 'dir', 'doc' : '' }
bundle = None
if 'bundle.ecl' in current_node :
bundle_xml_path = joinpath(self.xml_root, dirname(current_node['bundle.ecl']), 'bundle.xml')
bundle = etree.parse(bundle_xml_path).getroot()
file['type'] = 'bundle'
childfiles = []
child_keys = sorted(current_node.keys(), key=str.lower)
for chkey in child_keys :
child_root = joinpath(content_root, chkey)
child_dict = self.gen(chkey, current_node, child_root)
if child_dict is not None : childfiles.append(child_dict)
childfiles = sorted(childfiles, key=lambda x : x['type'])
os.makedirs(content_root, exist_ok=True)
render = self.toc_template.render(name=key, files=childfiles, bundle=bundle)
render_path = joinpath(content_root, 'pkg.toc.txt')
write_to_file(render_path, render)
return file
def run(self) :
'''
Main function called by ecldoc
'''
print("\nGenerating TEXT Documentation ... ")
        self.gen('root', self.ecl_file_tree, self.txt_root)
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/Rules/rulesByArea.py
import numpy as np
from itertools import chain
import warnings
import copy
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.LookParam import LookParam
from grid2op.Rules.PreventReconnection import PreventReconnection
from grid2op.Rules.PreventDiscoStorageModif import PreventDiscoStorageModif
from grid2op.Exceptions import (
IllegalAction, Grid2OpException
)
class RulesByArea(BaseRules):
"""
    This subclass combines :class:`PreventReconnection` and :class:`PreventDiscoStorageModif`,
    applied on the whole grid at once, while a specific method checks the legality of
    simultaneous actions taken on defined areas of the grid.
    An action is declared legal if and only if:
    - It doesn't reconnect more power lines than what is stated in the current game parameters
      :class:`grid2op.Parameters`
    - It doesn't attempt to act on more substations and lines within each area than what is stated in the current game parameters
      :class:`grid2op.Parameters`
    - It doesn't attempt to modify the power produced by a turned off storage unit
Example
---------
If you want the environment to take into account the rules by area, you can achieve it with:
    .. code-block:: python
import grid2op
from grid2op.Rules.rulesByArea import RulesByArea
# First you set up the areas within the RulesByArea class
my_gamerules_byarea = RulesByArea([[0,1,2,3,4,5,6,7],[8,9,10,11,12,13,14]])
# Then you create your environment with it:
NAME_OF_THE_ENVIRONMENT = "l2rpn_case14_sandbox"
env = grid2op.make(NAME_OF_THE_ENVIRONMENT,gamerules_class=my_gamerules_byarea)
"""
def __init__(self, areas_list):
"""
The initialization of the rule with a list of list of ids of substations composing the aimed areas.
Parameters
----------
areas_list : list of areas, each placeholder containing the ids of substations of each defined area
"""
if isinstance(areas_list, list):
self.substations_id_by_area = {i : sorted(k) for i, k in enumerate(areas_list)}
elif isinstance(areas_list, dict):
self.substations_id_by_area = {i : copy.deepcopy(k) for i, k in areas_list.items()}
else:
raise Grid2OpException("Impossible to create a rules when area_list is neither a list nor a dict")
needs_cleaning = False
for area_nm, area_subs in self.substations_id_by_area.items():
if not np.array_equal(np.unique(area_subs), area_subs):
                warnings.warn(f"There are duplicate substations for area {area_nm}")
needs_cleaning = True
if needs_cleaning:
self.substations_id_by_area = {i : np.unique(k) for i, k in self.substations_id_by_area.items()}
def initialize(self, env):
"""
This function is used to inform the class instance about the environment
specification and check no substation of the grid are left ouside an area.
Parameters
----------
env: :class:`grid2op.Environment.Environment`
An environment instance properly initialized.
"""
n_sub = env.n_sub
n_sub_rule = np.sum([len(set(list_ids)) for list_ids in self.substations_id_by_area.values()])
if n_sub_rule != n_sub:
raise Grid2OpException("The number of listed ids of substations in rule initialization does not match the number of "
"substations of the chosen environement. Look for missing ids or doublon")
else:
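            # map each area to the ids of the power lines whose origin
            # substation belongs to that area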
self.lines_id_by_area = {key : sorted(list(chain(*[[item for item in np.where(env.line_or_to_subid == subid)[0]
] for subid in subid_list]))) for key,subid_list in self.substations_id_by_area.items()}
def __call__(self, action, env):
"""
        See :func:`BaseRules.__call__` for a definition of the parameters of this function.
"""
is_legal, reason = PreventDiscoStorageModif.__call__(self, action, env)
if not is_legal:
return False, reason
is_legal, reason = self._lookparam_byarea(action, env)
if not is_legal:
return False, reason
return PreventReconnection.__call__(self, action, env)
def can_use_simulate(self, nb_simulate_call_step, nb_simulate_call_episode, param):
return LookParam.can_use_simulate(
self, nb_simulate_call_step, nb_simulate_call_episode, param
)
def _lookparam_byarea(self, action, env):
"""
See :func:`BaseRules.__call__` for a definition of the parameters of this function.
"""
# at first iteration, env.current_obs is None...
powerline_status = env.get_current_line_status()
aff_lines, aff_subs = action.get_topological_impact(powerline_status)
if any([(aff_lines[line_ids]).sum() > env._parameters.MAX_LINE_STATUS_CHANGED for line_ids in self.lines_id_by_area.values()]):
ids = [[k for k in np.where(aff_lines)[0] if k in line_ids] for line_ids in self.lines_id_by_area.values()]
return False, IllegalAction(
"More than {} line status affected by the action in one area: {}"
"".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids)
)
if any([(aff_subs[sub_ids]).sum() > env._parameters.MAX_SUB_CHANGED for sub_ids in self.substations_id_by_area.values()]):
ids = [[k for k in np.where(aff_subs)[0] if k in sub_ids] for sub_ids in self.substations_id_by_area.values()]
return False, IllegalAction(
"More than {} substation affected by the action in one area: {}"
"".format(env.parameters.MAX_SUB_CHANGED, ids)
)
        return True, None
/CGATReport-0.9.1.tar.gz/CGATReport-0.9.1/doc/Tutorials.rst
.. _Tutorials:
*********
Tutorials
*********
The tutorials provide step-by-step introductions to the
main features of cgatreport.
Tutorials
=========
.. toctree::
:maxdepth: 2
Tutorial1.rst
Tutorial2.rst
Tutorial3.rst
Tutorial4.rst
Tutorial5.rst
Tutorial6.rst
Tutorial7.rst
Tutorial8.rst
Tutorial9.rst
Tutorial10.rst
Tutorial11.rst
Tutorial12.rst
Tutorial13.rst
Tutorial14.rst
Tutorial15.rst
Tutorial16.rst
Tutorial results
================
.. toctree::
:maxdepth: 1
Tutorial1Demo.rst
Tutorial2Demo.rst
Tutorial3Demo.rst
Tutorial4Demo.rst
Tutorial5Demo.rst
Tutorial6Demo.rst
| PypiClean |
/Langton-project-0.3.tar.gz/Langton-project-0.3/Langton-project_interface/main.py
import turtle
import random
""" Global variable """
visited_global = {}
orientation = [(0,1),(1,0),(0,-1),(-1,0)]
class Ant:
""" Creation of an ant on the screen """
global visited_global
ant_speed = 0
def __init__(self, ants_data, size_factor):
self.visited_local = [] # list of coordinates of an ant
self.ant_size = 1*size_factor/21
self.ant_id = ants_data['id']
self.ant = turtle.Turtle()
self.initial_direction = ants_data['direction_init']
self.ant.setheading(self.initial_direction) # initialize the direction
self.color = ants_data['color']
self.ant.color(self.color)
self.ant.penup() # lift the pen
self.ant.shape("square") # initialize the form
self.ant.shapesize(self.ant_size, self.ant_size, 0) # initialize the shape
self.ant.speed(self.ant_speed) # initialize the speed
self.x_initial = ants_data['x_init']
self.y_initial = ants_data['y_init']
self.ant.goto(self.x_initial * 21 * self.ant_size, self.y_initial * 21 * self.ant_size) # go to the initial position
self.ant.hideturtle() # hide the ant
def advance(self):
""" Advance the ant """
self.ant.forward(self.ant_size * 21)
def turn_right(self):
""" Rotates the ant 90° to the right """
self.ant.right(90)
def turn_left(self):
""" Rotates the ant 90° to the left """
self.ant.left(90)
def new_position(self):
""" Define how an ant behaves and stores our data """
global visited_global
        global orientation
x, y = self.ant.pos()
x, y = round(x), round(y)
if (x, y) not in visited_global:
# unvisited or white box
self.ant.color(self.color)
self.ant.stamp()
self.turn_right()
self.advance()
visited_global[(x, y)] = self.ant_id + 1
self.visited_local.append((x, y))
else:
# visited box
self.ant.color("white")
self.ant.stamp()
self.turn_left()
self.advance()
self.visited_local.append((x, y))
del visited_global[(x, y)]
def initial_noise(self, screen_x, screen_y, noise_ratio, color="black"):
""" Creation of noise """
global visited_global
counter = 0
self.ant.penup()
self.ant.color(color)
while counter != (screen_x * screen_y * noise_ratio):
x, y = random.uniform(-screen_x//2, screen_x//2), random.uniform(-screen_y//2, screen_y//2)
x, y = round(x), round(y)
if (x, y) not in visited_global:
self.ant.goto(x*21*self.ant_size, y*21*self.ant_size)
self.ant.stamp()
counter += 1
visited_global[(round(x * 21 * self.ant_size), round(y * 21 * self.ant_size))] = self.ant_id + 1
self.ant.goto(screen_x + 1, screen_y + 1)
def initialisation(ants_data, screen_x, screen_y, noise, noise_ratio, size_factor):
    """ ants_data: a list of dicts, one per ant, with keys (x_init, y_init, direction_init, color, id) """
    global visited_global
    # screen initialization
nb_fourmis = len(ants_data)
turtle.TurtleScreen._RUNNING = True # avoid Terminator error
visited_global = {}
screen = turtle.Screen()
screen.title("Langton's ant")
screen.mode("logo") # changes initial orientation to top
screen.clearscreen()
screen.bgcolor("light gray")
screen.screensize(screen_x*size_factor, screen_y*size_factor)
turtle.tracer(0, 0)
ant_list = [0]*nb_fourmis
if noise:
ant = Ant({'x_init': 0, 'y_init': 0, 'direction_init': 0,
'color': (0,0,0), 'id': 0}, size_factor).initial_noise(screen_x,
screen_y, noise_ratio)
for ant_id in range(nb_fourmis):
ant = Ant(ants_data[ant_id], size_factor)
ant_list[ant_id] = ant
return ant_list
def move(ant_list, steps, animation, speed=200):
""" Makes all defined ants move """
if animation :
turtle.tracer(speed, 0)
for step in range(steps):
for ant in ant_list:
ant.new_position()
turtle.exitonclick()
    return ant_list
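# Illustrative usage sketch (an assumption -- not part of the original module);
# the ants_data entries mirror the keys consumed by Ant.__init__ above.
if __name__ == "__main__":
    sample_ants = [
        {'x_init': 0, 'y_init': 0, 'direction_init': 0, 'color': (1, 0, 0), 'id': 0},
        {'x_init': 5, 'y_init': 5, 'direction_init': 90, 'color': (0, 0, 1), 'id': 1},
    ]
    ants = initialisation(sample_ants, screen_x=80, screen_y=80,
                          noise=False, noise_ratio=0.0, size_factor=1)
    move(ants, steps=1000, animation=True)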
/DE_Parametros-0.0.16.2.tar.gz/DE_Parametros-0.0.16.2/DE_Parametros.py
import datetime as dt
import DE_DataBase as D
import DE_LibUtil as U
import pandas as pd
import json
import inspect
import ast
import datetime as dt
class PARAMETROS:
NAME_CLASS = inspect.stack()[0].function
def __init__(self, **kwargs):
msg = None
function_name = inspect.stack()[0].function
try:
db = D.DATABASE()
#self._cnn = db.SQLITE(kwargs["database"])
self._cnn = kwargs["conexao"]
self._nome_tabela = kwargs["table"]
if self._nome_tabela is None:
self._nome_tabela = "SYS_PAR"
except Exception as error:
pass
finally:
pass
def _insert_rows(self, rows: list, commit: bool = True):
msg = None
function_name = inspect.stack()[0].function
utl = U.LIB()
try:
dml = f"""
Insert into {self._nome_tabela}
(hash
,hash_parent
,num_ordem
,des_aplicacao
,des_grupo
,nom_parametro
,nom_variavel
,val_parametro
,des_datatype
,des_parametro
,flg_status
,flg_nullable
,flg_updateable
,dat_ini_vigencia
,dat_fim_vigencia
,dat_update)
values (:hash
,:hash_parent
,:num_ordem
,:des_aplicacao
,:des_grupo
,:nom_parametro
,:nom_variavel
,:val_parametro
,:des_datatype
,:des_parametro
,:flg_status
,:flg_nullable
,:flg_updateable
,:dat_ini_vigencia
,:dat_fim_vigencia
,:dat_update)
"""
cur = self._cnn.cursor()
cur.executemany(dml, rows)
cur.close()
if commit:
self._cnn.commit()
msg = f"""Registro(s) incluido(s) com sucesso!"""
except Exception as error:
msg = f"""Não foi possivel incluir o(s) registro(s). Motivo: {error}"""
finally:
return msg
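    # Illustrative row shape for _insert_rows / _update_row (keys mirror the
    # bind names in the DML above; the values shown are assumptions):
    #   {'hash': 'abc123', 'hash_parent': None, 'num_ordem': '0',
    #    'des_aplicacao': 'MY_APP', 'des_grupo': 'GENERAL',
    #    'nom_parametro': 'MY_APP_TIMEOUT', 'nom_variavel': 'timeout',
    #    'val_parametro': '30', 'des_datatype': 'INTEGER',
    #    'des_parametro': 'request timeout in seconds', 'flg_status': 'A',
    #    'flg_nullable': 'N', 'flg_updateable': 'S',
    #    'dat_ini_vigencia': '2020-01-01 00:00:00', 'dat_fim_vigencia': None,
    #    'dat_update': '2020-01-01 00:00:00'}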
def _update_row(self, row: dict, commit: bool = True):
msg = None
function_name = inspect.stack()[0].function
utl = U.LIB()
try:
dml = f"""UPDATE {self._nome_tabela} SET %s = '%s'" %(.join({row.keys()}), .join({row.values()}))"""
dml = f"""
Update {self._nome_tabela}
set hash_parent = :hash_parent
,num_ordem = :num_ordem
,des_aplicacao = :des_aplicacao
,des_grupo = :des_grupo
,nom_parametro = :nom_parametro
,nom_variavel = :nom_variavel
,val_parametro = :val_parametro
,des_datatype = :des_datatype
,des_parametro = :des_parametro
,flg_status = :flg_status
,flg_nullable = :flg_nullable
,flg_updateable = :flg_updateable
,dat_ini_vigencia = :dat_ini_vigencia
,dat_fim_vigencia = :dat_fim_vigencia
,dat_update = :dat_update
where hash = :hash
"""
cur = self._cnn.cursor()
cur.execute(dml, row)
cur.close()
if commit:
self._cnn.commit()
msg = f"""Registro alterado com sucesso!"""
except Exception as error:
msg = f"""Não foi possivel alterar o registro. Motivo: {error}"""
finally:
return msg
def _delete_row(self, hash: str, commit: bool = True):
msg = None
function_name = inspect.stack()[0].function
utl = U.LIB()
try:
dml = f"""
Delete from {self._nome_tabela}
where hash = '{hash}'
"""
cur = self._cnn.cursor()
cur.execute(dml)
if commit:
self._cnn.commit()
cur.close()
msg = f"""Registro deletado com sucesso!"""
except Exception as error:
msg = f"""Não foi possivel deletar o registro. Motivo: {error}"""
finally:
return msg
    def APLICACAO_get(self, des_aplicacao: list = None) -> list:
result, where_add = None, None
try:
utl = U.LIB()
now = dt.datetime.now()
separador_listas = "|"
if des_aplicacao is None:
where_add = ""
else:
where_add = f"""and p.des_processo in ('{"','".join(des_aplicacao)}')"""
stmt = f"""
Select p.hash
,p.hash_parent
,p.num_ordem
,p.des_processo
,p.des_grupo
,p.nom_parametro
,p.nom_variavel
,p.val_parametro
,p.des_datatype
,p.des_parametro
,p.flg_nullable
,p.flg_updateable
,p.flg_encrypt
,p.dat_ini_vigencia
,p.dat_fim_vigencia
,p.timestamp
from {self._nome_tabela} p
where dat_fim_vigencia = '31-12-9999 23:59:59'
and p.Dat_ini_vigencia <= '{now.strftime("%Y-%m-%d %H:%M:%S")}'
and p.flg_status = 'A'
{where_add}
order by p.des_processo
"""
cur = self._cnn.cursor()
cur.execute(stmt)
columns = [column[0] for column in cur.description]
rs = []
PAR = {}
            # populate the ResultSet (rs)
            for row in cur.fetchall():
                rs.append(dict(zip(columns, row)))
# avalidando os DataTypes para o ResultSet
for row in rs:
if row["flg_encrypt"] == "S":
#token_string = self.VARIAVEL_get(["GERAL_TOKEN_BASE"])[0]
#row["val_parametro"] = utl.CRYPTOGRAPHY(word=row["val_parametro"], token=token_string)
row["val_parametro"] = utl.base64_decrypt(word=row["val_parametro"])
if row["des_datatype"] == "DATETIME":
row["val_parametro"] = dt.datetime.strptime(utl.iif(row["val_parametro"] is None, "", row["val_parametro"]), "%Y-%m-%d %H:%M:%S")
elif row["des_datatype"] == "DATE":
row["val_parametro"] = dt.datetime.strptime(utl.iif(row["val_parametro"] is None, "", row["val_parametro"]), "%Y-%m-%d")
elif row["des_datatype"] == "TIME":
row["val_parametro"] = dt.datetime.strptime(utl.iif(row["val_parametro"] is None, "", row["val_parametro"]), "%H:%M:%S")
elif row["des_datatype"] == "INTEGER":
row["val_parametro"] = int(utl.iif(row["val_parametro"] is None, "0", row["val_parametro"]))
elif row["des_datatype"] == "LIST":
row["val_parametro"] = row["val_parametro"].split(separador_listas)
elif row["des_datatype"] == "JSON":
#row["val_parametro"] = ast.literal_eval(row["val_parametro"])
row["val_parametro"] = json.loads(row["val_parametro"])
elif row["des_datatype"] == "LIST/JSON":
row["val_parametro"] = row["val_parametro"].split(separador_listas)
for i in range(len(row["val_parametro"])):
#row["val_parametro"][i] = ast.literal_eval(row["val_parametro"][i])
#row["val_parametro"][i] = ast.literal_eval(row["val_parametro"][i])
row["val_parametro"][i] = json.loads(row["val_parametro"][i])
except Exception as error:
rs = f"""Falha na obtenção dos parametros para a FAMILIA de PARAMETROS: {des_aplicacao}.\nErro: {error}"""
finally:
return rs
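    # Illustrative call (argument values are assumptions):
    #   PARAMETROS(conexao=cnn, table='SYS_PAR').APLICACAO_get(['MY_APP'])
    # returns a list of row dicts whose 'val_parametro' is already cast
    # according to 'des_datatype' (e.g. 'LIST' values come back as Python
    # lists split on '|', 'JSON' values as dicts).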
def PARAMETRO_set(self, nome_parametro: str, value: "", commit:bool = True):
result = None
try:
stmt = f"""Update {self._nome_tabela} set val_parametro = '{value}' where nom_parametro = '{nome_parametro}'"""
cur = self._cnn.cursor()
cur.execute(stmt)
if commit:
self._cnn.commit()
except Exception as error:
result = error
finally:
cur.close()
return result
def VARIAVEL_get(self, nom_parametro: list) -> list:
result, where_add, values = None, None, None
try:
now = dt.datetime.now()
if nom_parametro is None:
where_add = ""
else:
where_add = f"""and p.nom_parametro in ('{"','".join(nom_parametro)}')"""
stmt = f"""
Select *
from {self._nome_tabela} p
where dat_fim_vigencia is Null
and p.Dat_ini_vigencia <= '{now.strftime("%Y-%m-%d %H:%M:%S")}'
and p.flg_status = 'A'
{where_add}
order by p.nom_parametro
"""
cur = self._cnn.cursor()
cur.execute(stmt)
columns = [column[0] for column in cur.description]
rs = []
            # populate the ResultSet (rs)
for row in cur.fetchall():
rs.append(dict(zip(columns, row)))
            # validate the DataTypes in the ResultSet
values = []
for row in rs:
if row["des_datatype"] == "JSON":
row["val_parametro"] = ast.literal_eval(row["val_parametro"])
if isinstance(row["val_parametro"], bytes):
values.append(row["val_parametro"].decode())
else:
values.append(row["val_parametro"])
except Exception as error:
values = f"""Falha na obtenção dos parametros para a nom_parametro: {nom_parametro}.\nErro: {error}"""
finally:
return values
def REPLICA_APLICACAO(self, des_aplicacao_origem: str, des_aplicacao_destino: str, commit: bool = True):
try:
stmt = f"""Select *
from {self._nome_tabela}
where nom_familia = :nom_familia
"""
cur = self._cnn.cursor()
cur.execute(stmt)
rows = cur.fetchmany()
for row in rows:
pass
cur.close()
if commit:
self._cnn.commit()
except Exception as error:
pass
finally:
pass
def INSERT_ROWS(self, row: list):
msg = self._insert_rows(row)
return msg
def UPDATE_ROW(self, row: dict):
msg = self._update_row(row)
return msg
def DELETE_ROW(self, hash: str):
msg = self._delete_row(hash)
return msg
def _oracle_DDL(self):
try:
stmt = f"""
(hash VARCHAR2(256) PRIMARY KEY NOT NULL UNIQUE,
hash_parent VARCHAR2(256),
num_ordem VARCHAR (10) DEFAULT ('0'),
des_aplicacao VARCHAR (30) NOT NULL,
des_grupo VARCHAR (30) NOT NULL,
nom_parametro VARCHAR2(256) UNIQUE NOT NULL,
nom_variavel VARCHAR2 (256) NOT NULL
val_parametro CLOB,
des_datatype STRING NOT NULL,
des_parametro VARCHAR2 (500),
flg_status VARCHAR (10) NOT NULL DEFAULT ('A'),
flg_nullable VARCHAR (10) NOT NULL DEFAULT ('N'),
flg_updateable VARCHAR (10) DEFAULT ('N') NOT NULL,
dat_ini_vigencia DATE DEFAULT (CURRENT_TIMESTAMP) NOT NULL,
dat_fim_vigencia DATE,
dat_update TIMESTAMP NOT NULL DEFAULT (CURRENT_TIMESTAMP)
"""
cur = self._cnn.cursor()
cur.execute(stmt)
cur.close()
except Exception as error:
pass
finally:
pass
def _sqlite_DDL(self):
try:
stmt = f"""
CREATE TABLE {self._nome_tabela}
(hash TEXT PRIMARY KEY NOT NULL UNIQUE,
hash_parent TEXT,
num_ordem VARCHAR (10) DEFAULT ('0'),
des_aplicacao VARCHAR (30),
des_grupo VARCHAR (30),
nom_parametro TIME (256),
nom_variavel VARCHAR2 (256) UNIQUE NOT NULL,
val_parametro TEXT,
des_datatype STRING NOT NULL,
des_parametro VARCHAR2 (500),
flg_status VARCHAR (10) NOT NULL DEFAULT ('A'),
flg_nullable VARCHAR (10) NOT NULL DEFAULT ('N'),
flg_updateable VARCHAR (10) DEFAULT ('N') NOT NULL,
dat_ini_vigencia DATETIME DEFAULT (CURRENT_TIMESTAMP) NOT NULL,
dat_fim_vigencia DATETIME,
dat_update DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP)
)
"""
cur = self._cnn.cursor()
cur.execute(stmt)
cur.close()
except Exception as error:
pass
finally:
            pass
/Custom%20Accounts%20Django-0.1.1560515473.tar.gz/Custom Accounts Django-0.1.1560515473/custom_accounts_django/views.py
from urllib.parse import urlparse
from django.contrib import auth
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.shortcuts import render, redirect, render_to_response
# Create your views here.
# helper that builds a template context containing a csrf token
# and a login form
from django.template.context_processors import csrf
from django.utils.http import urlunquote, is_safe_url
from django.views import View
from .models import CustomUserCreationForm
def index(request):
return render(request, "accounts_index.html")
def create_context_username_csrf(request):
context = {}
context.update(csrf(request))
context["login_form"] = AuthenticationForm
return context
def get_next_url(request):
next = request.META.get("HTTP_REFERER")
if next:
next = urlunquote(next) # HTTP_REFERER may be encoded.
if not is_safe_url(url=next, allowed_hosts=request.get_host()):
next = "/"
return next
class LoginView(View):
def get(self, request):
# if the user is logged in, then do a redirect to the home page
if auth.get_user(request).is_authenticated:
return redirect("/")
else:
            # Otherwise, build a context with the authorization form
            # and render the login page with it.
            # This works both for /admin/login/ and for /accounts/login/
context = create_context_username_csrf(request)
return render_to_response("login.html", context=context)
def post(self, request):
# having received the authorization request
form = AuthenticationForm(request, data=request.POST)
        # validate the form: the user exists and entered the correct password
if form.is_valid():
            # on success, log the user in
auth.login(request, form.get_user())
# get previous url
next = urlparse(get_next_url(request)).path
            # and if the user is a staff member and came through url /admin/login/
# then redirect the user to the admin panel
if next == "/admin/login/" and request.user.is_staff:
return redirect("/admin/")
            # otherwise redirect to the previous page;
            # for /accounts/login/ this means another redirect to the home page,
            # for any other url the user is sent back to that url
return redirect(next)
# If not true, then the user will appear on the login page
# and see an error message
context = create_context_username_csrf(request)
context["login_form"] = form
return render_to_response("login.html", context=context)
def pagelogout(request):
logout(request)
return redirect("index")
class RegisterView(View):
def get(self, request):
form = CustomUserCreationForm()
return render(request, "register.html", {"form": form})
def post(self, request):
form = CustomUserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
raw_password = form.cleaned_data.get("password1")
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect("index")
        return render(request, "register.html", {"form": form})
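# Minimal URLconf sketch (an assumption -- not shipped with this module)
# showing how these views could be wired to the names they redirect to,
# e.g. redirect("index") above:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('login/', views.LoginView.as_view(), name='login'),
#       path('logout/', views.pagelogout, name='logout'),
#       path('register/', views.RegisterView.as_view(), name='register'),
#   ]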
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/gui/qt/address_list.py
import webbrowser
from electrum_zcash.i18n import _
from electrum_zcash.util import block_explorer_URL
from electrum_zcash.plugins import run_hook
from electrum_zcash.bitcoin import is_address
from .util import *
class AddressList(MyTreeWidget):
filter_columns = [0, 1, 2, 3] # Type, Address, Label, Balance
def __init__(self, parent=None):
MyTreeWidget.__init__(self, parent, self.create_menu, [], 2)
self.refresh_headers()
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.show_change = 0
self.show_used = 0
self.change_button = QComboBox(self)
self.change_button.currentIndexChanged.connect(self.toggle_change)
for t in [_('All'), _('Receiving'), _('Change')]:
self.change_button.addItem(t)
self.used_button = QComboBox(self)
self.used_button.currentIndexChanged.connect(self.toggle_used)
for t in [_('All'), _('Unused'), _('Funded'), _('Used')]:
self.used_button.addItem(t)
def get_toolbar_buttons(self):
return QLabel(_("Filter:")), self.change_button, self.used_button
def on_hide_toolbar(self):
self.show_change = 0
self.show_used = 0
self.update()
def save_toolbar_state(self, state, config):
config.set_key('show_toolbar_addresses', state)
def refresh_headers(self):
headers = [_('Type'), _('Address'), _('Label'), _('Balance')]
fx = self.parent.fx
if fx and fx.get_fiat_address_config():
headers.extend([_(fx.get_currency()+' Balance')])
headers.extend([_('Tx')])
self.update_headers(headers)
def toggle_change(self, state):
if state == self.show_change:
return
self.show_change = state
self.update()
def toggle_used(self, state):
if state == self.show_used:
return
self.show_used = state
self.update()
def on_update(self):
self.wallet = self.parent.wallet
item = self.currentItem()
current_address = item.data(0, Qt.UserRole) if item else None
if self.show_change == 1:
addr_list = self.wallet.get_receiving_addresses()
elif self.show_change == 2:
addr_list = self.wallet.get_change_addresses()
else:
addr_list = self.wallet.get_addresses()
self.clear()
for address in addr_list:
num = len(self.wallet.get_address_history(address))
is_used = self.wallet.is_used(address)
label = self.wallet.labels.get(address, '')
c, u, x = self.wallet.get_addr_balance(address)
balance = c + u + x
if self.show_used == 1 and (balance or is_used):
continue
if self.show_used == 2 and balance == 0:
continue
if self.show_used == 3 and not is_used:
continue
balance_text = self.parent.format_amount(balance, whitespaces=True)
fx = self.parent.fx
if fx and fx.get_fiat_address_config():
rate = fx.exchange_rate()
fiat_balance = fx.value_str(balance, rate)
address_item = SortableTreeWidgetItem(['', address, label, balance_text, fiat_balance, "%d"%num])
for i in range(6):
if i > 2:
address_item.setTextAlignment(i, Qt.AlignRight)
address_item.setFont(i, QFont(MONOSPACE_FONT))
else:
address_item = SortableTreeWidgetItem(['', address, label, balance_text, "%d"%num])
for i in range(5):
if i > 2:
address_item.setTextAlignment(i, Qt.AlignRight)
address_item.setFont(i, QFont(MONOSPACE_FONT))
if self.wallet.is_change(address):
address_item.setText(0, _('change'))
address_item.setBackground(0, ColorScheme.YELLOW.as_color(True))
else:
address_item.setText(0, _('receiving'))
address_item.setBackground(0, ColorScheme.GREEN.as_color(True))
address_item.setFont(1, QFont(MONOSPACE_FONT))
address_item.setData(0, Qt.UserRole, address) # column 0; independent from address column
if self.wallet.is_frozen(address):
address_item.setBackground(1, ColorScheme.BLUE.as_color(True))
if self.wallet.is_beyond_limit(address):
address_item.setBackground(1, ColorScheme.RED.as_color(True))
self.addChild(address_item)
if address == current_address:
self.setCurrentItem(address_item)
def create_menu(self, position):
from electrum_zcash.wallet import Multisig_Wallet
is_multisig = isinstance(self.wallet, Multisig_Wallet)
can_delete = self.wallet.can_delete_address()
selected = self.selectedItems()
multi_select = len(selected) > 1
addrs = [item.text(1) for item in selected]
if not addrs:
return
if not multi_select:
item = self.itemAt(position)
col = self.currentColumn()
if not item:
return
addr = addrs[0]
if not is_address(addr):
item.setExpanded(not item.isExpanded())
return
menu = QMenu()
if not multi_select:
column_title = self.headerItem().text(col)
copy_text = item.text(col)
menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(copy_text))
menu.addAction(_('Details'), lambda: self.parent.show_address(addr))
if col in self.editable_columns:
menu.addAction(_("Edit {}").format(column_title), lambda: self.editItem(item, col))
menu.addAction(_("Request payment"), lambda: self.parent.receive_at(addr))
if self.wallet.can_export():
menu.addAction(_("Private key"), lambda: self.parent.show_private_key(addr))
if not is_multisig and not self.wallet.is_watching_only():
menu.addAction(_("Sign/verify message"), lambda: self.parent.sign_verify_message(addr))
menu.addAction(_("Encrypt/decrypt message"), lambda: self.parent.encrypt_message(addr))
if can_delete:
menu.addAction(_("Remove from wallet"), lambda: self.parent.remove_address(addr))
addr_URL = block_explorer_URL(self.config, 'addr', addr)
if addr_URL:
menu.addAction(_("View on block explorer"), lambda: webbrowser.open(addr_URL))
if not self.wallet.is_frozen(addr):
menu.addAction(_("Freeze"), lambda: self.parent.set_frozen_state([addr], True))
else:
menu.addAction(_("Unfreeze"), lambda: self.parent.set_frozen_state([addr], False))
coins = self.wallet.get_utxos(addrs)
if coins:
menu.addAction(_("Spend from"), lambda: self.parent.spend_coins(coins))
run_hook('receive_menu', menu, addrs, self.wallet)
menu.exec_(self.viewport().mapToGlobal(position))
def on_permit_edit(self, item, column):
# labels for headings, e.g. "receiving" or "used" should not be editable
        return item.childCount() == 0
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/actors/ActorObject.py
from JumpScale import j
import struct
import hashlib
import yaml
import json
from pymodel.serializers import YamlSerializer
class ActorObject(object):
"""
ActorObject
"""
_DATA_TYPE_ID = 2
_KEY_PREFIX = 'ActorObject_'
def __init__(self, service):
"""
ActorObject constructor
@param service: service the ActorObject relates to
@type service: LocalService
"""
self._service = service
self.model = None
def serializeToYAML(self):
"""
Serialize the model object to YAML
"""
return self.model.serialize(YamlSerializer)
def serializeToJSON(self):
"""
Serialize the model object
"""
yaml2=self.serializeToYAML()
objectdicts=yaml.load(yaml2)
#@todo P1 better code required, need to serialize directly to JSON, now too slow
return json.dumps(objectdicts)
def pm_getId(self):
"""
Retrieves the last registered id for this type of ActorObject, increments and registers it.
"""
key = '%(prefix)s%(actorObjectName)s' % {'prefix': self._KEY_PREFIX,
'actorObjectName': self.__class__.__name__}
retryAttempts = 5
id = None
if self._service.db.exists(key):
while retryAttempts > 0:
lastId = int(self._service.db.get(key))
id = lastId + 1
if self._service.db.testAndSet(key, str(lastId), str(id)) == str(lastId):
break
retryAttempts -= 1
if not id:
raise RuntimeError('Could not get an unique id for the actor object')
else:
id = 1
self._service.db.set(key, str(id))
return id
@classmethod
def deserialize(cls, data, service):
"""
Deserializes an ActorObject instance
@return: deserialized ActorObject instance
@rtype: ActorObject
"""
dataTypeId = struct.unpack('B', data[0])[0]
if dataTypeId != cls._DATA_TYPE_ID:
raise RuntimeError('Could not deserialize data, unknown dataTypeId: %(dataTypeId)s' % {'dataTypeId':dataTypeId})
dataHash = hashlib.md5(data[:-16]).digest()
if dataHash != data[-16:]:
raise RuntimeError('Could not deserialize data, incorrect data hash')
actorObjectTypeLen = struct.unpack('B', data[1])[0]
actorObjectType = data[2:actorObjectTypeLen + 2]
actorObjectManagerType = '%(actorObjectType)smanager' % {'actorObjectType': actorObjectType.lower()}
actorObjectManager = getattr(service.extensions, actorObjectManagerType)
actorObject = actorObjectManager.get()
serializedModel = data[actorObjectTypeLen + 2:-16]
actorObject.model = actorObject.model.deserialize(j.db.pymodelserializers.thriftbase64, serializedModel)
return actorObject
def serialize(self):
"""
Serializes an ActorObject instance.
@return: serialized ActorObject instance
@rtype: String
"""
actorObjectType = self.__class__.__name__
serializedModel = self.model.serialize(j.db.pymodelserializers.thriftbase64)
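        # resulting byte layout (mirrors the packing below):
        #   [1B data-type id][1B type-name length][type name]
        #   [thrift/base64-serialized model][16B md5 of everything before it]
        # deserialize() above unpacks the same layout in reverse.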
data = struct.pack('B', self._DATA_TYPE_ID)
data += struct.pack('B', len(actorObjectType))
data += actorObjectType
data += serializedModel
data += hashlib.md5(data).digest()
return data
def __repr__(self):
        return str(self.model)
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mdnd/AreaManager.js
define("dojox/mdnd/AreaManager",["dojo/_base/kernel","dojo/_base/declare","dojo/_base/connect","dojo/_base/window","dojo/_base/array","dojo/query","dojo/_base/html","./Moveable"],function(_1){
var am=_1.declare("dojox.mdnd.AreaManager",null,{autoRefresh:true,areaClass:"dojoxDndArea",dragHandleClass:"dojoxDragHandle",constructor:function(){
this._areaList=[];
this.resizeHandler=_1.connect(_1.global,"onresize",this,function(){
this._dropMode.updateAreas(this._areaList);
});
this._oldIndexArea=this._currentIndexArea=this._oldDropIndex=this._currentDropIndex=this._sourceIndexArea=this._sourceDropIndex=-1;
},init:function(){
this.registerByClass();
},registerByNode:function(_2,_3){
var _4=this._getIndexArea(_2);
if(_2&&_4==-1){
var _5=_2.getAttribute("accept");
var _6=(_5)?_5.split(/\s*,\s*/):["text"];
var _7={"node":_2,"items":[],"coords":{},"margin":null,"accept":_6,"initItems":false};
_1.forEach(this._getChildren(_2),function(_8){
this._setMarginArea(_7,_8);
_7.items.push(this._addMoveableItem(_8));
},this);
this._areaList=this._dropMode.addArea(this._areaList,_7);
if(!_3){
this._dropMode.updateAreas(this._areaList);
}
_1.publish("/dojox/mdnd/manager/register",[_2]);
}
},registerByClass:function(){
_1.query("."+this.areaClass).forEach(function(_9){
this.registerByNode(_9,true);
},this);
this._dropMode.updateAreas(this._areaList);
},unregister:function(_a){
var _b=this._getIndexArea(_a);
if(_b!=-1){
_1.forEach(this._areaList[_b].items,function(_c){
this._deleteMoveableItem(_c);
},this);
this._areaList.splice(_b,1);
this._dropMode.updateAreas(this._areaList);
return true;
}
return false;
},_addMoveableItem:function(_d){
_d.setAttribute("tabIndex","0");
var _e=this._searchDragHandle(_d);
var _f=new dojox.mdnd.Moveable({"handle":_e,"skip":true},_d);
_1.addClass(_e||_d,"dragHandle");
var _10=_d.getAttribute("dndType");
var _11={"item":_f,"type":_10?_10.split(/\s*,\s*/):["text"],"handlers":[_1.connect(_f,"onDragStart",this,"onDragStart")]};
if(dijit&&dijit.byNode){
var _12=dijit.byNode(_d);
if(_12){
_11.type=_12.dndType?_12.dndType.split(/\s*,\s*/):["text"];
_11.handlers.push(_1.connect(_12,"uninitialize",this,function(){
this.removeDragItem(_d.parentNode,_f.node);
}));
}
}
return _11;
},_deleteMoveableItem:function(_13){
_1.forEach(_13.handlers,function(_14){
_1.disconnect(_14);
});
var _15=_13.item.node,_16=this._searchDragHandle(_15);
_1.removeClass(_16||_15,"dragHandle");
_13.item.destroy();
},_getIndexArea:function(_17){
if(_17){
for(var i=0;i<this._areaList.length;i++){
if(this._areaList[i].node===_17){
return i;
}
}
}
return -1;
},_searchDragHandle:function(_18){
if(_18){
var _19=this.dragHandleClass.split(" "),_1a=_19.length,_1b="";
_1.forEach(_19,function(css,i){
_1b+="."+css;
if(i!=_1a-1){
_1b+=", ";
}
});
return _1.query(_1b,_18)[0];
}
},addDragItem:function(_1c,_1d,_1e,_1f){
var add=true;
if(!_1f){
add=_1c&&_1d&&(_1d.parentNode===null||(_1d.parentNode&&_1d.parentNode.nodeType!==1));
}
if(add){
var _20=this._getIndexArea(_1c);
if(_20!==-1){
var _21=this._addMoveableItem(_1d),_22=this._areaList[_20].items;
if(0<=_1e&&_1e<_22.length){
var _23=_22.slice(0,_1e),_24=_22.slice(_1e,_22.length);
_23[_23.length]=_21;
this._areaList[_20].items=_23.concat(_24);
_1c.insertBefore(_1d,_22[_1e].item.node);
}else{
this._areaList[_20].items.push(_21);
_1c.appendChild(_1d);
}
this._setMarginArea(this._areaList[_20],_1d);
this._areaList[_20].initItems=false;
return true;
}
}
return false;
},removeDragItem:function(_25,_26){
var _27=this._getIndexArea(_25);
if(_25&&_27!==-1){
var _28=this._areaList[_27].items;
for(var j=0;j<_28.length;j++){
if(_28[j].item.node===_26){
this._deleteMoveableItem(_28[j]);
_28.splice(j,1);
return _25.removeChild(_26);
}
}
}
return null;
},_getChildren:function(_29){
var _2a=[];
_1.forEach(_29.childNodes,function(_2b){
if(_2b.nodeType==1){
if(dijit&&dijit.byNode){
var _2c=dijit.byNode(_2b);
if(_2c){
if(!_2c.dragRestriction){
_2a.push(_2b);
}
}else{
_2a.push(_2b);
}
}else{
_2a.push(_2b);
}
}
});
return _2a;
},_setMarginArea:function(_2d,_2e){
if(_2d&&_2d.margin===null&&_2e){
_2d.margin=_1._getMarginExtents(_2e);
}
},findCurrentIndexArea:function(_2f,_30){
this._oldIndexArea=this._currentIndexArea;
this._currentIndexArea=this._dropMode.getTargetArea(this._areaList,_2f,this._currentIndexArea);
if(this._currentIndexArea!=this._oldIndexArea){
if(this._oldIndexArea!=-1){
this.onDragExit(_2f,_30);
}
if(this._currentIndexArea!=-1){
this.onDragEnter(_2f,_30);
}
}
return this._currentIndexArea;
},_isAccepted:function(_31,_32){
this._accept=false;
for(var i=0;i<_32.length;++i){
for(var j=0;j<_31.length;++j){
if(_31[j]==_32[i]){
this._accept=true;
break;
}
}
}
},onDragStart:function(_33,_34,_35){
if(this.autoRefresh){
this._dropMode.updateAreas(this._areaList);
}
var _36=(_1.isWebKit)?_1.body():_1.body().parentNode;
if(!this._cover){
this._cover=_1.create("div",{"class":"dndCover"});
this._cover2=_1.clone(this._cover);
_1.addClass(this._cover2,"dndCover2");
}
var h=_36.scrollHeight+"px";
this._cover.style.height=this._cover2.style.height=h;
_1.body().appendChild(this._cover);
_1.body().appendChild(this._cover2);
this._dragStartHandler=_1.connect(_33.ownerDocument,"ondragstart",_1,"stopEvent");
this._sourceIndexArea=this._lastValidIndexArea=this._currentIndexArea=this._getIndexArea(_33.parentNode);
var _37=this._areaList[this._sourceIndexArea];
var _38=_37.items;
for(var i=0;i<_38.length;i++){
if(_38[i].item.node==_33){
this._dragItem=_38[i];
this._dragItem.handlers.push(_1.connect(this._dragItem.item,"onDrag",this,"onDrag"));
this._dragItem.handlers.push(_1.connect(this._dragItem.item,"onDragEnd",this,"onDrop"));
_38.splice(i,1);
this._currentDropIndex=this._sourceDropIndex=i;
break;
}
}
var _39=null;
if(this._sourceDropIndex!==_37.items.length){
_39=_37.items[this._sourceDropIndex].item.node;
}
if(_1.isIE>7){
this._eventsIE7=[_1.connect(this._cover,"onmouseover",_1,"stopEvent"),_1.connect(this._cover,"onmouseout",_1,"stopEvent"),_1.connect(this._cover,"onmouseenter",_1,"stopEvent"),_1.connect(this._cover,"onmouseleave",_1,"stopEvent")];
}
var s=_33.style;
s.left=_34.x+"px";
s.top=_34.y+"px";
if(s.position=="relative"||s.position==""){
s.position="absolute";
}
this._cover.appendChild(_33);
this._dropIndicator.place(_37.node,_39,_35);
_1.addClass(_33,"dragNode");
this._accept=true;
_1.publish("/dojox/mdnd/drag/start",[_33,_37,this._sourceDropIndex]);
},onDragEnter:function(_3a,_3b){
if(this._currentIndexArea===this._sourceIndexArea){
this._accept=true;
}else{
this._isAccepted(this._dragItem.type,this._areaList[this._currentIndexArea].accept);
}
},onDragExit:function(_3c,_3d){
this._accept=false;
},onDrag:function(_3e,_3f,_40,_41){
var _42=this._dropMode.getDragPoint(_3f,_40,_41);
this.findCurrentIndexArea(_42,_40);
if(this._currentIndexArea!==-1&&this._accept){
this.placeDropIndicator(_42,_40);
}
},placeDropIndicator:function(_43,_44){
this._oldDropIndex=this._currentDropIndex;
var _45=this._areaList[this._currentIndexArea];
if(!_45.initItems){
this._dropMode.initItems(_45);
}
this._currentDropIndex=this._dropMode.getDropIndex(_45,_43);
if(!(this._currentIndexArea===this._oldIndexArea&&this._oldDropIndex===this._currentDropIndex)){
this._placeDropIndicator(_44);
}
return this._currentDropIndex;
},_placeDropIndicator:function(_46){
var _47=this._areaList[this._lastValidIndexArea];
var _48=this._areaList[this._currentIndexArea];
this._dropMode.refreshItems(_47,this._oldDropIndex,_46,false);
var _49=null;
if(this._currentDropIndex!=-1){
_49=_48.items[this._currentDropIndex].item.node;
}
this._dropIndicator.place(_48.node,_49);
this._lastValidIndexArea=this._currentIndexArea;
this._dropMode.refreshItems(_48,this._currentDropIndex,_46,true);
},onDropCancel:function(){
if(!this._accept){
var _4a=this._getIndexArea(this._dropIndicator.node.parentNode);
if(_4a!=-1){
this._currentIndexArea=_4a;
}else{
this._currentIndexArea=0;
}
}
},onDrop:function(_4b){
this.onDropCancel();
var _4c=this._areaList[this._currentIndexArea];
_1.removeClass(_4b,"dragNode");
var _4d=_4b.style;
_4d.position="relative";
_4d.left="0";
_4d.top="0";
_4d.width="auto";
if(_4c.node==this._dropIndicator.node.parentNode){
_4c.node.insertBefore(_4b,this._dropIndicator.node);
}else{
_4c.node.appendChild(_4b);
this._currentDropIndex=_4c.items.length;
}
var _4e=this._currentDropIndex;
if(_4e==-1){
_4e=_4c.items.length;
}
var _4f=_4c.items;
var _50=_4f.slice(0,_4e);
var _51=_4f.slice(_4e,_4f.length);
_50[_50.length]=this._dragItem;
_4c.items=_50.concat(_51);
this._setMarginArea(_4c,_4b);
_1.forEach(this._areaList,function(obj){
obj.initItems=false;
});
_1.disconnect(this._dragItem.handlers.pop());
_1.disconnect(this._dragItem.handlers.pop());
this._resetAfterDrop();
if(this._cover){
_1.body().removeChild(this._cover);
_1.body().removeChild(this._cover2);
}
_1.publish("/dojox/mdnd/drop",[_4b,_4c,_4e]);
},_resetAfterDrop:function(){
this._accept=false;
this._dragItem=null;
this._currentDropIndex=-1;
this._currentIndexArea=-1;
this._oldDropIndex=-1;
this._sourceIndexArea=-1;
this._sourceDropIndex=-1;
this._dropIndicator.remove();
if(this._dragStartHandler){
_1.disconnect(this._dragStartHandler);
}
if(_1.isIE>7){
_1.forEach(this._eventsIE7,_1.disconnect);
}
},destroy:function(){
while(this._areaList.length>0){
if(!this.unregister(this._areaList[0].node)){
throw new Error("Error while destroying AreaManager");
}
}
_1.disconnect(this.resizeHandler);
this._dropIndicator.destroy();
this._dropMode.destroy();
if(dojox.mdnd.autoScroll){
dojox.mdnd.autoScroll.destroy();
}
if(this.refreshListener){
_1.unsubscribe(this.refreshListener);
}
if(this._cover){
_1._destroyElement(this._cover);
_1._destroyElement(this._cover2);
delete this._cover;
delete this._cover2;
}
}});
if(dijit&&dijit._Widget){
_1.extend(dijit._Widget,{dndType:"text"});
}
dojox.mdnd._areaManager=null;
dojox.mdnd.areaManager=function(){
if(!dojox.mdnd._areaManager){
dojox.mdnd._areaManager=new dojox.mdnd.AreaManager();
}
return dojox.mdnd._areaManager;
};
return am;
});

/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/matrices/pysparseMatrix.py

from __future__ import unicode_literals
from builtins import range
__docformat__ = 'restructuredtext'
__all__ = []
from pysparse import spmatrix
from fipy.tools import numerix
from fipy.matrices.sparseMatrix import _SparseMatrix
class _PysparseMatrix(_SparseMatrix):
def __init__(self, matrix):
"""Creates a wrapper for a pysparse matrix
        Allows basic python operations __add__, __sub__, etc., and
        facilitates populating the matrix in an easy way.
Parameters
----------
matrix : ~pysparse.spmatrix.ll_mat
The internal Pysparse matrix
"""
self.matrix = matrix
super(_PysparseMatrix, self).__init__()
def copy(self):
return _PysparseMatrix(matrix=self.matrix.copy())
def __getitem__(self, index):
m = self.matrix[index]
        if not isinstance(m, (int, float)):
m = _PysparseMatrix(matrix=m)
return m
@staticmethod
def _iadd(L, other, sign=1):
if other != 0:
L.shift(sign, other.matrix)
def __iadd__(self, other):
self._iadd(self.matrix, other)
return self
def __add__(self, other):
"""
Add two sparse matrices
>>> L = _PysparseMatrixFromShape(rows=3, cols=3)
>>> L.put([3., 10., numerix.pi, 2.5], [0, 0, 1, 2], [2, 1, 1, 0])
>>> print(L + _PysparseIdentityMatrix(size=3))
1.000000 10.000000 3.000000
--- 4.141593 ---
2.500000 --- 1.000000
>>> print(L + 0)
--- 10.000000 3.000000
--- 3.141593 ---
2.500000 --- ---
>>> print(L + 3)
Traceback (most recent call last):
...
AttributeError: 'int' object has no attribute 'matrix'
"""
if other == 0:
return self
else:
L = self.matrix.copy()
L.shift(1, other.matrix)
return _PysparseMatrix(matrix=L)
__radd__ = __add__
def __sub__(self, other):
if other == 0:
return self
else:
L = self.matrix.copy()
L.shift(-1, other.matrix)
return _PysparseMatrix(matrix=L)
def __rsub__(self, other):
return -self + other
def __isub__(self, other):
        self._iadd(self.matrix, other, -1)
        return self
def __mul__(self, other):
"""
Multiply a sparse matrix by another sparse matrix
>>> L1 = _PysparseMatrixFromShape(rows=3, cols=3)
>>> L1.put([3., 10., numerix.pi, 2.5], [0, 0, 1, 2], [2, 1, 1, 0])
>>> L2 = _PysparseIdentityMatrix(size=3)
>>> L2.put([4.38, 12357.2, 1.1], [2, 1, 0], [1, 0, 2])
>>> tmp = numerix.array(((1.23572000e+05, 2.31400000e+01, 3.00000000e+00),
... (3.88212887e+04, 3.14159265e+00, 0.00000000e+00),
... (2.50000000e+00, 0.00000000e+00, 2.75000000e+00)))
>>> numerix.allclose((L1 * L2).numpyArray, tmp)
1
or a sparse matrix by a vector
>>> tmp = numerix.array((29., 6.28318531, 2.5))
>>> numerix.allclose(L1 * numerix.array((1, 2, 3), 'd'), tmp)
1
or a vector by a sparse matrix
>>> tmp = numerix.array((7.5, 16.28318531, 3.))
>>> numerix.allclose(numerix.array((1, 2, 3), 'd') * L1, tmp)
1
(The multiplication is broken. Numpy is calling __rmul__ for every
element instead of with the whole array.)
"""
N = self.matrix.shape[1]
if isinstance(other, _PysparseMatrix):
return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, other.matrix))
else:
shape = numerix.shape(other)
if shape == ():
L = spmatrix.ll_mat(N, N, N)
L.put(other * numerix.ones(N, 'l'))
return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, L))
elif shape == (N,):
y = numerix.empty((self.matrix.shape[0],))
self.matrix.matvec(other, y)
return y
else:
raise TypeError
def __rmul__(self, other):
        if isinstance(other, type(numerix.ones(1, 'l'))):
y = other.copy()
self.matrix.matvec_transp(other, y)
return y
else:
return self * other
@property
def _shape(self):
return self.matrix.shape
@property
def _range(self):
return list(range(self._shape[1])), list(range(self._shape[0]))
def put(self, vector, id1, id2):
"""Put elements of `vector` at positions of the matrix corresponding to (`id1`, `id2`)
Parameters
----------
vector : array_like
The values to insert.
id1 : array_like
The row indices.
id2 : array_like
The column indices.
overlapping : bool
Whether to insert ghosted values or not (Ignored. Default False).
Examples
--------
>>> L = _PysparseMatrixFromShape(rows=3, cols=3)
>>> L.put([3., 10., numerix.pi, 2.5], [0, 0, 1, 2], [2, 1, 1, 0])
>>> print(L)
--- 10.000000 3.000000
--- 3.141593 ---
2.500000 --- ---
"""
self.matrix.put(vector, id1, id2)
def putDiagonal(self, vector):
"""
Put elements of `vector` along diagonal of matrix
>>> L = _PysparseMatrixFromShape(rows=3, cols=3)
>>> L.putDiagonal([3., 10., numerix.pi])
>>> print(L)
3.000000 --- ---
--- 10.000000 ---
--- --- 3.141593
>>> L.putDiagonal([10., 3.])
>>> print(L)
10.000000 --- ---
--- 3.000000 ---
--- --- 3.141593
"""
if isinstance(vector, (int, float)):
ids = numerix.arange(self._shape[0])
tmp = numerix.zeros((self._shape[0],), 'd')
tmp[:] = vector
self.put(tmp, ids, ids)
else:
ids = numerix.arange(len(vector))
self.put(vector, ids, ids)
def take(self, id1, id2):
vector = numerix.zeros(len(id1), 'd')
self.matrix.take(vector, id1, id2)
return vector
def takeDiagonal(self):
ids = numerix.arange(self._shape[0])
return self.take(ids, ids)
def addAt(self, vector, id1, id2):
"""Add elements of `vector` to the positions in the matrix corresponding to (`id1`,`id2`)
Parameters
----------
vector : array_like
The values to insert.
id1 : array_like
The row indices.
id2 : array_like
The column indices.
overlapping : bool
Whether to add ghosted values or not (Ignored. Default False).
Examples
--------
>>> L = _PysparseMatrixFromShape(rows=3, cols=3)
>>> L.put([3., 10., numerix.pi, 2.5], [0, 0, 1, 2], [2, 1, 1, 0])
>>> L.addAt([1.73, 2.2, 8.4, 3.9, 1.23], [1, 2, 0, 0, 1], [2, 2, 0, 0, 2])
>>> print(L)
12.300000 10.000000 3.000000
--- 3.141593 2.960000
2.500000 --- 2.200000
"""
self.matrix.update_add_at(vector,
numerix.asarray(id1, dtype='int32'),
numerix.asarray(id2, dtype='int32'))
def addAtDiagonal(self, vector):
if isinstance(vector, (int, float)):
ids = numerix.arange(self._shape[0])
tmp = numerix.zeros((self._shape[0],), 'd')
tmp[:] = vector
self.addAt(tmp, ids, ids)
else:
ids = numerix.arange(len(vector))
self.addAt(vector, ids, ids)
@property
def numpyArray(self):
shape = self._shape
indices = numerix.indices(shape)
numMatrix = self.take(indices[0].ravel(), indices[1].ravel())
return numerix.reshape(numMatrix, shape)
def matvec(self, x):
"""
This method is required for scipy solvers.
"""
return self * x
def exportMmf(self, filename):
"""
Exports the matrix to a Matrix Market file of the given `filename`.
"""
self.matrix.export_mtx(filename)
@property
def CSR(self):
"""The Compact Sparse Row description of the local matrix
Returns
-------
ptrs : array_like of int
Locations in `cols` and `data` vectors that start a row,
            terminated with ``len(data)``
cols : array_like of int
Sequence of non-sparse column indices.
data : array_like of float
Sequence of non-sparse values.
Examples
--------
>>> L = _PysparseMatrixFromShape(rows=3, cols=3, bandwidth=3)
>>> L.put([3.,10.,numerix.pi,2.5], [0,0,1,2], [2,1,1,0])
>>> L.addAt([1.73,2.2,8.4,3.9,1.23], [1,2,0,0,1], [2,2,0,0,2])
>>> ptrs, cols, data = L.CSR
>>> print(numerix.asarray(ptrs))
[0 3 5 7]
>>> print(numerix.asarray(cols))
[0 1 2 1 2 0 2]
>>> print(numerix.asarray(data))
[ 12.3 10. 3. 3.14159265 2.96
2.5 2.2 ]
"""
rows, lildata = self.LIL
        ptrs = [0] + [len(row) for row in rows]
ptrs = list(numerix.cumsum(ptrs))
cols = [col for row in rows for col in row]
data = [datum for row in lildata for datum in row]
return ptrs, cols, data
@property
def LIL(self):
"""The List of Lists description of the local matrix
Returns
-------
rows : list of sequence of int
List of non-sparse column indices on each row.
data : list of sequence of float
List of non-sparse values on each row.
Examples
--------
>>> L = _PysparseMatrixFromShape(rows=3, cols=3, bandwidth=3)
>>> L.put([3.,10.,numerix.pi,2.5], [0,0,1,2], [2,1,1,0])
>>> L.addAt([1.73,2.2,8.4,3.9,1.23], [1,2,0,0,1], [2,2,0,0,2])
>>> rows, data = L.LIL
>>> from scipy.stats.mstats import argstoarray # doctest: +SCIPY
>>> print(argstoarray(*rows)) # doctest: +SCIPY
[[0.0 1.0 2.0]
[1.0 2.0 --]
[0.0 2.0 --]]
>>> print(argstoarray(*data)) # doctest: +SCIPY
[[12.3 10.0 3.0]
[3.141592653589793 2.96 --]
[2.5 2.2 --]]
"""
nrows, _ = self.matrix.shape
        rows = [[] for _ in range(nrows)]
        data = [[] for _ in range(nrows)]
for (row, col), datum in self.matrix.items():
rows[row].append(col)
data[row].append(datum)
return rows, data
@property
def T(self):
"""Transpose matrix
Returns
-------
~fipy.matrices.pysparseMatrix._PysparseMatrix
Examples
--------
>>> import fipy as fp
>>> mesh = fp.Grid1D(nx=10)
>>> ids = fp.CellVariable(mesh=mesh, value=mesh._globalOverlappingCellIDs)
>>> mat = _PysparseColMeshMatrix(mesh=mesh, rows=1)
>>> mat.put(vector=ids.value,
... id1=[fp.parallelComm.procID] * mesh.numberOfCells,
... id2=mesh._localOverlappingCellIDs,
... overlapping=True) # doctest: +SERIAL
>>> print(mat.T.numpyArray) # doctest: +SERIAL
[[ 0.]
[ 1.]
[ 2.]
[ 3.]
[ 4.]
[ 5.]
[ 6.]
[ 7.]
[ 8.]
[ 9.]]
"""
val, irow, jcol = self.matrix.find()
rows, cols = self.matrix.shape
if hasattr(self.matrix, 'storeZeros'):
A_T = spmatrix.ll_mat(cols, rows, self.matrix.nnz, self.matrix.storeZeros)
else:
A_T = spmatrix.ll_mat(cols, rows, self.matrix.nnz)
A_T.put(val, jcol, irow)
return _PysparseMatrix(matrix=A_T)
class _PysparseMatrixFromShape(_PysparseMatrix):
def __init__(self, rows, cols, bandwidth=0, sizeHint=None, matrix=None, storeZeros=True):
"""Instantiates and wraps a Pysparse `ll_mat` matrix
Parameters
----------
rows : int
The number of matrix rows
cols : int
The number of matrix columns
bandwidth : int
The proposed band width of the matrix.
sizeHint : int
Estimate of the number of non-zeros
matrix : ~pysparse.spmatrix.ll_mat
Pre-assembled Pysparse matrix to use for storage
storeZeros : bool
Instructs pysparse to store zero values if possible.
"""
sizeHint = sizeHint or max(rows, cols) * bandwidth
if matrix is None:
tmpMatrix = spmatrix.ll_mat(1, 1, 1)
if hasattr(tmpMatrix, 'storeZeros'):
matrix = spmatrix.ll_mat(rows, cols, sizeHint, storeZeros)
else:
matrix = spmatrix.ll_mat(rows, cols, sizeHint)
super(_PysparseMatrixFromShape, self).__init__(matrix=matrix)
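
# A minimal, illustrative sketch (not part of FiPy) exercising the wrappers
# above; it assumes pysparse is importable and only uses methods defined in
# this module.
def _sketch_assemble_and_inspect():
    L = _PysparseMatrixFromShape(rows=3, cols=3, bandwidth=3)
    L.put([1., 2., 3.], [0, 1, 2], [0, 1, 2])  # set the diagonal
    L.addAt([0.5], [0], [2])                   # accumulate an off-diagonal term
    diag = L.takeDiagonal()                    # -> array([ 1., 2., 3.])
    ptrs, cols, data = L.CSR                   # compressed sparse row view
    return diag, ptrs, cols, data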
class _PysparseBaseMeshMatrix(_PysparseMatrixFromShape):
def __init__(self, mesh, rows, cols, bandwidth=0, sizeHint=None, matrix=None, storeZeros=True):
"""Creates a `_PysparseMatrixFromShape` associated with a `Mesh`.
Parameters
----------
mesh : ~fipy.meshes.mesh.Mesh
The `Mesh` to assemble the matrix for.
rows : int
The number of local matrix rows.
cols : int
The number of local matrix columns.
bandwidth : int
The proposed band width of the matrix.
sizeHint : int
Estimate of the number of non-zeros.
        matrix : ~pysparse.spmatrix.ll_mat
            Pre-assembled Pysparse matrix to use for storage.
        storeZeros : bool
            Instructs pysparse to store zero values if possible.
"""
self.mesh = mesh
super(_PysparseBaseMeshMatrix, self).__init__(rows=rows,
cols=cols,
bandwidth=bandwidth,
sizeHint=sizeHint,
matrix=matrix,
storeZeros=storeZeros)
def _getGhostedValues(self, var):
"""Obtain current ghost values from across processes
Nothing to do for serial matrix.
Returns
-------
ndarray
Ghosted values
"""
return var.value
def put(self, vector, id1, id2, overlapping=False):
"""Insert local overlapping values and coordinates into global
Parameters
----------
vector : array_like
The overlapping values to insert.
id1 : array_like
The local overlapping row indices.
id2 : array_like
The local overlapping column indices.
overlapping : bool
Whether to insert ghosted values or not (Ignored)
"""
super(_PysparseBaseMeshMatrix, self).put(vector=vector, id1=id1, id2=id2)
def addAt(self, vector, id1, id2, overlapping=False):
"""Accumulate local overlapping values and coordinates into global
Parameters
----------
vector : array_like
The overlapping values to insert.
id1 : array_like
The local overlapping row indices.
id2 : array_like
The local overlapping column indices.
overlapping : bool
Whether to add ghosted values or not (Ignored)
"""
super(_PysparseBaseMeshMatrix, self).addAt(vector=vector, id1=id1, id2=id2)
class _PysparseRowMeshMatrix(_PysparseBaseMeshMatrix):
def __init__(self, mesh, cols, numberOfEquations=1, bandwidth=0,
sizeHint=None, matrix=None, storeZeros=True):
"""Creates a `_PysparseBaseMeshMatrix` with rows associated with equations.
Parameters
----------
mesh : ~fipy.meshes.mesh.Mesh
The `Mesh` to assemble the matrix for.
cols : int
The number of matrix columns.
numberOfEquations : int
The rows of the matrix are determined by
`numberOfEquations * mesh.numberOfCells`.
bandwidth : int
The proposed band width of the matrix.
sizeHint : int
Estimate of the number of non-zeros
matrix : ~pysparse.spmatrix.ll_mat
Pre-assembled Pysparse matrix to use for storage.
storeZeros : bool
Instructs Pysparse to store zero values if possible.
"""
self.numberOfEquations = numberOfEquations
super(_PysparseRowMeshMatrix, self).__init__(mesh=mesh,
rows=numberOfEquations * mesh.numberOfCells,
cols=cols,
bandwidth=bandwidth,
sizeHint=sizeHint,
matrix=matrix,
storeZeros=storeZeros)
class _PysparseColMeshMatrix(_PysparseBaseMeshMatrix):
def __init__(self, mesh, rows, numberOfVariables=1, bandwidth=0,
sizeHint=None, matrix=None, storeZeros=True):
"""Creates a `_PysparseBaseMeshMatrix` with columns associated with solution variables.
Parameters
----------
mesh : ~fipy.meshes.mesh.Mesh
The `Mesh` to assemble the matrix for.
rows : int
The number of matrix rows.
numberOfVariables : int
The columns of the matrix are determined by
`numberOfVariables * mesh.globalNumberOfCells`.
bandwidth : int
The proposed band width of the matrix.
sizeHint : int
Estimate of the number of non-zeros
matrix : ~pysparse.spmatrix.ll_mat
Pre-assembled Pysparse matrix to use for storage.
storeZeros : bool
Instructs Pysparse to store zero values if possible.
"""
self.numberOfVariables = numberOfVariables
super(_PysparseColMeshMatrix, self).__init__(mesh=mesh,
rows=rows,
cols=numberOfVariables * mesh.numberOfCells,
bandwidth=bandwidth,
sizeHint=sizeHint,
matrix=matrix,
storeZeros=storeZeros)
class _PysparseMeshMatrix(_PysparseRowMeshMatrix):
def __init__(self, mesh, numberOfVariables=1, numberOfEquations=1,
bandwidth=0, sizeHint=None, matrix=None, storeZeros=True):
"""Creates a `_PysparseBaseMeshMatrix` with associated with equations and variables.
Parameters
----------
mesh : ~fipy.meshes.mesh.Mesh
The `Mesh` to assemble the matrix for.
numberOfVariables : int
The columns of the matrix are determined by
`numberOfVariables * mesh.numberOfCells`.
numberOfEquations : int
The rows of the matrix are determined by
`numberOfEquations * mesh.numberOfCells`.
bandwidth : int
The proposed band width of the matrix.
sizeHint : int
Estimate of the number of non-zeros
matrix : ~pysparse.spmatrix.ll_mat
Pre-assembled Pysparse matrix to use for storage
storeZeros : bool
Instructs Pysparse to store zero values if possible.
"""
self.numberOfVariables = numberOfVariables
super(_PysparseMeshMatrix, self).__init__(mesh=mesh,
cols=numberOfVariables * mesh.numberOfCells,
numberOfEquations=numberOfEquations,
bandwidth=bandwidth,
sizeHint=sizeHint,
matrix=matrix,
storeZeros=storeZeros)
def __mul__(self, other):
if isinstance(other, _PysparseMeshMatrix):
return _PysparseMeshMatrix(mesh=self.mesh,
matrix=spmatrix.matrixmultiply(self.matrix, other.matrix))
else:
return _PysparseMatrixFromShape.__mul__(self, other)
def asTrilinosMeshMatrix(self):
"""Transforms a Pysparse matrix into a Trilinos matrix
Maintains the Trilinos matrix as an attribute.
Returns
-------
~fipy.matrices.trilinosMatrix._TrilinosMatrix
"""
A = self.matrix.copy()
values, irow, jcol = A.find()
if not hasattr(self, 'trilinosMatrix'):
if A.shape[0] == 0:
bandwidth = 0
else:
bandwidth = int(numerix.ceil(float(len(values)) / float(A.shape[0])))
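            # note: the estimate above is immediately discarded below in
            # favor of a nominal bandwidth of 1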
bandwidth = 1
from fipy.matrices.trilinosMatrix import _TrilinosMeshMatrixKeepStencil
tmp = _TrilinosMeshMatrixKeepStencil(mesh=self.mesh, bandwidth=bandwidth,
numberOfVariables=self.numberOfVariables,
numberOfEquations=self.numberOfEquations)
self.trilinosMatrix = tmp
self.trilinosMatrix.addAt(values, irow, jcol)
self.trilinosMatrix.finalize()
return self.trilinosMatrix
@property
def numpyArray(self):
from fipy.tools import parallelComm
if parallelComm.Nproc == 1:
return super(_PysparseMeshMatrix, self).numpyArray
else:
return self.asTrilinosMeshMatrix().numpyArray
def flush(self):
"""Deletes the pysparse matrix and calls `self.trilinosMatrix.flush()` if necessary.
"""
if hasattr(self, 'trilinosMatrix'):
if hasattr(self.matrix, 'storeZeros'):
self.trilinosMatrix.flush(cacheStencil=self.matrix.storeZeros)
else:
self.trilinosMatrix.flush(cacheStencil=False)
if (not hasattr(self, 'cache')) or (self.cache is False):
del self.matrix
def _test(self):
"""
Tests
>>> m = _PysparseMatrixFromShape(rows=3, cols=3, storeZeros=True)
>>> m.addAt((1., 0., 2.), (0, 2, 1), (1, 2, 0))
>>> print(not hasattr(m.matrix, 'storeZeros')
... or numerix.allequal(list(m.matrix.keys()),
... [(0, 1), (1, 0), (2, 2)]))
True
>>> print(not hasattr(m.matrix, 'storeZeros')
... or numerix.allequal(list(m.matrix.values()), [1., 2., 0.]))
True
>>> m = _PysparseMatrixFromShape(rows=3, cols=3, storeZeros=False)
>>> m.addAt((1., 0., 2.), (0, 2, 1), (1, 2, 0))
>>> print(numerix.allequal(list(m.matrix.keys()), [(0, 1), (1, 0)]))
True
>>> print(numerix.allequal(list(m.matrix.values()), numerix.array([1.0, 2.0])))
True
"""
pass
class _PysparseIdentityMatrix(_PysparseMatrixFromShape):
"""
Represents a sparse identity matrix for pysparse.
"""
def __init__(self, size):
"""Create a sparse matrix with `1` in the diagonal
>>> print(_PysparseIdentityMatrix(size=3))
1.000000 --- ---
--- 1.000000 ---
--- --- 1.000000
"""
_PysparseMatrixFromShape.__init__(self, rows=size, cols=size, bandwidth=1)
ids = numerix.arange(size)
self.put(numerix.ones(size, 'd'), ids, ids)
class _PysparseIdentityMeshMatrix(_PysparseIdentityMatrix):
def __init__(self, mesh):
"""Create a sparse matrix associated with a `Mesh` with `1` in the diagonal
>>> from fipy import Grid1D
>>> from fipy.tools import serialComm
>>> mesh = Grid1D(nx=3, communicator=serialComm)
>>> print(_PysparseIdentityMeshMatrix(mesh=mesh))
1.000000 --- ---
--- 1.000000 ---
--- --- 1.000000
"""
_PysparseIdentityMatrix.__init__(self, size=mesh.numberOfCells)
def _test():
import fipy.tests.doctestPlus
return fipy.tests.doctestPlus.testmod()
if __name__ == "__main__":
    _test()

/Fragmenstein-0.12.11.tar.gz/Fragmenstein-0.12.11/fragmenstein/victor/plip.py

import os
from pathlib import Path
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit.Chem import PandasTools
from functools import singledispatchmethod
from typing import Tuple, Dict, List, Union, Sequence, Optional, Any
from collections import Counter, defaultdict
from plip.structure.preparation import PDBComplex, PLInteraction
from openbabel.pybel import Atom, Residue
from openbabel.pybel import ob
from .minimalPDB import MinimalPDBParser
import warnings
class SerialPLIPper:
"""
Calling the instance will return a ``Dict[Tuple[str, str, int], int]``,
where the key is interaction type, residue 3-letter name, residue index
and the value is the count of interactions.
Basically, applying Plip to a pd.Series of Chem.Mol.
    Unpacking it is kind of weird; the best way, I reckon, is a brutal for-loop:
.. code-block:: python
import pandas as pd
import pandera.typing as pdt
intxndexes: pdt.Series[Dict[Tuple[str, str, int], int]] = hits.ROMol.apply(SerialPLIPper(pdb_filename))
# columns will still be a tuple...:
intxn_df = pd.DataFrame(intxndexes.to_list()).fillna(0).astype(int)
hits['N_interactions'] = intxn_df.sum(axis='columns')
for c in sorted(intxn_df.columns, key=lambda kv: kv[2]):
# columns will be a colon-separated string:
hits[':'.join(map(str, c))] = intxn_df[c]
"""
def __init__(self, pdb_block: str, resn='LIG', resi=1, chain='B'):
assert 'ATOM' in pdb_block, f'No ATOM entry in block provided: {pdb_block}'
self.pdb_block = pdb_block
self.resn = resn
        self.resi = int(resi)  # don't give me alt codes
self.chain = chain
@classmethod
def from_filename(cls, pdb_filename: str, *args, **kwargs):
"""
The main constructor is from PDB block, this is from PDB file
"""
with open(pdb_filename, 'r') as f:
pdb_block = f.read()
return cls(pdb_block, *args, **kwargs)
def __call__(self, mol) -> Dict[Tuple[str, str, int], int]:
if mol is None or not isinstance(mol, Chem.Mol) or mol.GetNumAtoms() == 0:
return {}
holo: str = self.plonk(mol)
interaction_set: PLInteraction = self.get_interaction_set(holo)
return self.get_interaction_counts(interaction_set)
def assign_pdb(self, mol: Chem.Mol):
"""
Fix the PDB info for the molecule, in place
"""
counts = defaultdict(int)
atom: Chem.Atom
for atom in mol.GetAtoms():
element: str = atom.GetSymbol()
counts[element] += 1
info = Chem.AtomPDBResidueInfo(atomName=f'{element: >2}{counts[element]: <2}',
residueName=self.resn,
residueNumber=self.resi, chainId=self.chain)
atom.SetPDBResidueInfo(info)
def plonk(self, mol):
"""
Temporarily here. Do not copy.
There likely is a way to do this in OBabel
This is using Fragmenstein ``MinimalPDBParser``.
:param mol:
:return:
"""
pdbdata = MinimalPDBParser(self.pdb_block, remove_other_hetatms=True, ligname=self.resn)
self.assign_pdb(mol)
moldata = MinimalPDBParser(Chem.MolToPDBBlock(mol))
pdbdata.append(moldata)
return str(pdbdata)
    @singledispatchmethod
    def get_interaction_set(self, target) -> PLInteraction:
        """
        Overloaded method: a PDB block (``str``) or a ``Chem.Mol`` returns the interaction set

        :return:
        """
        raise NotImplementedError
@get_interaction_set.register
def _(self, block: str) -> PLInteraction:
holo = PDBComplex()
holo.load_pdb(block, as_string=True)
holo.analyze()
return holo.interaction_sets[':'.join([self.resn, self.chain, str(self.resi)])]
@get_interaction_set.register
def _(self, mol: Chem.Mol) -> PLInteraction:
if mol.GetNumAtoms() == 0:
raise ValueError('Molecule has no atoms')
holo = PDBComplex()
holo.load_pdb(self.plonk(mol), as_string=True)
holo.analyze()
return holo.interaction_sets[':'.join([self.resn, self.chain, str(self.resi)])]
def get_atomname(self, atom: Union[Atom, ob.OBAtom], atomnames: Optional[Sequence[str]]=None) -> str:
"""
Given an atom, return its name.
"""
if atomnames is not None and isinstance(atom, Atom):
return atomnames[atom.idx - 1] # Fortran indexing
elif isinstance(atom, Atom):
res: ob.OBResidue = atom.residue.OBResidue
obatom = atom.OBAtom
elif isinstance(atom, ob.OBAtom):
obatom: ob.OBAtom = atom
res: ob.OBResidue = obatom.GetResidue()
else:
raise TypeError
return res.GetAtomID(obatom) # this is likely to be ' C ' as Babel has stripped this info
def get_atom_by_atomname(self,
residue: Union[ob.OBResidue, Residue],
atomname: str,
atomnames: Optional[Sequence[str]]=None) -> ob.OBAtom:
"""
Get an atom by its name in a residue.
Note that the ligand will have its original atom names stripped by Babel.
"""
if isinstance(residue, Residue):
residue = residue.OBResidue
obatom: ob.OBAtom
for obatom in ob.OBResidueAtomIter(residue):
if self.get_atomname(obatom, atomnames).strip() == atomname.strip():
return obatom
else:
raise ValueError(f'No atom with name {atomname} in residue {residue.GetName()}')
def get_interaction_counts(self, interaction_set: PLInteraction) -> Dict[Tuple[str, str, int], int]:
"""
Count the number of interactions of each type for each residue
"""
intxns: List = interaction_set.all_itypes
intxn_dex = defaultdict(int)
for intxn in intxns:
key = (intxn.__class__.__name__, intxn.restype, intxn.resnr)
intxn_dex[key] += 1 # noqa default dict works with tuples
return dict(sorted(intxn_dex.items(), key=lambda kv: kv[0][2]))
def summarize_interactions(self, atom_names: Sequence[str]) -> List[Dict[str, Any]]:
interaction_set = self.get_interaction_set(self.pdb_block)
details: List[Dict[str, Any]] = []
for intxn in interaction_set.all_itypes:
            details.append(self.summarize_interaction(intxn, atom_names))
return details
def summarize_interaction(self, intxn, atom_names: Sequence[str]) -> Dict[str, Any]:
# https://github.com/openbabel/openbabel/blob/master/data/atomtyp.txt
# https://blog.matteoferla.com/2023/07/a-note-no-plip-interactions.html
relevant_atom_names = []
type_name = intxn.__class__.__name__
details = {'type': type_name, 'protein_resn': intxn.restype, 'protein_resi': intxn.resnr,
'protein_chain': intxn.reschain}
if type_name == 'hbond':
if intxn.protisdon:
# assert hbond.a.residue.name != self.resn
details['atom_names'] = [atom_names[intxn.a.idx - 1]]
details['type'] = 'hbond_acceptor'
details['babel_atom_types'] = [intxn.atype]
else:
details['atom_names'] = [atom_names[intxn.d.idx - 1]]
details['type'] = 'hbond_donor'
details['babel_atom_types'] = [intxn.dtype]
details['distance'] = intxn.distance_ad # intxn.distance_ad is to donor, _ah to hydrogen
elif type_name == 'hydroph_interaction':
details['atom_names'] = [atom_names[intxn.ligatom.idx - 1]]
details['babel_atom_types'] = [intxn.ligatom.type]
details['distance'] = intxn.distance
elif type_name == 'pistack':
details['atom_names'] = [atom_names[a.idx - 1] for a in intxn.ligandring.atoms]
details['babel_atom_types'] = [a.type for a in intxn.ligandring.atoms]
details['distance'] = intxn.distance
elif type_name == 'waterbridge':
if intxn.protisdon:
# assert hbond.a.residue.name != self.resn
details['atom_names'] = [atom_names[intxn.a.idx - 1]]
details['type'] = 'water_acceptor'
details['babel_atom_types'] = [intxn.atype]
details['distance'] = intxn.distance_aw
else:
details['atom_names'] = [atom_names[intxn.d.idx - 1]]
details['type'] = 'water_donor'
details['babel_atom_types'] = [intxn.dtype]
details['distance'] = intxn.distance_dw
elif type_name == 'saltbridge':
if intxn.protispos:
details['atom_names'] = [atom_names[a.idx - 1] for a in intxn.negative.atoms]
details['type'] = 'saltbridge_negative'
details['babel_atom_types'] = [a.type for a in intxn.negative.atoms]
details['distance'] = intxn.distance
else:
details['atom_names'] = [atom_names[a.idx - 1] for a in intxn.positive.atoms]
details['type'] = 'saltbridge_positive'
details['babel_atom_types'] = [a.type for a in intxn.positive.atoms]
details['distance'] = intxn.distance
elif type_name == 'pication':
if intxn.protcharged:
details['atom_names'] = [atom_names[a.idx - 1] for a in intxn.ring.atoms]
details['type'] = 'pication_ring'
details['babel_atom_types'] = [a.type for a in intxn.ring.atoms]
details['distance'] = intxn.distance
else:
details['atom_names'] = [atom_names[a.idx - 1] for a in intxn.charge.atoms]
details['type'] = 'pication_charge'
details['babel_atom_types'] = [a.type for a in intxn.charge.atoms]
details['distance'] = intxn.distance
elif type_name == 'halogenbond':
details['atom_names'] = [atom_names[intxn.don.idx - 1]]
details['babel_atom_types'] = [intxn.don.type]
details['distance'] = intxn.distance
else: # metal_complex basically.
raise TypeError(type_name)
        return details

/Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/validation.py

from .exceptions import EnrichmentFailure, ValidationFailure
GLOBAL_NAMESPACE = "GLOBAL"
class Priority(object):
"""Priority levels that indicate how severe a validation failure is.
Validations have a priority that publishers use to determine whether
or not to publish in the case of failure.
"""
    # If a LOW priority Validation fails, the failure should be recorded for posterity
# but not reported to a human (e.g. logged in continuous integration server
# job history but not reported to IRC/HipChat)
LOW = 1
# If a NORMAL priority Validation fails, the failure should be reported to
# a human but it should not wake up a human (e.g. send a notification to
# IRC/HipChat, Graphite). Additionally the failure should be logged for
# posterity as in the case of LOW priority failures.
NORMAL = 2
# If a CRITICAL priority Validation fails, a human should be woken up
# (e.g. create an incident in PagerDuty). Additionally the failure should
# be more politely reported to humans (e.g. via IRC/HipChat, Graphite,
# etc.) and recorded for posterity as in the case of NORMAL and LOW
# priority failures.
CRITICAL = 3
@staticmethod
def string(priority):
"""Return the name of the priority (e.g. normal, low, critical)"""
if priority == Priority.NORMAL:
return "normal"
elif priority == Priority.LOW:
return "low"
elif priority == Priority.CRITICAL:
return "critical"
else:
return "unknown priority: {0}".format(priority)
class Validation(object):
"""The base class for validations.
The base class for all classes that represent some form of validation
(e.g. some expected system property that can be checked and categorized as
either passing or failing). Examples of Validations include: an HTTP
service returning an expected result in a specified amount of time, an
Upstart process on a Linux server is in the running state, a Message
Queue's queue length is lower than a maximum value.
:param name: The name of this validation.
:param priority: The :py:class:`.Priority` level of this validation.
:param timeout: How long this validation can take before being considered
a failure. If None, then the validation will never be considered a
failure due to timing out.
:param group: The group this validation belongs to.
"""
def __init__(self, name, priority=Priority.NORMAL,
timeout=None, group=None):
"""Creates a Validation object with the supplied name and priority.
Arguments:
name -- The name of this Validation
Keyword Arguments
priority -- The priority of this Validation.
timeout -- If this validation takes longer than this many seconds,
it will be considered a failure.
group -- The group this validation belongs to.
"""
self.name = name
self.priority = priority
self.timeout = timeout
self.group = group
        # this should never be directly manipulated without very good reason;
        # it is used to store extra data for publishers, and the primary
        # method of interaction should be the enrich and get_enriched
        # functions in publisher.py
self._enriched_data = {GLOBAL_NAMESPACE: {}}
        # determines the partial ordering of the validations.
        # Alarmageddon guarantees that all Validations with lower order than
        # this Validation's order will run before this Validation runs.
        # Most validations have no reason to change this.
self.order = 0
def perform(self, group_failures):
"""Perform the validation.
If the validation fails, call self.fail passing it the reason for
the failure.
        :param group_failures: A dictionary mapping each group name to the
        list of failure messages recorded for that group during the
        Alarmageddon run.
"""
pass
def fail(self, reason):
"""Log the validation as a failure.
:param reason: The cause of the failure.
"""
raise ValidationFailure(reason)
def get_elapsed_time(self):
"""Return the amount of time this validation took.
The :py:class:`.reporter.Reporter` will check here before using
the call time.
        Override this if you need more precise timing -
eg, if you want to know how long an http request took, as opposed
to how long that whole test took to execute.
This function should return a number, not a timedelta.
"""
raise NotImplementedError
def __str__(self):
return "Validation {{ name: '{0}' priority: '{1}' timeout: {2}}}"\
.format(self.name,
Priority.string(self.priority),
self.timeout)
def timer_name(self):
"""Return the name of the timer that corresponds to this validation.
Used to indicate where a publisher should log the time taken.
"""
return None
def enrich(self, publisher, values, force_namespace=False):
"""Adds publisher-specific information to the validation.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to add enriched data for.
:param values: The enriched data to add to this validation.
:param force_namespace: If True, will never add the data to the global
namespace.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
if namespace in enriched:
raise EnrichmentFailure(publisher, self, values)
enriched[namespace] = {}
for key, value in list(values.items()):
if force_namespace:
enriched[namespace][key] = value
else:
if key not in enriched[GLOBAL_NAMESPACE]:
enriched[GLOBAL_NAMESPACE][key] = value
else:
enriched[namespace][key] = value
return self
def get_enriched(self, publisher, force_namespace=False):
"""Retrieve the appropriate publisher-specific data.
Will retrieve all global enriched data along with any extra
publisher specific data. This means that if you enrich a
validation for more than one publisher, this function may
return a superset of the enriched data for a given publisher.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to retrieve enriched data for.
:param force_namespace: If True, will not retrieve global enrichments.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
#copy global
data = {}
if not force_namespace:
data.update(enriched[GLOBAL_NAMESPACE])
try:
data.update(enriched[namespace])
except KeyError:
pass
return data
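
# A short sketch (not part of Alarmageddon) of the enrich/get_enriched round
# trip; `FakePublisher` is a stand-in for a real publisher class.
def _sketch_enrichment_round_trip():
    class FakePublisher(object):
        pass

    v = Validation("example")
    v.enrich(FakePublisher(), {"runbook": "http://example.com/runbook"})
    # the key landed in the global namespace, so any publisher can read it
    return v.get_enriched(FakePublisher())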
class GroupValidation(Validation):
"""A validation that checks the number of failures in a test group.
The priority level will be set dynamically based on the number of
failures and the supplied thresholds.
:param name: The name of this validation.
:param checked_group: The name of the group this validation will check.
:param low_threshold: The number of failures at which this validation
will itself fail.
:param normal_threshold: The number of failures at which this validation
will become NORMAL priority.
:param critical_threshold: The number of failures at which this validation
will become CRITICAL priority.
:param order: This validation will run after all validations of lower
order have run. Used when order matters - eg, creating a GroupValidation
for a group of GroupValidations.
:param group: The group this validation belongs to.
"""
def __init__(self, name, checked_group, low_threshold=float("inf"),
normal_threshold=float("inf"),
critical_threshold=float("inf"),
order=1, group=None):
Validation.__init__(self,
name, priority=Priority.LOW, timeout=None, group=group)
self.low_threshold = low_threshold
self.normal_threshold = normal_threshold
self.critical_threshold = critical_threshold
self._clean_thresholds()
self.order = order
self.checked_group = checked_group
def _clean_thresholds(self):
"""Ensure that the thresholds are consistent.
        `low_threshold` must not exceed `normal_threshold`, which must not
        exceed `critical_threshold`. If necessary, this function will alter
the thresholds to ensure this condition is met.
"""
if self.normal_threshold > self.critical_threshold:
self.normal_threshold = self.critical_threshold
if self.low_threshold > self.normal_threshold:
self.low_threshold = self.normal_threshold
def perform(self, group_failures):
"""Perform the validation."""
failures = len(group_failures[self.checked_group])
messages = group_failures[self.checked_group]
if failures >= self.low_threshold:
self._set_priority(failures)
self.fail("Group {0} had {1} failures! \n{2}".format(
self.checked_group, failures, messages))
def _set_priority(self, failures):
"""Set priority of this validation based on the number of failures.
:param failures: The number of failures in this validation's checked
group.
"""
if failures >= self.critical_threshold:
self.priority = Priority.CRITICAL
elif failures >= self.normal_threshold:
self.priority = Priority.NORMAL
else:
            self.priority = Priority.LOW

/K_AIKO-0.5.2-py3-none-any.whl/kaiko/devices/engines.py

import time
import bisect
import functools
import contextlib
import dataclasses
import queue
import threading
import numpy
from ..utils import config as cfg
from ..utils import datanodes as dn
from ..utils import markups as mu
from . import terminals as term
from . import audios as aud
from . import clocks
class Monitor:
def __init__(self, path, N=10):
self.path = path
self.N = N
# state
self.count = None
self.eff = None
self.avg = None
self.best = None
self.worst = None
# statistics
self.total_avg = None
self.total_dev = None
self.total_eff = None
@dn.datanode
def monitoring(self, node):
if hasattr(time, "thread_time"):
get_time = time.thread_time
elif hasattr(time, "clock_gettime"):
get_time = lambda: time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)
else:
get_time = time.perf_counter
N = self.N
start = prev = 0.0
stop = numpy.inf
self.count = 0
total = 0.0
total2 = 0.0
spend_N = [0.0] * N
recent_N = [0.0] * N
best_N = [numpy.inf] * N
worst_N = [-numpy.inf] * N
self.path.parent.mkdir(parents=True, exist_ok=True)
with open(self.path, "w") as file:
with node:
try:
data = yield
start = stop = prev = time.perf_counter()
while True:
t0 = get_time()
data = node.send(data)
t = get_time() - t0
stop = time.perf_counter()
spend = stop - prev
prev = stop
print(f"{spend}\t{t}", file=file)
self.count += 1
total += t
total2 += t ** 2
spend_N.insert(0, spend)
spend_N.pop()
recent_N.insert(0, t)
recent_N.pop()
bisect.insort_left(best_N, t)
best_N.pop()
bisect.insort(worst_N, t)
worst_N.pop(0)
self.avg = sum(recent_N) / N
self.eff = sum(recent_N) / sum(spend_N)
self.best = sum(best_N) / N
self.worst = sum(worst_N) / N
data = yield data
except StopIteration:
return
finally:
stop = time.perf_counter()
if self.count > 0:
self.total_avg = total / self.count
self.total_dev = (
total2 / self.count - self.total_avg ** 2
) ** 0.5
self.total_eff = total / (stop - start)
def __str__(self):
if self.count is None:
return f"UNINITIALIZED"
if self.total_avg is None:
return f"count={self.count}"
assert self.total_dev is not None
assert self.total_eff is not None
if self.best is None or self.best == float("inf"):
return (
f"count={self.count}, "
f"avg={self.total_avg*1000:5.3f}±{self.total_dev*1000:5.3f}ms "
f"({self.total_eff: >6.1%})"
)
assert self.worst is not None
return (
f"count={self.count}, "
f"avg={self.total_avg*1000:5.3f}±{self.total_dev*1000:5.3f}ms"
f" ({self.best*1000:5.3f}ms ~ {self.worst*1000:5.3f}ms) "
f"({self.total_eff: >6.1%})"
)
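
# A minimal sketch of wrapping a datanode stage with a Monitor; the path and
# the identity stage are hypothetical, and driving the node is elided.
def _sketch_monitoring(path):
    monitor = Monitor(path, N=10)
    stage = dn.pipe(lambda data: data)  # any datanode-compatible stage
    return monitor.monitoring(stage)    # drive this like the original stage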
class MixerSettings(cfg.Configurable):
r"""
Fields
------
output_device : int
The index of output device, or -1 for default device.
output_samplerate : int
The samplerate of output device.
output_buffer_length : int
The buffer length of output device. Note that too large will affect the
reaction time, but too small will affect the efficiency.
output_channels : int
The number of channels of output device.
output_format : str
The data format of output device. The valid formats are 'f4', 'i4',
'i2', 'i1', 'u1'.
sound_delay : float
The delay of clock of the mixer.
"""
output_device: int = -1
output_samplerate: int = 44100
output_buffer_length: int = 512 * 4
output_channels: int = 1
output_format: str = "f4"
sound_delay: float = 0.0
class Mixer:
def __init__(self, effects_pipeline, clock, settings, monitor):
self.effects_pipeline = effects_pipeline
self.clock = clock
self.settings = settings
self.monitor = monitor
@staticmethod
def get_task(pipeline, clock, settings, manager, init_time, monitor):
samplerate = settings.output_samplerate
buffer_length = settings.output_buffer_length
nchannels = settings.output_channels
format = settings.output_format
device = settings.output_device
output_node = Mixer._mix_node(pipeline, clock, settings, init_time)
if monitor:
output_node = monitor.monitoring(output_node)
return aud.play(
manager,
output_node,
samplerate=samplerate,
buffer_shape=(buffer_length, nchannels),
format=format,
device=device,
)
@staticmethod
@dn.datanode
def _mix_node(pipeline, clock, settings, init_time):
samplerate = settings.output_samplerate
buffer_length = settings.output_buffer_length
nchannels = settings.output_channels
timer = Mixer._timer_node(clock, init_time, buffer_length, samplerate, settings)
with timer, pipeline:
yield
while True:
data = numpy.zeros((buffer_length, nchannels), dtype=numpy.float32)
try:
slices_map = timer.send(None)
data = pipeline.send((data, slices_map))
except StopIteration:
return
yield data
@staticmethod
def _timer_node(clock, init_time, buffer_length, samplerate, settings):
return dn.pipe(
dn.count(0, 1),
lambda index: slice(
index * buffer_length / samplerate,
(index + 1) * buffer_length / samplerate,
),
clock.clock_slice(settings.sound_delay + init_time, 1),
)
@classmethod
def create(cls, settings, manager, init_time=0.0, monitor=None):
samplerate = settings.output_samplerate
buffer_length = settings.output_buffer_length
nchannels = settings.output_channels
pipeline = dn.DynamicPipeline()
clock = clocks.Clock()
task = cls.get_task(pipeline, clock, settings, manager, init_time, monitor)
return task, cls(pipeline, clock, settings, monitor)
def add_effect(self, node, zindex=(0,)):
return self.effects_pipeline.add_node(node, zindex=zindex)
def remove_effect(self, key):
return self.effects_pipeline.remove_node(key)
@dn.datanode
def tmask(self, node, time):
node = dn.DataNode.wrap(node)
samplerate = self.settings.output_samplerate
buffer_length = self.settings.output_buffer_length
nchannels = self.settings.output_channels
with node:
data, slices_map = yield
if time is None:
time = slices_map[0][1].start
while True:
data_offset = round(slices_map[0][0].start * samplerate)
for data_slice, time_slice, ratio in slices_map:
# pause
if time_slice.start == time_slice.stop:
continue
# skip
if data_slice.start == data_slice.stop:
time_slice = slice(time_slice.stop, time_slice.stop)
offset = round((time - time_slice.start) * samplerate)
# underrun
while offset < 0:
length = min(-offset, buffer_length)
dummy = numpy.zeros((length, nchannels), dtype=numpy.float32)
try:
node.send(dummy)
except StopIteration:
return
offset += length
time += length / samplerate
# overrun
data_start = round(data_slice.start * samplerate) - data_offset
data_stop = round(data_slice.stop * samplerate) - data_offset
if data_stop - data_start <= offset:
continue
try:
data[data_start + offset : data_stop] = node.send(
data[data_start + offset : data_stop]
)
except StopIteration:
return
time = time_slice.stop
data, slices_map = yield data.copy()
def resample(
self, samplerate=None, channels=None, volume=0.0, start=None, end=None
):
pipeline = []
if start is not None or end is not None:
pipeline.append(
dn.tspan(samplerate or self.settings.output_samplerate, start, end)
)
if channels is not None and channels != self.settings.output_channels:
pipeline.append(dn.rechannel(self.settings.output_channels, channels))
if samplerate is not None and samplerate != self.settings.output_samplerate:
pipeline.append(
dn.resample(ratio=(self.settings.output_samplerate, samplerate))
)
if volume != 0:
pipeline.append(lambda s: s * 10 ** (volume / 20))
return dn.pipe(*pipeline)
def play(
self,
node,
samplerate=None,
channels=None,
volume=0.0,
start=None,
end=None,
time=None,
zindex=(0,),
):
node = dn.pipe(node, self.resample(samplerate, channels, volume, start, end))
node = self.tmask(dn.attach(node), time)
return self.add_effect(node, zindex=zindex)
def play_file(self, path, volume=0.0, start=None, end=None, time=None, zindex=(0,)):
meta = aud.AudioMetadata.read(path)
node = aud.load(path)
node = dn.tslice(node, meta.samplerate, start, end)
# initialize before attach; it will seek to the starting frame
node.__enter__()
return self.play(
node,
samplerate=meta.samplerate,
channels=meta.channels,
volume=volume,
time=time,
zindex=zindex,
)
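
# A rough sketch of bringing up a Mixer; `manager` stands in for the audio
# manager object that `aud.play` expects, and "hit.wav" is a hypothetical file.
def _sketch_mixer(manager):
    task, mixer = Mixer.create(MixerSettings(), manager)
    mixer.play_file("hit.wav", volume=-6.0, time=0.0)  # schedule on the clock
    return task  # the caller is expected to drive this datanode task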
class AsyncAdditiveValue:
def __init__(self, value):
self.value = value
self._queue = queue.Queue()
def add(self, step):
self._queue.put(step)
def get(self):
while not self._queue.empty():
self.value += self._queue.get()
return self.value
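
# AsyncAdditiveValue lets one thread nudge a value that another thread reads;
# a tiny illustrative sketch:
def _sketch_async_additive():
    energy = AsyncAdditiveValue(1.0e-3)
    energy.add(+1.0e-4)   # e.g. a calibration handler raising the reference
    energy.add(-5.0e-5)
    return energy.get()   # drains the queue, then returns the current value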
class DetectorSettings(cfg.Configurable):
r"""
Fields
------
input_device : int
The index of input device, or -1 for default device.
input_samplerate : int
The samplerate of input device.
input_buffer_length : int
The buffer length of input device. Note that too large will affect the
reaction time, but too small will affect the efficiency.
input_channels : int
The number of channels of input device.
input_format : str
The data format of input device. The valid formats are 'f4', 'i4', 'i2',
'i1', 'u1'.
knock_delay : float
The delay of clock of the detector.
knock_energy : float
The reference volume of the detector.
"""
input_device: int = -1
input_samplerate: int = 44100
input_buffer_length: int = 512
input_channels: int = 1
input_format: str = "f4"
@cfg.subconfig
class detect(cfg.Configurable):
time_res: float = 0.0116099773 # hop_length = 512 if samplerate == 44100
freq_res: float = 21.5332031 # win_length = 512*4 if samplerate == 44100
pre_max: float = 0.03
post_max: float = 0.03
pre_avg: float = 0.03
post_avg: float = 0.03
wait: float = 0.03
delta: float = 5.48e-6 # ~ noise_power * 20
knock_delay: float = 0.0
knock_energy: float = 1.0e-3 # ~ Dt / knock_max_energy
class Detector:
def __init__(self, listeners_pipeline, clock, knock_energy, settings, monitor):
self.listeners_pipeline = listeners_pipeline
self.clock = clock
self.knock_energy = knock_energy
self.settings = settings
self.monitor = monitor
@staticmethod
def get_task(pipeline, clock, knock_energy, settings, manager, init_time, monitor):
samplerate = settings.input_samplerate
buffer_length = settings.input_buffer_length
nchannels = settings.input_channels
format = settings.input_format
device = settings.input_device
time_res = settings.detect.time_res
hop_length = round(samplerate * time_res)
input_node = Detector._detect_node(
pipeline, clock, knock_energy, init_time, settings
)
if buffer_length != hop_length:
input_node = dn.unchunk(input_node, chunk_shape=(hop_length, nchannels))
if monitor:
input_node = monitor.monitoring(input_node)
return aud.record(
manager,
input_node,
samplerate=samplerate,
buffer_shape=(buffer_length, nchannels),
format=format,
device=device,
)
@staticmethod
@dn.datanode
def _detect_node(pipeline, clock, knock_energy, init_time, settings):
samplerate = settings.input_samplerate
time_res = settings.detect.time_res
freq_res = settings.detect.freq_res
hop_length = round(samplerate * time_res)
win_length = round(samplerate / freq_res)
pre_max = round(settings.detect.pre_max / time_res)
post_max = round(settings.detect.post_max / time_res)
pre_avg = round(settings.detect.pre_avg / time_res)
post_avg = round(settings.detect.post_avg / time_res)
wait = round(settings.detect.wait / time_res)
delta = settings.detect.delta
prepare = max(post_max, post_avg)
timer = Detector._timer_node(
clock, init_time, hop_length, samplerate, prepare, settings
)
window = dn.get_half_Hann_window(win_length)
onset = dn.pipe(
dn.frame(win_length=win_length, hop_length=hop_length),
dn.power_spectrum(
win_length=win_length,
samplerate=samplerate,
windowing=window,
weighting=True,
),
dn.onset_strength(1),
)
picker = dn.pipe(
lambda a: (a, a),
dn.pair(
dn.pick_peak(pre_max, post_max, pre_avg, post_avg, wait, delta),
dn.delay(0.0 for _ in range(-prepare, 0)),
),
)
with pipeline, timer, onset, picker:
data = yield
while True:
try:
strength = onset.send(data)
detected, strength = picker.send(strength)
time, ratio = timer.send(None)
normalized_strength = strength / knock_energy.get()
pipeline.send((None, time, ratio, normalized_strength, detected))
except StopIteration:
return
data = yield
@staticmethod
def _timer_node(clock, init_time, hop_length, samplerate, prepare, settings):
return dn.pipe(
dn.count(-prepare * hop_length / samplerate, hop_length / samplerate),
clock.clock(settings.knock_delay + init_time, 1),
)
@classmethod
def create(cls, settings, manager, init_time=0.0, monitor=None):
pipeline = dn.DynamicPipeline()
knock_energy = AsyncAdditiveValue(settings.knock_energy)
clock = clocks.Clock()
task = cls.get_task(
pipeline, clock, knock_energy, settings, manager, init_time, monitor
)
return task, cls(pipeline, clock, knock_energy, settings, monitor)
def add_listener(self, node):
return self.listeners_pipeline.add_node(node, (0,))
def remove_listener(self, key):
self.listeners_pipeline.remove_node(key)
def on_hit(self, func, time=None, duration=None):
return self.add_listener(self._hit_listener(func, time, duration))
@dn.datanode
@staticmethod
def _hit_listener(func, start_time, duration):
_, time, ratio, strength, detected = yield
if start_time is None:
start_time = time
while time < start_time:
_, time, ratio, strength, detected = yield
while duration is None or time < start_time + duration:
if detected and ratio != 0:
finished = func(strength)
if finished:
return
_, time, ratio, strength, detected = yield
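
# A small sketch of registering a hit callback; `detector` is assumed to come
# from Detector.create(...) with its task being driven elsewhere.
def _sketch_on_hit(detector):
    def callback(strength):
        print(f"knock! normalized strength = {strength:.3f}")
        return False  # keep listening; returning True detaches the listener
    # listen for 10 seconds starting from the first received frame
    return detector.on_hit(callback, time=None, duration=10.0)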
@functools.lru_cache(maxsize=32)
def to_range(start, stop, width):
# range of slice without clamp
if start is None:
start = 0
elif start < 0:
start = width + start
else:
start = start
if stop is None:
stop = width
elif stop < 0:
stop = width + stop
else:
stop = stop
return range(start, stop)
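
# to_range mirrors slice semantics without clamping, e.g.
#     to_range(None, None, 80) -> range(0, 80)
#     to_range(-10, None, 80)  -> range(70, 80)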
class RichBar:
def __init__(self, terminal_settings):
self.markups = []
def add_markup(self, markup, mask=slice(None, None), shift=0):
self.markups.append((markup, mask, shift))
def draw(self, width, renderer):
buffer = [" "] * width
xran = range(width)
for markup, mask, shift in self.markups:
if mask.start is None:
x = shift
elif mask.start >= 0:
x = shift + mask.start
else:
x = shift + mask.start + width
renderer._render_bar(
buffer, markup, x=x, width=width, xmask=xran[mask], attrs=()
)
return "".join(buffer).rstrip()
class RendererSettings(cfg.Configurable):
r"""
Fields
------
display_framerate : float
The framerate of the renderer.
display_delay : float
The delay of clock of the renderer.
resize_delay : float
The delay time to redraw display after resizing.
"""
display_framerate: float = 160.0 # ~ 2 / detect.time_res
display_delay: float = 0.0
resize_delay: float = 0.5
class Renderer:
def __init__(self, drawers_pipeline, clock, settings, monitor):
self.drawers_pipeline = drawers_pipeline
self.clock = clock
self.settings = settings
self.monitor = monitor
@staticmethod
def get_task(pipeline, clock, settings, term_settings, init_time, monitor):
framerate = settings.display_framerate
display_node = Renderer._resize_node(
Renderer._render_node(pipeline, clock, settings, term_settings, init_time),
settings,
term_settings,
)
if monitor:
display_node = monitor.monitoring(display_node)
return term.show(display_node, 1 / framerate, hide_cursor=True)
@staticmethod
@dn.datanode
def _render_node(pipeline, clock, settings, term_settings, init_time):
framerate = settings.display_framerate
rich_renderer = mu.RichRenderer(term_settings.unicode_version)
clear_line = rich_renderer.render(rich_renderer.clear_line().expand())
clear_below = rich_renderer.render(rich_renderer.clear_below().expand())
width = 0
logs = []
msgs = []
curr_msgs = list(msgs)
timer = Renderer._timer_node(clock, init_time, framerate, settings)
with timer, pipeline:
shown, resized, size = yield
while True:
width = size.columns
view = RichBar(term_settings)
try:
time, ratio = timer.send(None)
view, msgs, logs = pipeline.send(((view, msgs, logs), time, width))
except StopIteration:
return
view_str = view.draw(width, rich_renderer)
logs_str = (
rich_renderer.render(mu.Group(tuple(logs))) + "\r" if logs else ""
)
# track changes of the message
if not resized and not logs and curr_msgs == msgs:
res_text = f"{clear_line}{view_str}\r"
else:
msg_text = (
rich_renderer.render_less(
mu.Group((mu.Text("\n"), *msgs)), size
)
if msgs
else ""
)
res_text = f"{clear_below}{logs_str}{view_str}\r{msg_text}"
shown, resized, size = yield res_text
if shown:
logs.clear()
curr_msgs = list(msgs)
@staticmethod
@dn.datanode
def _resize_node(render_node, settings, term_settings):
resize_delay = settings.resize_delay
rich_renderer = mu.RichRenderer(term_settings.unicode_version)
clear_line = rich_renderer.render(rich_renderer.clear_line().expand())
clear_screen = rich_renderer.render(rich_renderer.clear_screen().expand())
size_node = term.terminal_size()
width = 0
resizing_since = time.perf_counter()
resized = False
with render_node, size_node:
shown = yield
while True:
try:
size = size_node.send(None)
except StopIteration:
return
if size.columns < width:
resizing_since = time.perf_counter()
resized = True
if resized and time.perf_counter() < resizing_since + resize_delay:
yield f"{clear_line}resizing...\r"
width = size.columns
continue
width = size.columns
try:
res_text = render_node.send((shown, resized, size))
except StopIteration:
return
if resized:
res_text = f"{clear_screen}{res_text}"
shown = yield res_text
if shown:
resized = False
@staticmethod
def _timer_node(clock, init_time, framerate, settings):
return dn.pipe(
dn.count(1 / framerate, 1 / framerate),
clock.clock(settings.display_delay + init_time, 1),
)
@classmethod
def create(cls, settings, term_settings, init_time=0.0, monitor=None):
pipeline = dn.DynamicPipeline()
clock = clocks.Clock()
task = cls.get_task(
pipeline, clock, settings, term_settings, init_time, monitor
)
return task, cls(pipeline, clock, settings, monitor)
def add_drawer(self, node, zindex=(0,)):
return self.drawers_pipeline.add_node(node, zindex=zindex)
def remove_drawer(self, key):
self.drawers_pipeline.remove_node(key)
def add_log(self, msg, zindex=(0,)):
return self.add_drawer(self._log_drawer(msg), zindex)
def add_message(self, msg, zindex=(0,)):
return self.add_drawer(self._msg_drawer(msg), zindex)
def add_texts(self, texts_node, xmask=slice(None, None), zindex=(0,)):
return self.add_drawer(self._texts_drawer(texts_node, xmask), zindex)
@staticmethod
@dn.datanode
def _log_drawer(msg):
(view, msgs, logs), _, _ = yield
logs.append(msg)
yield (view, msgs, logs)
@staticmethod
@dn.datanode
def _msg_drawer(msg):
(view, msgs, logs), _, _ = yield
msgs.append(msg)
yield (view, msgs, logs)
@staticmethod
@dn.datanode
def _texts_drawer(text_node, xmask=slice(None, None)):
text_node = dn.DataNode.wrap(text_node)
with text_node:
(view, msg, logs), time, width = yield
while True:
xran = to_range(xmask.start, xmask.stop, width)
try:
texts = text_node.send((time, xran))
except StopIteration:
return
for shift, text in texts:
view.add_markup(text, xmask, shift=shift)
(view, msg, logs), time, width = yield (view, msg, logs)
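
# A brief sketch of pushing a one-off message through a Renderer; `renderer`
# is assumed to come from Renderer.create(...) with its task being driven.
def _sketch_add_message(renderer):
    return renderer.add_message(mu.Text("hello, world"), zindex=(0,))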
class ControllerSettings(cfg.Configurable):
r"""
Fields
------
update_interval : float
        The update interval of controllers. This is not related to the
        precision of time; it is the timeout of the stdin select call. The
        user will only notice the
difference in latency when closing the controller.
"""
update_interval: float = 0.1
class Controller:
def __init__(self, handlers_pipeline, clock, settings):
self.handlers_pipeline = handlers_pipeline
self.clock = clock
self.settings = settings
@staticmethod
def get_task(pipeline, clock, settings, term_settings, init_time):
return term.inkey(
Controller._control_node(
pipeline, clock, settings, term_settings, init_time
),
dt=settings.update_interval,
)
@staticmethod
@dn.datanode
def _control_node(pipeline, clock, settings, term_settings, init_time):
keycodes = term_settings.keycodes
timer = Controller._timer_node(clock, init_time, settings.update_interval)
with timer, pipeline:
while True:
_, keycode = yield
if keycode is None:
keyname = None
elif keycode in keycodes:
keyname = keycodes[keycode]
elif keycode in term.printable_ascii_names:
keyname = term.printable_ascii_names[keycode]
elif keycode[0] == "\x1b" and keycode[1:] in term.printable_ascii_names:
keyname = "Alt_" + term.printable_ascii_names[keycode[1:]]
else:
keyname = repr(keycode)
try:
time, ratio = timer.send(None)
pipeline.send((None, time, keyname, keycode))
except StopIteration:
return
@staticmethod
def _timer_node(clock, init_time, update_interval):
return dn.pipe(dn.time(0.0), clock.clock(init_time, 1))
@classmethod
def create(cls, settings, term_settings, init_time=0.0):
pipeline = dn.DynamicPipeline()
clock = clocks.Clock()
task = cls.get_task(pipeline, clock, settings, term_settings, init_time)
return task, cls(pipeline, clock, settings)
def add_handler(self, node, keyname=None):
return self.handlers_pipeline.add_node(self._filter_node(node, keyname), (0,))
def remove_handler(self, key):
self.handlers_pipeline.remove_node(key)
@dn.datanode
def _filter_node(self, node, name):
node = dn.DataNode.wrap(node)
with node:
while True:
_, time, keyname, keycode = yield
if keycode is None:
continue
if name is None or name == keyname:
try:
node.send((None, time, keyname, keycode))
except StopIteration:
return
class EngineLoader:
def __init__(self, engine_factory, delay=0.0):
self.engine_factory = engine_factory
self.delay = delay
self.required = set()
self.require_lock = threading.Lock()
self.engine_task = None
self.engine = None
@contextlib.contextmanager
def require(self):
key = object()
with self.require_lock:
self.required.add(key)
if self.engine is None:
self.engine_task, self.engine = self.engine_factory()
engine = self.engine
try:
yield engine
finally:
with self.require_lock:
self.required.remove(key)
@dn.datanode
def task(self):
while True:
yield
with self.require_lock:
if self.engine_task is None:
continue
with self.engine_task:
expiration = None
while True:
with self.require_lock:
current_time = time.perf_counter()
if expiration is None and not self.required:
expiration = current_time + self.delay
if expiration is not None and self.required:
expiration = None
if expiration is not None and current_time >= expiration:
self.engine_task = None
self.engine = None
break
yield
try:
self.engine_task.send(None)
except StopIteration:
raise RuntimeError("engine stopped unexpectedly")
class DevicesSettings(cfg.Configurable):
mixer = cfg.subconfig(MixerSettings)
detector = cfg.subconfig(DetectorSettings)
renderer = cfg.subconfig(RendererSettings)
controller = cfg.subconfig(ControllerSettings)
terminal = cfg.subconfig(term.TerminalSettings) | PypiClean |
/KratosShallowWaterApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/ShallowWaterApplication/benchmarks/base_benchmark_process.py | import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
from KratosMultiphysics.kratos_utilities import GenerateVariableListFromInput
def Factory(settings, model):
if not isinstance(settings, KM.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return BaseBenchmarkProcess(model, settings["Parameters"])
class BaseBenchmarkProcess(KM.Process):
"""The base class for the benchmarks."""
def __init__(self, model, settings ):
"""The constructor of the BaseBenchmarkProcess.
It is intended to be called from the constructor of deriving classes.
"""
super().__init__()
self.model = model
self.settings = settings
default_settings = KM.Parameters("""
{
"model_part_name" : "model_part",
"variables_list" : [],
"exact_variables_list" : [],
"error_variables_list" : [],
"benchmark_settings" : {}
}
"""
)
default_settings["benchmark_settings"] = self._GetBenchmarkDefaultSettings()
self.settings.RecursivelyValidateAndAssignDefaults(default_settings)
self.model_part = self.model[self.settings["model_part_name"].GetString()]
self.variables = GenerateVariableListFromInput(self.settings["variables_list"])
self.exact_variables = GenerateVariableListFromInput(self.settings["exact_variables_list"])
self.error_variables = GenerateVariableListFromInput(self.settings["error_variables_list"])
def ExecuteInitialize(self):
"""Set the topography and the initial conditions."""
KM.Timer.Start("Benchmark/Initial state")
time = self.model_part.ProcessInfo[KM.TIME]
for node in self.model_part.Nodes:
topography = self._Topography(node)
if topography:
node.SetSolutionStepValue(SW.TOPOGRAPHY, topography)
node.SetSolutionStepValue(SW.HEIGHT, self._Height(node, time))
node.SetSolutionStepValue(KM.VELOCITY, self._Velocity(node, time))
node.SetSolutionStepValue(KM.MOMENTUM, self._Momentum(node, time))
node.SetSolutionStepValue(SW.FREE_SURFACE_ELEVATION, self._FreeSurfaceElevation(node, time))
KM.Timer.Stop("Benchmark/Initial state")
def ExecuteBeforeOutputStep(self):
"""Compute the exact values of the benchmark and the error of the simulation."""
KM.Timer.Start("Benchmark/Exact values")
time = self.model_part.ProcessInfo[KM.TIME]
for (variable, exact_variable, error_variable) in zip(self.variables, self.exact_variables, self.error_variables):
if variable == SW.HEIGHT:
exact_value_function = self._Height
elif variable == KM.VELOCITY:
exact_value_function = self._Velocity
elif variable == KM.MOMENTUM:
exact_value_function = self._Momentum
elif variable == SW.FREE_SURFACE_ELEVATION:
exact_value_function = self._FreeSurfaceElevation
for node in self.model_part.Nodes:
exact_value = exact_value_function(node, time)
fem_value = node.GetSolutionStepValue(variable)
node.SetValue(exact_variable, exact_value)
node.SetValue(error_variable, fem_value - exact_value)
KM.Timer.Stop("Benchmark/Exact values")
def Check(self):
"""Check if the input values have physical sense."""
if len(self.variables) != len(self.exact_variables):
raise Exception("The input variables list does not match the input exact variables list")
if len(self.variables) != len(self.error_variables):
raise Exception("The input variables list does not match the input error variables list")
for (var, exact, error) in zip(self.variables, self.exact_variables, self.error_variables):
if KM.KratosGlobals.GetVariableType(var.Name()) != KM.KratosGlobals.GetVariableType(exact.Name()):
msg = var.Name() + " variable type does not match the " + exact.Name() + " variable type"
raise Exception(msg)
if KM.KratosGlobals.GetVariableType(var.Name()) != KM.KratosGlobals.GetVariableType(error.Name()):
msg = var.Name() + " variable type does not match the " + error.Name() + " variable type"
raise Exception(msg)
@classmethod
def _GetBenchmarkDefaultSettings(cls):
raise Exception("Calling the base class of the benchmark. Please, implement the custom benchmark settings")
def _Topography(self, coordinates):
return 0
def _Height(self, coordinates, time):
raise Exception("Calling the base class of the benchmark. Please, implement the custom benchmark")
def _Velocity(self, coordinates, time):
raise Exception("Calling the base class of the benchmark. Please, implement the custom benchmark")
def _Momentum(self, coordinates, time):
return [self._Height(coordinates, time)*v for v in self._Velocity(coordinates, time)]
def _FreeSurfaceElevation(self, coordinates, time):
return self._Topography(coordinates) + self._Height(coordinates, time) | PypiClean |
/Dulcinea-0.11.tar.gz/Dulcinea-0.11/lib/links.py | from datetime import datetime
from dulcinea.attachable import Attachable
from dulcinea.category import Category, Categorized, CategoryDatabase
from dulcinea.item import Item, ItemFolder
from dulcinea.spec import boolean, sequence, spec, init, add_getters_and_setters
from dulcinea.spec import specify, string
from dulcinea.util import wrap_paragraphs, sanitize_url
from durus.persistent import Persistent
import random
class LinkItem(Item, Attachable, Categorized):
link_url_is = spec(
(string, None),
"Link to relevant target URL")
text_is = spec(
(string, None),
"A few sentences describing the link.")
email_is = spec(
(string, None),
"Submitter's e-mail address")
first_class_is = spec(
boolean,
"Flag to indicate that this link can be shown in a prominent "
"place like the home page of a site")
def __init__(self):
Item.__init__(self)
Attachable.__init__(self)
Categorized.__init__(self)
init(self, first_class=False)
def get_allowed_mime_types(self, user=None):
"""() -> [string]
"""
if user and user.is_admin():
return Attachable.get_allowed_mime_types(self)
else:
return ["image/gif", "image/jpeg", "image/png",
"image/tiff", "image/x-ms-bmp"]
def set_link_url(self, url):
specify(self,
link_url=sanitize_url(url),
timestamp=datetime.now())
def is_first_class(self):
return self.first_class
def get_local_url(self):
"Return the URL to this Item"
return "/links/%s/" % self.get_key()
def as_text(self):
return "%s\n\n%s\nURL: %s" % (self.get_title(), self.get_text(),
self.get_link_url())
def as_email(self):
body = "%s\n" % self.get_title()
if self.get_link_url():
body += "(%s)\n" % self.get_link_url()
body += "\n%s" % wrap_paragraphs(self.get_text())
return self.title, body
add_getters_and_setters(LinkItem)
class LinkFolder(ItemFolder, CategoryDatabase):
"""
To add a new leaf Category, create the Category instance, set its 'name'
and 'label', choose a parent Category from the LinkFolder.categories
dictionary and call its 'add_child' to add the new category, and finally
call LinkFolder.add_category to add the new Category to categories.
"""
def __init__(self):
ItemFolder.__init__(self)
CategoryDatabase.__init__(self)
links_category = Category(name='all')
links_category.set_label('All Links')
self.add_category(links_category)
def links_with_category(self, category):
return [link for link in self.get_items() if link.in_category(category)]
def get_all_links(self):
return self.get_items()
def get_first_class_links(self):
return [link for link in self.get_items() if link.is_first_class()]
def random_first_class_link(self):
first_class_links = self.get_first_class_links()
if first_class_links:
return random.choice(first_class_links)
return None
def sort_reverse_by_date(link1, link2):
# Sort links chronologically with most recent first
return cmp(link2.get_timestamp(), link1.get_timestamp())
def sort_by_title(link1, link2):
title1 = (link1.get_title() and link1.get_title().lower()) or None
title2 = (link2.get_title() and link2.get_title().lower()) or None
return cmp(title1, title2)
class LinkTripleDatabase(Persistent):
links_is = sequence(element_spec=(string, string, (string, None)),
container_spec=list)
def __init__(self):
self.links = []
def __getitem__(self, index):
return self.links[index]
def __setitem__(self, index, link):
self._p_note_change()
self.links[index] = link
def __delitem__(self, index):
self._p_note_change()
del self.links[index]
def insert_link(self, link, index=None):
self._p_note_change()
if index is None:
index = len(self.links)
assert 0 <= index <= len(self.links), index
links_after = self.links[index:]
self.links = self.links[:index]
self.links.append(link)
self.links.extend(links_after)
add_getters_and_setters(LinkTripleDatabase) | PypiClean |
/Navycut-0.0.5.tar.gz/Navycut-0.0.5/navycut/cli/_exec_cli.py | from os import makedirs, listdir
from ..utils import path
from nc_console import Console
from ..utils.tools import (generate_random_secret_key,
snake_to_camel_case
)
from ..errors.misc import DirectoryAlreadyExistsError
__baseDir__ = path.abspath(__file__).parent.parent
def _create_boiler_project(name):
project_name = name
if path.exists(project_name):
Console.log.Error(f"A project already exists with the same name: {project_name}. Try some another name.")
raise DirectoryAlreadyExistsError(project_name)
makedirs(project_name)
project_dir = path.realpath(project_name)
boilerplate_dir = __baseDir__ / 'boiler_create_project'
Console.log.Info(f"Empty project folder created.\nProject name: {project_name}\nLocation: {str(project_dir)}")
#start reading the existing boiler plate
Console.log.Info('Started writing the default boiler files for project')
boilerplate_dir__files = listdir(boilerplate_dir)
boilerplate_dir__files.remove("__pycache__") if "__pycache__" in boilerplate_dir__files else None
for boiler_file in boilerplate_dir__files:
if path.isDir(boilerplate_dir / boiler_file) and boiler_file == "project_dir___boiler_dir":
makedirs(project_dir / project_name)
for bff in listdir(boilerplate_dir / boiler_file):
if bff == '__pycache__': continue
with open(boilerplate_dir / boiler_file / bff, 'r') as bfffb:
with open(project_dir / project_name / bff, 'w') as bffwb:
if bff == 'settings.py':
settings_data = bfffb.read()
#now replace the __secretkey__ with the real one at the new project directory.
settings_data=settings_data.replace("__secretkey_____boiler_var", generate_random_secret_key(53)).replace("project_name___boiler_var", project_name)
bffwb.write(settings_data)
elif bff == 'wsgi.py' or bff == "asgi.py":
wsgi_data = bfffb.read()
wsgi_data = wsgi_data.replace("project_name___boiler_var", project_name)
bffwb.write(wsgi_data)
else:
bffwb.write(bfffb.read())
Console.log.Info(f'Data from {boilerplate_dir / boiler_file / bff} successfully transferred to {project_dir / project_name / bff}')
elif path.isDir(boilerplate_dir / boiler_file) and boiler_file == 'templates':
with open(boilerplate_dir / boiler_file / "README.md", 'r') as tmr:
makedirs(project_dir / boiler_file)
Console.log.Info(f"Empty template directory created at: {project_dir/boiler_file}")
with open(project_dir / boiler_file / "README.md", 'w') as tmw:
tmw.write(tmr.read())
Console.log.Info(f"Data from README.md successfully transferred to {project_dir/boiler_file/'README.md'}")
else:
with open(boilerplate_dir / boiler_file, 'r') as fb:
with open(project_dir / boiler_file, 'w') as wb:
if boiler_file == 'manage.py':
manage_data = fb.read()
#now replace the __secretkey__ with the real one at the new project directory.
manage_data=manage_data.replace("project_name___boiler_var", project_name)
wb.write(manage_data)
else:
wb.write(fb.read())
Console.log.Info(f'Data from {boilerplate_dir / boiler_file} successfully transferred to {project_dir}/{boiler_file}')
Console.log.Success(f"project {project_name} created successfully.")
def _create_boiler_app(app_name, project_dir, *wargs):
app_dir = project_dir / app_name
if path.exists(app_dir):
Console.log.Error(f"A app already exists with the same name: {app_name} at {project_dir}. Try some another name.")
raise DirectoryAlreadyExistsError(app_dir)
makedirs(app_dir)
Console.log.Info(f"Empty app folder named {app_name} created.\nApp name: {app_name}\nLocation: {str(app_dir)}")
boilerplate_dir = __baseDir__ / 'boiler_create_app'
Console.log.Info('Started writing the default boiler files for app')
boilerplate_dir__files = listdir(boilerplate_dir)
boilerplate_dir__files.remove("__pycache__") if "__pycache__" in boilerplate_dir__files else None
for boiler_file in boilerplate_dir__files:
with open(boilerplate_dir / boiler_file, 'r') as fb:
with open(app_dir / boiler_file, 'w') as wb:
if boiler_file == 'sister.py':
sister_data = fb.read()
#now replace the import_name with the real one at the new project directory.
sister_data=sister_data.replace("import_name___boiler_var", app_name)
sister_data=sister_data.replace("classname___boiler_var", f"{snake_to_camel_case(app_name)}Sister")
wb.write(sister_data)
else:
wb.write(fb.read())
Console.log.Info(f'Data from {boilerplate_dir}/{boiler_file} successfully transferred to {app_dir}/{boiler_file}')
Console.log.Success(f"app {app_name} created successfully.") | PypiClean |
/Navix-0.3.10.tar.gz/Navix-0.3.10/README.md | # NAVIX: minigrid in JAX
[](https://www.repostatus.org/#wip)
[](https://github.com/epignatelli/navix/actions/workflows/CI.yml)
[](https://github.com/epignatelli/navix/actions/workflows/CD.yml)

**[Quickstart](#what-is-navix)** | **[Installation](#installation)** | **[Examples](#examples)** | **[Cite](#cite)**
## What is NAVIX?
NAVIX is [minigrid](https://github.com/Farama-Foundation/Minigrid) in JAX, **>1000x** faster with Autograd and XLA support.
You can see a superficial performance comparison [here](docs/performance.ipynb).
The library is in active development, and we are working on adding more environments and features.
If you want to join the development and contribute, please [open a discussion](https://github.com/epignatelli/navix/discussions/new?category=general) and let's have a chat!
## Installation
We currently support the OSs supported by JAX.
You can find a description [here](https://github.com/google/jax#installation).
You might want to follow the same guide to install jax for your favourite accelerator
(e.g. [CPU](https://github.com/google/jax#pip-installation-cpu),
[GPU](https://github.com/google/jax#pip-installation-gpu-cuda-installed-locally-harder), or
[TPU](https://github.com/google/jax#pip-installation-colab-tpu)
).
- ### Stable
Then, install the stable version of `navix` and its dependencies with:
```bash
pip install navix
```
- ### Nightly
Or, if you prefer to install the latest version from source:
```bash
pip install git+https://github.com/epignatelli/navix
```
## Examples
### XLA compilation
One straightforward use case is to accelerate the computation of the environment with XLA compilation.
For example, here we vectorise the environment to run multiple environments in parallel, and compile **the full training run**.
You can find a partial performance comparison with [minigrid](https://github.com/Farama-Foundation/Minigrid) in the [docs](docs/profiling.ipynb).
```python
import jax
import jax.numpy as jnp
import navix as nx
def run(seed):
env = nx.environments.Room(16, 16, 8)
key = jax.random.PRNGKey(seed)
timestep = env.reset(key)
actions = jax.random.randint(key, (N_TIMESTEPS,), 0, 6)
def body_fun(timestep, action):
timestep = env.step(timestep, jnp.asarray(action))
return timestep, ()
return jax.lax.scan(body_fun, timestep, jnp.asarray(actions, dtype=jnp.int32))[0]
final_timestep = jax.jit(jax.vmap(run))(jax.numpy.arange(1000))
```
### Backpropagation through the environment
Another use case is to backpropagate through the environment transition function, for example to learn a world model.
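A minimal sketch of the idea (illustrative only: `timestep.observation` and the `world_model` function are assumptions made for the example, not necessarily the real NAVIX API):
```python
import jax
import jax.numpy as jnp
import navix as nx

env = nx.environments.Room(16, 16, 8)
timestep = env.reset(jax.random.PRNGKey(0))

def world_model(params, obs, action):
    # stand-in for a learned dynamics network
    return obs * params

def loss(params, timestep, action):
    pred = world_model(params, timestep.observation, action)
    next_timestep = env.step(timestep, action)  # differentiable transition
    return jnp.mean((pred - next_timestep.observation) ** 2)

grads = jax.grad(loss)(jnp.ones(()), timestep, jnp.asarray(0))
```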
## Cite
If you use `navix` please consider citing it as:
```bibtex
@misc{pignatelli2023navix,
author = {Pignatelli, Eduardo},
title = {Navix: Accelerated gridworld navigation with JAX},
year = {2023},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/epignatelli/navix}}
}
| PypiClean |
/BatchAdapt-1.2.tar.gz/BatchAdapt-1.2/README.md | RefGeneratr: Dynamic multi-loci/multi-repeat tract microsatellite reference sequence generator
==============================================================================================
RefGeneratr (generatr) is a python script/package which generates a reference genetic sequence (*.fasta) for use in sequence alignment.
Microsatellite repeat regions can vary in scope and loci count, so this software can dynamically handle an arbitrary
number of repeat regions within each locus, with intervening sequences if desired. End users can specify as many regions/loci as desired through
a simple XML document. This is parsed, and output in the standard *.fasta format is provided.
Generatr requires lxml, which setuptools should install for you during setup.
What's New
==========
Everything
Installation Prerequisites
==========================
Assuming that lxml is installed, or you wish setuptools to handle installation for you, the following should suffice. For now, download the source and run:
$ python setup.py install
You may or may not require sudo, depending on your system. This will install the package for you, so it can be launched with 'generatr' from the command line.
Eventually, the package will be uploaded to PyPI so that you can install it directly from a terminal.
Hardware Requirements
=====================
Nothing spectacular, any computer should run it fine. However, if you desire to generate a reference with a large amount of repeat units and/or loci, available
system memory will be a bottleneck.
Usage
=====
Here's how to use generatr:
$ generatr [-v/--verbose] [-i/--input <Path to input.xml>] [-o/--output <Desired *.fasta file output>]
-v enables terminal user feedback.
-i is a path to an XML file containing your desired information, which adheres to the requirements outlined below.
-o is a path to your desired output *.fasta/*.fa/*.fas file.
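For example, assuming an input file named repeats.xml (a hypothetical name) in the working directory:

$ generatr -v -i repeats.xml -o reference.fasta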
XML Requirements
=====
An example XML file is as follows:
<?xml version="1.0"?>
<data>
<loci label="example_loci_one">
<input type="fiveprime" flank="GCGACCCTGGAAAAGCTGATGAAGGCCTTCGAGTCCCTCAAGTCCTTC"/>
<input type="repeat_region" order="1" unit="CAG" start="1" end="100"/>
<input type="intervening" sequence="CAACAGCCGCCA" prior="1"/>
<input type="repeat_region" order="2" unit="CCG" start="1" end="20"/>
<input type="threeprime" flank="CCTCCTCAGCTTCCTCAGCCGCCGCCGCAGGCACAGCCGCTGCT"/>
</loci>
</data>
The input regions have been made as straightforward as possible. If you desire multiple loci within one reference file,
additional <loci> tags should be added, with the respective sequence parameters nested within. There is technically no limit
on how many loci you can specify, although testing has not gone beyond reasonable scales.
The possible sequence parameters are as follows:
<input type="fiveprime" flank="<string>"/>
This is the input for a five prime flank sequence. The 'type' must be 'fiveprime', and any valid sequence can be present within
the 'flank' variable. Valid sequence is a string that consists of A,G,C,T,U,N. No other characters are considered valid.
<input type="repeat_region" order="<integer>" unit="<string>" start="<integer>" end="<integer>"/>
This is the input for a repeat region. The order flag indicates where in the 'sequence' it resides. Unit equates to the repeated unit
of sequence, and start/end are integers for the range you wish this repeat unit to repeat over. Generatr is useful as it can handle an unspecified
number of repeat regions for each loci.
<input type="intervening" sequence="<string>" prior="<integer>"
The intervening flag is for interrupted repeat sequences. Your intervening sequence is specified under 'sequence', and the repeat_region
which this intervening sequence follows, is indicated in 'prior'. E.G. if an intervening sequence follows a repeat_region that was order="1",
the intervening prior value would also be "1". Generatr can handle zero, one or multiple intervening sequences; the only stipulation for the sequence
to appear correctly is for the user to accurately input the preceding repeat_region's 'order' value under the respective intervening region's 'prior' value.
<input type="threeprime" flank="<string>"/>
The input for a three prime flank follows the same logic as described for five prime.
Thanks for reading. If you have any questions or trouble with installation, please feel free to e-mail me at alastair[dot]maxwell[at]glasgow[dot]ac[dot]uk.
| PypiClean |
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/lib/plot/README.md | # wx.lib.plot
A simple, light-weight plotting package for wxPython Phoenix.
Based on wxPlotCanvas
Written by K. Hinsen, R. Srinivasan;
Ported to wxPython: Harm van der Heijden, Feb 1999
This is a simple, light weight plotting module that can be used with
Boa or easily integrated into your own wxPython application. The
emphasis is on small size and fast plotting for large data sets. It
has a reasonable number of features to do line and scatter graphs
easily as well as simple bar graphs. It is not as sophisticated or
as powerful as SciPy Plt or Chaco. Both of these are great packages
but consume huge amounts of computer resources for simple plots.
They can be found at http://scipy.com
| PypiClean |
/Atom_avatar-0.0.3.tar.gz/Atom_avatar-0.0.3/avatar/index.py | from io import BytesIO, StringIO
from random import randrange
from cairosvg import svg2png
from jinja2 import Template, Environment, FileSystemLoader
import json
import os
# a= open('male_back.json')
# b= a.read()
# print(type(json.loads(b)))
path=os.path.dirname(os.path.realpath(__file__))
load_base = FileSystemLoader(path + '/templates')
# open("./male_back.json", )
env = Environment(loader=load_base)
env.trim_blocks = True
env.lstrip_blocks = True
template = env.get_template('base.svg')
male_back = json.load(open(path+'/male_back.json'))
male_face = json.load(open(path+'/male_face.json'))
male_eyes = json.load(open(path+'/male_eyes.json'))
male_ears = json.load(open(path+'/male_ears.json'))
male_iris = json.load(open(path+'/male_iris.json'))
male_nose = json.load(open(path+'/male_nose.json'))
male_mouth = json.load(open(path+'/male_mouth.json'))
male_brows = json.load(open(path+'/male_brows.json'))
male_mustache = json.load(open(path+'/male_mustache.json'))
male_beard = json.load(open(path+'/male_beard.json'))
male_hair = json.load(open(path+'/male_hair.json'))
male_clothes = json.load(open(path+'/male_clothes.json'))
# print(male_back)
# peyes = male_eyes['eyesback']['shapes'][0][0]['left']
# pback=male_back['backs']['shapes'][0]['single']
# print(template.render(back=peyes))
FACELOLORS = [
"#f6e4e2",
"#fbd5c0",
"#ffd0bc",
"#f4baa3",
"#ebaa82",
"#d79468",
"#cb8d60",
"#b2713b",
"#8c5537",
"#875732",
"#73512d",
"#582812"
]
HAIRCOLORS = [
"#2a232b",
"#080806",
"#3b3128",
"#4e4341",
"#504543",
"#6b4e40",
"#a68469",
"#b79675",
"#decfbc",
"#ddbc9b",
"#a46c47",
"#543c32",
"#73625b",
"#b84131",
"#d6c4c4",
"#fef6e1",
"#cac1b2",
"#b7513b",
"#caa478",
]
MATERIALCOLOR = [
"#386e77",
"#6a3a47",
"#591956",
"#864025",
"#dcc96b",
"#638e2f",
"#3f82a4",
"#335529",
"#82cbe2",
"#39557e",
"#1e78a2",
"#a44974",
"#152c5e",
"#9d69bc",
"#601090",
"#d46fbb",
"#cbe9ee",
"#4b2824",
"#653220",
"#1d282e"
]
FRONTEYESCOLORS = [
"#000000",
"#191c29",
"#0f190c",
"#09152e",
"#302040",
"#1b2a40",
"#2c1630",
"#2a150e",
"#131111",
"#1b1929",
"#09112e",
"#092e0c",
"#2e0914",
"#582311",
"#210d34",
"#153a4d",
"#d6f7f4",
"#5fa2a5",
"#782c76",
"#587d90"
]
IRISCOLORS = [
"#4e60a3",
"#7085b3",
"#b0b9d9",
"#3c8d8e",
"#3e4442",
"#66724e",
"#7b5c33",
"#ddb332",
"#8ab42d",
"#681711",
"#282978",
"#9b1d1b",
"#4d3623",
"#9fae70",
"#724f7c",
"#fdd70e",
"#00f0f1",
"#4faaab",
"#ea02f5",
"#bd1c1b"
]
BACKCOLOR = [
"#c4c7f3",
"#F1D4AF",
"#774F38",
"#ECE5CE",
"#C5E0DC",
"#594F4F",
"#547980",
"#45ADA8",
"#9DE0AD",
"#E5FCC2",
"#00A8C6",
"#40C0CB",
"#F9F2E7",
"#AEE239",
"#14305c",
"#5E8C6A",
"#88A65E",
"#036564",
"#CDB380",
"#ce6130"
]
MOUTHCOLORS = [
"#DA7C87",
"#F18F77",
"#e0a4a0",
"#9D6D5F",
"#A06B59",
"#904539",
"#e28c7c",
"#9B565F",
"#ff5027",
"#e66638",
"#fe856a",
"#E2929B",
"#a96a47",
"#335529",
"#1e78a2",
"#39557e",
"#6f147c",
"#43194b",
"#98a2a2",
"#161925"
]
class Canvas:
def __init__(self, back, face, eyes_back, eyes_front,ears, iris, nose, mouth, brows, mustache, beard, hair, cloth,
haircolor,backcolor,faceColor, materialcolor, fronteyescolor,iriscolor,mouthcolors, type=0,) -> None:
self.back = back
self.face = face
self.eyes_back = eyes_back
self.eyes_front = eyes_front
self.ear = ears
self.iris = iris
self.nose = nose
self.mouth = mouth
self.brows = brows
self.mustache = mustache
self.beard = beard
self.hair = hair
self.cloth = cloth
self.type=type
self.haircolor=haircolor
self.backcolor= backcolor
self.facecolor=faceColor
self.materialcolor=materialcolor
self.fronteyescolor=fronteyescolor
self.iriscolor=iriscolor
self.mouthcolor=mouthcolors
def canvas(self, obj=None):
context = self.make()
return template.render(context)
def toPng(self, temp):
arr = bytes(temp, 'utf-8')
return svg2png(arr, write_to="ade.png")
def make(self):
type=self.type
pback = male_back['backs']['shapes'][0]['single']
peyesback = male_eyes['eyesback']['shapes'][type][self.eyes_back] # not thesame with front
peyesfront = male_eyes['eyesfront']['shapes'][type][self.eyes_front]
pears = male_ears['ears']['shapes'][type][self.ear]
piris = male_iris['eyesiris']['shapes'][type][self.iris]
pnose = male_nose['nose']['shapes'][type][self.nose]['single']
pmouth = male_mouth['mouth']['shapes'][type][self.mouth]['single']
pbrows = male_brows['eyebrows']['shapes'][type][self.brows]
pmustache = male_mustache['mustache']['shapes'][type][self.mustache]['single']
pbeard = male_beard['beard']['shapes'][type][self.beard]['single']
phair = male_hair['hair']['shapes'][type][self.hair]
pclothes = male_clothes['clothes']['shapes'][type][self.cloth]['single']
faceshape = male_face['faceshape']['shapes'][type][self.face]['single']
chinshadow = male_face['chinshadow']['shapes'][type][randrange(3)]['single']
haircolor=self.haircolor
backcolor=self.backcolor
facecolor=self.facecolor
materialcolor=self.materialcolor
fronteyescolor=self.fronteyescolor
iriscolor=self.iriscolor
mouthcolor=self.mouthcolor
# humanbody=male_face['humanbody']['shapes'][0][0]['single']
return {
'back': pback,
'eyesback': peyesback,
'eyesfront': peyesfront,
'ears': pears,
'iris': piris,
'nose': pnose,
'brows': pbrows,
'mouth': pmouth,
'mustache': pmustache,
'beard': pbeard,
'hair': phair,
'cloth': pclothes,
'faceshape': faceshape,
'chinshadow': chinshadow,
# 'humanbody':humanbody,
'haircolor':haircolor,
'backcolor':backcolor,
'facecolor':facecolor,
'materialcolor':materialcolor,
'fronteyescolor':fronteyescolor,
'iriscolors':iriscolor,
'mouthcolor':mouthcolor
}
def toPng(temp):
# arr = bytes(temp, 'utf-8')
# byte=BytesIO(arr)
return svg2png(temp, output_height=200,output_width=200)
def toPngfile(temp:str, outfile:str):
svg2png(temp, write_to=outfile, output_height=200,output_width=200)
# a = Canvas(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,2,"#582311",
# "#210d34",
# "#153a4d",
# "#d6f7f4",
# "#5fa2a5",
# "#782c76",
# "#587d90").canvas()
# b=toPng(a)
# print(b)
# with open("./sample.svg", 'w') as f:
# f.write(a)
# f.close() | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/fs-minipass/node_modules/minipass/README.md | # minipass
A _very_ minimal implementation of a [PassThrough
stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
[It's very
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
for objects, strings, and buffers.
Supports `pipe()`ing (including multi-`pipe()` and backpressure transmission),
buffering data until either a `data` event handler or `pipe()` is added (so
you don't lose the first chunk), and most other cases where PassThrough is
a good idea.
There is a `read()` method, but it's much more efficient to consume data
from this stream via `'data'` events or by calling `pipe()` into some other
stream. Calling `read()` requires the buffer to be flattened in some
cases, which requires copying memory.
If you set `objectMode: true` in the options, then whatever is written will
be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
ensure proper Streams semantics when `read(n)` is called.
`objectMode` can also be set by doing `stream.objectMode = true`, or by
writing any non-string/non-buffer data. `objectMode` cannot be set to
false once it is set.
This is not a `through` or `through2` stream. It doesn't transform the
data, it just passes it right through. If you want to transform the data,
extend the class, and override the `write()` method. Once you're done
transforming the data however you want, call `super.write()` with the
transform output.
For some examples of streams that extend Minipass in various ways, check
out:
- [minizlib](http://npm.im/minizlib)
- [fs-minipass](http://npm.im/fs-minipass)
- [tar](http://npm.im/tar)
- [minipass-collect](http://npm.im/minipass-collect)
- [minipass-flush](http://npm.im/minipass-flush)
- [minipass-pipeline](http://npm.im/minipass-pipeline)
- [tap](http://npm.im/tap)
- [tap-parser](http://npm.im/tap-parser)
- [treport](http://npm.im/treport)
- [minipass-fetch](http://npm.im/minipass-fetch)
- [pacote](http://npm.im/pacote)
- [make-fetch-happen](http://npm.im/make-fetch-happen)
- [cacache](http://npm.im/cacache)
- [ssri](http://npm.im/ssri)
- [npm-registry-fetch](http://npm.im/npm-registry-fetch)
- [minipass-json-stream](http://npm.im/minipass-json-stream)
- [minipass-sized](http://npm.im/minipass-sized)
## Differences from Node.js Streams
There are several things that make Minipass streams different from (and in
some ways superior to) Node.js core streams.
Please read these caveats if you are familiar with node-core streams and
intend to use Minipass streams in your programs.
You can avoid most of these differences entirely (for a very
small performance penalty) by setting `{async: true}` in the
constructor options.
### Timing
Minipass streams are designed to support synchronous use-cases. Thus, data
is emitted as soon as it is available, always. It is buffered until read,
but no longer. Another way to look at it is that Minipass streams are
exactly as synchronous as the logic that writes into them.
This can be surprising if your code relies on `PassThrough.write()` always
providing data on the next tick rather than the current one, or being able
to call `resume()` and not have the entire buffer disappear immediately.
However, without this synchronicity guarantee, there would be no way for
Minipass to achieve the speeds it does, or support the synchronous use
cases that it does. Simply put, waiting takes time.
This non-deferring approach makes Minipass streams much easier to reason
about, especially in the context of Promises and other flow-control
mechanisms.
Example:
```js
const Minipass = require('minipass')
const stream = new Minipass({ async: true })
stream.on('data', () => console.log('data event'))
console.log('before write')
stream.write('hello')
console.log('after write')
// output:
// before write
// data event
// after write
```
### Exception: Async Opt-In
If you wish to have a Minipass stream with behavior that more
closely mimics Node.js core streams, you can set the stream in
async mode either by setting `async: true` in the constructor
options, or by setting `stream.async = true` later on.
```js
const Minipass = require('minipass')
const asyncStream = new Minipass({ async: true })
asyncStream.on('data', () => console.log('data event'))
console.log('before write')
asyncStream.write('hello')
console.log('after write')
// output:
// before write
// after write
// data event <-- this is deferred until the next tick
```
Switching _out_ of async mode is unsafe, as it could cause data
corruption, and so is not enabled. Example:
```js
const Minipass = require('minipass')
const stream = new Minipass({ encoding: 'utf8' })
stream.on('data', chunk => console.log(chunk))
stream.async = true
console.log('before writes')
stream.write('hello')
setStreamSyncAgainSomehow(stream) // <-- this doesn't actually exist!
stream.write('world')
console.log('after writes')
// hypothetical output would be:
// before writes
// world
// after writes
// hello
// NOT GOOD!
```
To avoid this problem, once set into async mode, any attempt to
make the stream sync again will be ignored.
```js
const Minipass = require('minipass')
const stream = new Minipass({ encoding: 'utf8' })
stream.on('data', chunk => console.log(chunk))
stream.async = true
console.log('before writes')
stream.write('hello')
stream.async = false // <-- no-op, stream already async
stream.write('world')
console.log('after writes')
// actual output:
// before writes
// after writes
// hello
// world
```
### No High/Low Water Marks
Node.js core streams will optimistically fill up a buffer, returning `true`
on all writes until the limit is hit, even if the data has nowhere to go.
Then, they will not attempt to draw more data in until the buffer size dips
below a minimum value.
Minipass streams are much simpler. The `write()` method will return `true`
if the data has somewhere to go (which is to say, given the timing
guarantees, that the data is already there by the time `write()` returns).
If the data has nowhere to go, then `write()` returns false, and the data
sits in a buffer, to be drained out immediately as soon as anyone consumes
it.
Since nothing is ever buffered unnecessarily, there is much less
copying of data, and less bookkeeping about buffer capacity levels.
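A quick illustration of the `write()` return value (illustrative):
```js
const Minipass = require('minipass')
const mp = new Minipass()
console.log(mp.write('foo')) // false: no consumer yet, so the chunk is buffered
mp.on('data', chunk => {}) // adding a listener resumes the stream
console.log(mp.write('bar')) // true: the data goes straight to the listener
```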
### Hazards of Buffering (or: Why Minipass Is So Fast)
Since data written to a Minipass stream is immediately written all the way
through the pipeline, and `write()` always returns true/false based on
whether the data was fully flushed, backpressure is communicated
immediately to the upstream caller. This minimizes buffering.
Consider this case:
```js
const {PassThrough} = require('stream')
const p1 = new PassThrough({ highWaterMark: 1024 })
const p2 = new PassThrough({ highWaterMark: 1024 })
const p3 = new PassThrough({ highWaterMark: 1024 })
const p4 = new PassThrough({ highWaterMark: 1024 })
p1.pipe(p2).pipe(p3).pipe(p4)
p4.on('data', () => console.log('made it through'))
// this returns false and buffers, then writes to p2 on next tick (1)
// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
// on next tick (4)
// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
// 'drain' on next tick (5)
// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
// tick (7)
p1.write(Buffer.alloc(2048)) // returns false
```
Along the way, the data was buffered and deferred at each stage, and
multiple event deferrals happened, for an unblocked pipeline where it was
perfectly safe to write all the way through!
Furthermore, setting a `highWaterMark` of `1024` might lead someone reading
the code to think an advisory maximum of 1KiB is being set for the
pipeline. However, the actual advisory buffering level is the _sum_ of
`highWaterMark` values, since each one has its own bucket.
Consider the Minipass case:
```js
const m1 = new Minipass()
const m2 = new Minipass()
const m3 = new Minipass()
const m4 = new Minipass()
m1.pipe(m2).pipe(m3).pipe(m4)
m4.on('data', () => console.log('made it through'))
// m1 is flowing, so it writes the data to m2 immediately
// m2 is flowing, so it writes the data to m3 immediately
// m3 is flowing, so it writes the data to m4 immediately
// m4 is flowing, so it fires the 'data' event immediately, returns true
// m4's write returned true, so m3 is still flowing, returns true
// m3's write returned true, so m2 is still flowing, returns true
// m2's write returned true, so m1 is still flowing, returns true
// No event deferrals or buffering along the way!
m1.write(Buffer.alloc(2048)) // returns true
```
It is extremely unlikely that you _don't_ want to buffer any data written,
or _ever_ buffer data that can be flushed all the way through. Neither
node-core streams nor Minipass ever fail to buffer written data, but
node-core streams do a lot of unnecessary buffering and pausing.
As always, the faster implementation is the one that does less stuff and
waits less time to do it.
### Immediately emit `end` for empty streams (when not paused)
If a stream is not paused, and `end()` is called before writing any data
into it, then it will emit `end` immediately.
If you have logic that occurs on the `end` event which you don't want to
potentially happen immediately (for example, closing file descriptors,
moving on to the next entry in an archive parse stream, etc.) then be sure
to call `stream.pause()` on creation, and then `stream.resume()` once you
are ready to respond to the `end` event.
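For example, a sketch of the pause-then-resume pattern:
```js
const mp = new Minipass()
mp.pause()
mp.end() // empty stream, but 'end' is deferred because we are paused
mp.on('end', () => console.log('all done'))
mp.resume() // now 'end' is emitted
```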
However, this is _usually_ not a problem because:
### Emit `end` When Asked
One hazard of immediately emitting `'end'` is that you may not yet have had
a chance to add a listener. In order to avoid this hazard, Minipass
streams safely re-emit the `'end'` event if a new listener is added after
`'end'` has been emitted.
Ie, if you do `stream.on('end', someFunction)`, and the stream has already
emitted `end`, then it will call the handler right away. (You can think of
this somewhat like attaching a new `.then(fn)` to a previously-resolved
Promise.)
To prevent calling handlers multiple times who would not expect multiple
ends to occur, all listeners are removed from the `'end'` event whenever it
is emitted.
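For example:
```js
const mp = new Minipass()
mp.end() // empty and not paused, so 'end' fires immediately
mp.on('end', () => console.log('still called')) // late listener, re-emitted to
```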
### Emit `error` When Asked
The most recent error object passed to the `'error'` event is
stored on the stream. If a new `'error'` event handler is added,
and an error was previously emitted, then the event handler will
be called immediately (or on `process.nextTick` in the case of
async streams).
This makes it much more difficult to end up trying to interact
with a broken stream, if the error handler is added after an
error was previously emitted.
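For example:
```js
const mp = new Minipass()
mp.emit('error', new Error('oops'))
// the listener is added after the fact, but still receives the stored error
mp.on('error', er => console.log(er.message)) // 'oops'
```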
### Impact of "immediate flow" on Tee-streams
A "tee stream" is a stream piping to multiple destinations:
```js
const tee = new Minipass()
tee.pipe(dest1)
tee.pipe(dest2)
tee.write('foo') // goes to both destinations
```
Since Minipass streams _immediately_ process any pending data through the
pipeline when a new pipe destination is added, this can have surprising
effects, especially when a stream comes in from some other function and may
or may not have data in its buffer.
```js
// WARNING! WILL LOSE DATA!
const src = new Minipass()
src.write('foo')
src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
src.pipe(dest2) // gets nothing!
```
One solution is to create a dedicated tee-stream junction that pipes to
both locations, and then pipe to _that_ instead.
```js
// Safe example: tee to both places
const src = new Minipass()
src.write('foo')
const tee = new Minipass()
tee.pipe(dest1)
tee.pipe(dest2)
src.pipe(tee) // tee gets 'foo', pipes to both locations
```
The same caveat applies to `on('data')` event listeners. The first one
added will _immediately_ receive all of the data, leaving nothing for the
second:
```js
// WARNING! WILL LOSE DATA!
const src = new Minipass()
src.write('foo')
src.on('data', handler1) // receives 'foo' right away
src.on('data', handler2) // nothing to see here!
```
Using a dedicated tee-stream can be used in this case as well:
```js
// Safe example: tee to both data handlers
const src = new Minipass()
src.write('foo')
const tee = new Minipass()
tee.on('data', handler1)
tee.on('data', handler2)
src.pipe(tee)
```
All of the hazards in this section are avoided by setting `{
async: true }` in the Minipass constructor, or by setting
`stream.async = true` afterwards. Note that this does add some
overhead, so should only be done in cases where you are willing
to lose a bit of performance in order to avoid having to refactor
program logic.
## USAGE
It's a stream! Use it like a stream and it'll most likely do what you
want.
```js
const Minipass = require('minipass')
const mp = new Minipass(options) // optional: { encoding, objectMode }
mp.write('foo')
mp.pipe(someOtherStream)
mp.end('bar')
```
### OPTIONS
* `encoding` How would you like the data coming _out_ of the stream to be
encoded? Accepts any values that can be passed to `Buffer.toString()`.
* `objectMode` Emit data exactly as it comes in. This will be flipped on
by default if you write() something other than a string or Buffer at any
point. Setting `objectMode: true` will prevent setting any encoding
value.
* `async` Defaults to `false`. Set to `true` to defer data
emission until next tick. This reduces performance slightly,
but makes Minipass streams use timing behavior closer to Node
core streams. See [Timing](#timing) for more details.
### API
Implements the user-facing portions of Node.js's `Readable` and `Writable`
streams.
### Methods
* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
base Minipass class, the same data will come out.) Returns `false` if
the stream will buffer the next write, or true if it's still in "flowing"
mode.
* `end([chunk, [encoding]], [callback])` - Signal that you have no more
data to write. This will queue an `end` event to be fired when all the
data has been consumed.
* `setEncoding(encoding)` - Set the encoding for data coming out of the stream.
This can only be done once.
* `pause()` - No more data for a while, please. This also prevents `end`
from being emitted for empty streams until the stream is resumed.
* `resume()` - Resume the stream. If there's data in the buffer, it is all
discarded. Any buffered events are immediately emitted.
* `pipe(dest)` - Send all output to the stream provided. When
data is emitted, it is immediately written to any and all pipe
destinations. (Or written on next tick in `async` mode.)
* `unpipe(dest)` - Stop piping to the destination stream. This
is immediate, meaning that any asynchronously queued data will
_not_ make it to the destination when running in `async` mode.
* `options.end` - Boolean, end the destination stream when
the source stream ends. Default `true`.
* `options.proxyErrors` - Boolean, proxy `error` events from
the source stream to the destination stream. Note that
errors are _not_ proxied after the pipeline terminates,
either due to the source emitting `'end'` or manually
unpiping with `src.unpipe(dest)`. Default `false`. See the sketch
after this list.
* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
events are given special treatment, however. (See below under "events".)
* `promise()` - Returns a Promise that resolves when the stream emits
`end`, or rejects if the stream emits `error`.
* `collect()` - Return a Promise that resolves on `end` with an array
containing each chunk of data that was emitted, or rejects if the stream
emits `error`. Note that this consumes the stream data.
* `concat()` - Same as `collect()`, but concatenates the data into a single
Buffer object. Will reject the returned promise if the stream is in
objectMode, or if it goes into objectMode by the end of the data.
* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
provided, then consume all of it. If `n` bytes are not available, then
it returns null. **Note** consuming streams in this way is less
efficient, and can lead to unnecessary Buffer copying.
* `destroy([er])` - Destroy the stream. If an error is provided, then an
`'error'` event is emitted. If the stream has a `close()` method, and
has not emitted a `'close'` event yet, then `stream.close()` will be
called. Any Promises returned by `.promise()`, `.collect()` or
`.concat()` will be rejected. After being destroyed, writing to the
stream will emit an error. No more data will be emitted if the stream is
destroyed, even if it was previously buffered.
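As referenced above, a sketch of `pipe()` with options:
```js
// keep dest open when src ends, and forward src 'error' events to dest
src.pipe(dest, { end: false, proxyErrors: true })
```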
### Properties
* `bufferLength` Read-only. Total number of bytes buffered, or in the case
of objectMode, the total number of objects.
* `encoding` The encoding that has been set. (Setting this is equivalent
to calling `setEncoding(enc)` and has the same prohibition against
setting multiple times.)
* `flowing` Read-only. Boolean indicating whether a chunk written to the
stream will be immediately emitted.
* `emittedEnd` Read-only. Boolean indicating whether the end-ish events
(ie, `end`, `prefinish`, `finish`) have been emitted. Note that
listening on any end-ish event will immediately re-emit it if it has
already been emitted.
* `writable` Whether the stream is writable. Default `true`. Set to
`false` when `end()`
* `readable` Whether the stream is readable. Default `true`.
* `buffer` A [yallist](http://npm.im/yallist) linked list of chunks written
to the stream that have not yet been emitted. (It's probably a bad idea
to mess with this.)
* `pipes` A [yallist](http://npm.im/yallist) linked list of streams that
this stream is piping into. (It's probably a bad idea to mess with
this.)
* `destroyed` A getter that indicates whether the stream was destroyed.
* `paused` True if the stream has been explicitly paused, otherwise false.
* `objectMode` Indicates whether the stream is in `objectMode`. Once set
to `true`, it cannot be set to `false`.
### Events
* `data` Emitted when there's data to read. Argument is the data to read.
This is never emitted while not flowing. If a listener is attached, that
will resume the stream.
* `end` Emitted when there's no more data to read. This will be emitted
immediately for empty streams when `end()` is called. If a listener is
attached, and `end` was already emitted, then it will be emitted again.
All listeners are removed when `end` is emitted.
* `prefinish` An end-ish event that follows the same logic as `end` and is
emitted in the same conditions where `end` is emitted. Emitted after
`'end'`.
* `finish` An end-ish event that follows the same logic as `end` and is
emitted in the same conditions where `end` is emitted. Emitted after
`'prefinish'`.
* `close` An indication that an underlying resource has been released.
Minipass does not emit this event, but will defer it until after `end`
has been emitted, since it throws off some stream libraries otherwise.
* `drain` Emitted when the internal buffer empties, and it is again
suitable to `write()` into the stream.
* `readable` Emitted when data is buffered and ready to be read by a
consumer.
* `resume` Emitted when stream changes state from buffering to flowing
mode. (Ie, when `resume` is called, `pipe` is called, or a `data` event
listener is added.)
### Static Methods
* `Minipass.isStream(stream)` Returns `true` if the argument is a stream,
and false otherwise. To be considered a stream, the object must be
either an instance of Minipass, or an EventEmitter that has either a
`pipe()` method, or both `write()` and `end()` methods. (Pretty much any
stream in node-land will return `true` for this.)
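For example:
```js
Minipass.isStream(new Minipass()) // true
Minipass.isStream(process.stdout) // true (an EventEmitter with a pipe() method)
Minipass.isStream({}) // false
```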
## EXAMPLES
Here are some examples of things you can do with Minipass streams.
### simple "are you done yet" promise
```js
mp.promise().then(() => {
// stream is finished
}, er => {
// stream emitted an error
})
```
### collecting
```js
mp.collect().then(all => {
// all is an array of all the data emitted
// encoding is supported in this case, so
// so the result will be a collection of strings if
// an encoding is specified, or buffers/objects if not.
//
// In an async function, you may do
// const data = await stream.collect()
})
```
### collecting into a single blob
This is a bit slower because it concatenates the data into one chunk for
you, but if you're going to do it yourself anyway, it's convenient this
way:
```js
mp.concat().then(onebigchunk => {
// onebigchunk is a string if the stream
// had an encoding set, or a buffer otherwise.
})
```
### iteration
You can iterate over streams synchronously or asynchronously in platforms
that support it.
Synchronous iteration will end when the currently available data is
consumed, even if the `end` event has not been reached. In string and
buffer mode, the data is concatenated, so unless multiple writes are
occurring in the same tick as the `read()`, sync iteration loops will
generally only have a single iteration.
To consume chunks in this way exactly as they have been written, with no
flattening, create the stream with the `{ objectMode: true }` option.
```js
const mp = new Minipass({ objectMode: true })
mp.write('a')
mp.write('b')
for (let letter of mp) {
console.log(letter) // a, b
}
mp.write('c')
mp.write('d')
for (let letter of mp) {
console.log(letter) // c, d
}
mp.write('e')
mp.end()
for (let letter of mp) {
console.log(letter) // e
}
for (let letter of mp) {
console.log(letter) // nothing
}
```
Asynchronous iteration will continue until the end event is reached,
consuming all of the data.
```js
const mp = new Minipass({ encoding: 'utf8' })
// some source of some data
let i = 5
const inter = setInterval(() => {
if (i-- > 0)
mp.write(Buffer.from('foo\n', 'utf8'))
else {
mp.end()
clearInterval(inter)
}
}, 100)
// consume the data with asynchronous iteration
async function consume () {
for await (let chunk of mp) {
console.log(chunk)
}
return 'ok'
}
consume().then(res => console.log(res))
// logs `foo\n` 5 times, and then `ok`
```
### subclass that `console.log()`s everything written into it
```js
class Logger extends Minipass {
write (chunk, encoding, callback) {
console.log('WRITE', chunk, encoding)
return super.write(chunk, encoding, callback)
}
end (chunk, encoding, callback) {
console.log('END', chunk, encoding)
return super.end(chunk, encoding, callback)
}
}
someSource.pipe(new Logger()).pipe(someDest)
```
### same thing, but using an inline anonymous class
```js
// js classes are fun
someSource
.pipe(new (class extends Minipass {
emit (ev, ...data) {
// let's also log events, because debugging some weird thing
console.log('EMIT', ev)
return super.emit(ev, ...data)
}
write (chunk, encoding, callback) {
console.log('WRITE', chunk, encoding)
return super.write(chunk, encoding, callback)
}
end (chunk, encoding, callback) {
console.log('END', chunk, encoding)
return super.end(chunk, encoding, callback)
}
}))
.pipe(someDest)
```
### subclass that defers 'end' for some reason
```js
class SlowEnd extends Minipass {
emit (ev, ...args) {
if (ev === 'end') {
console.log('going to end, hold on a sec')
setTimeout(() => {
console.log('ok, ready to end now')
super.emit('end', ...args)
}, 100)
} else {
return super.emit(ev, ...args)
}
}
}
```
### transform that creates newline-delimited JSON
```js
class NDJSONEncode extends Minipass {
write (obj, cb) {
try {
// JSON.stringify can throw, emit an error on that
return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
} catch (er) {
this.emit('error', er)
}
}
end (obj, cb) {
if (typeof obj === 'function') {
cb = obj
obj = undefined
}
if (obj !== undefined) {
this.write(obj)
}
return super.end(cb)
}
}
```
### transform that parses newline-delimited JSON
```js
class NDJSONDecode extends Minipass {
constructor (options) {
// always be in object mode, as far as Minipass is concerned
super({ objectMode: true })
this._jsonBuffer = ''
}
write (chunk, encoding, cb) {
if (typeof chunk === 'string' &&
typeof encoding === 'string' &&
encoding !== 'utf8') {
chunk = Buffer.from(chunk, encoding).toString()
} else if (Buffer.isBuffer(chunk)) {
chunk = chunk.toString()
}
if (typeof encoding === 'function') {
cb = encoding
}
const jsonData = (this._jsonBuffer + chunk).split('\n')
this._jsonBuffer = jsonData.pop()
for (let i = 0; i < jsonData.length; i++) {
try {
// JSON.parse can throw, emit an error on that
super.write(JSON.parse(jsonData[i]))
} catch (er) {
this.emit('error', er)
continue
}
}
if (cb)
cb()
}
}
```
| PypiClean |
/FFTA-0.3.5.1-py3-none-any.whl/ffta/pixel_utils/fitting.py | import numpy as np
from scipy.optimize import minimize
'''
Fit Equations
'''
def ddho_freq_product(t, A, tau1, tau2):
'''
Uses a product of exponentials as the functional form
:param t: time array
:type t: numpy.ndarray
:param A: amplitude of the frequency shift
:type A: float
:param tau1: decay time constant (the exp(-t/tau1) - 1 term)
:type tau1: float
:param tau2: relaxation time constant (the -exp(-t/tau2) term)
:type tau2: float
:returns: modeled instantaneous frequency shift at each time point
:rtype: numpy.ndarray
'''
decay = np.exp(-t / tau1) - 1
relaxation = -1 * np.exp(-t / tau2)
return A * decay * relaxation
def ddho_freq_sum(t, A1, A2, tau1, tau2):
'''
Uses a sum of exponentials as the functional form
:param t: time array
:type t: numpy.ndarray
:param A1: amplitude of the decay term
:type A1: float
:param A2: amplitude of the relaxation term
:type A2: float
:param tau1: decay time constant
:type tau1: float
:param tau2: relaxation time constant
:type tau2: float
:returns: modeled instantaneous frequency shift at each time point
:rtype: numpy.ndarray
'''
decay = np.exp(-t / tau1) - 1
relaxation = -1 * np.exp(-t / tau2)
return A1 * decay + A2 * relaxation
def cut_exp(t, A, y0, tau):
'''
Uses a single exponential for the case of no drive
:param t: time array
:type t: numpy.ndarray
:param A: amplitude of the exponential
:type A: float
:param y0: constant offset
:type y0: float
:param tau: time constant
:type tau: float
:returns: modeled single-exponential signal at each time point
:rtype: numpy.ndarray
'''
return y0 + A * np.exp(-t / tau)
def ddho_phase(t, A, tau1, tau2):
"""
Integrated product of two exponentials; the functional form used to
fit the phase data.
:param t: time array
:type t: numpy.ndarray
:param A: amplitude
:type A: float
:param tau1: decay time constant
:type tau1: float
:param tau2: relaxation time constant
:type tau2: float
:returns: modeled phase at each time point
:rtype: numpy.ndarray
"""
prefactor = tau2 / (tau1 + tau2)
return A * tau1 * np.exp(-t / tau1) * (-1 + prefactor * np.exp(-t / tau2)) + A * tau1 * (1 - prefactor)
'''
Fit functions
Product: product of two exponential functions (default)
Sum: sum of two exponential functions
Exp: Single exponential decay
Ringdown: Same as Exp but with different bounds
Phase: integrated product of two exponential functions
'''
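# Example usage (illustrative values: Q and drive_freq come from your cantilever
# calibration, while t and inst_freq are the measured instantaneous-frequency data):
# A, tau1, tau2 = fit_product(Q=350, drive_freq=280e3, t=t, inst_freq=inst_freq)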
def fit_product(Q, drive_freq, t, inst_freq):
"""
Fit the product of exponentials to instantaneous frequency data,
using bounded optimization.
:param Q: quality factor of the cantilever
:type Q: float
:param drive_freq: drive frequency (Hz)
:type drive_freq: float
:param t: time array
:type t: numpy.ndarray
:param inst_freq: instantaneous frequency data to fit
:type inst_freq: numpy.ndarray
:returns: optimized parameters [A, tau1, tau2]
:rtype: numpy.ndarray
"""
# Initial guess for relaxation constant.
inv_beta = Q / (np.pi * drive_freq)
# Cost function to minimize.
cost = lambda p: np.sum((ddho_freq_product(t, *p) - inst_freq) ** 2)
# bounded optimization using scipy.minimize
pinit = [inst_freq.min(), 1e-4, inv_beta]
popt = minimize(cost, pinit, method='TNC', options={'disp': False},
bounds=[(-10000, -1.0),
(5e-7, 0.1),
(1e-5, 0.1)])
return popt.x
def fit_product_unbound(Q, drive_freq, t, inst_freq):
"""
Fit without any bound constraints
:param Q: quality factor of the cantilever
:type Q: float
:param drive_freq: drive frequency (Hz)
:type drive_freq: float
:param t: time array
:type t: numpy.ndarray
:param inst_freq: instantaneous frequency data to fit
:type inst_freq: numpy.ndarray
:returns: optimized parameters [A, tau1, tau2]
:rtype: numpy.ndarray
"""
# Initial guess for relaxation constant.
inv_beta = Q / (np.pi * drive_freq)
# Cost function to minimize.
cost = lambda p: np.sum((ddho_freq_product(t, *p) - inst_freq) ** 2)
# bounded optimization using scipy.minimize
pinit = [inst_freq.min(), 1e-4, inv_beta]
popt = minimize(cost, pinit, method='TNC', options={'disp': False})
return popt.x
def fit_sum(Q, drive_freq, t, inst_freq):
"""
Fit using a sum of two exponentials, with bound constraints
:param Q: quality factor of the cantilever
:type Q: float
:param drive_freq: drive frequency (Hz)
:type drive_freq: float
:param t: time array
:type t: numpy.ndarray
:param inst_freq: instantaneous frequency data to fit
:type inst_freq: numpy.ndarray
:returns: optimized parameters [A1, A2, tau1, tau2]
:rtype: numpy.ndarray
"""
# Initial guess for relaxation constant.
inv_beta = Q / (np.pi * drive_freq)
# Cost function to minimize.
cost = lambda p: np.sum((ddho_freq_sum(t, *p) - inst_freq) ** 2)
# bounded optimization using scipy.minimize
pinit = [inst_freq.min(), inst_freq.min(), 1e-4, inv_beta]
popt = minimize(cost, pinit, method='TNC', options={'disp': False},
bounds=[(-10000, -1.0),
(-10000, -1.0),
(5e-7, 0.1),
(1e-5, 0.1)])
return popt.x
def fit_exp(t, inst_freq):
"""
:param t: time array
:type t: numpy.ndarray
:param inst_freq: instantaneous frequency data to fit
:type inst_freq: numpy.ndarray
:return: optimized parameters [A, y0, tau]
:rtype: numpy.ndarray
"""
# Cost function to minimize.
cost = lambda p: np.sum((cut_exp(t, *p) - inst_freq) ** 2)
pinit = [inst_freq.max() - inst_freq.min(), inst_freq.min(), 1e-4]
# popt = minimize(cost, pinit, method='TNC', options={'disp': False},
# bounds=[(1e-5, 1000),
# (np.abs(inst_freq.min()) * -2, np.abs(inst_freq.max()) * 2),
# (1e-6, 0.1)])
popt = minimize(cost, pinit, method='TNC', options={'disp': False})
return popt.x
def fit_ringdown(t, cut):
    """
    Bounded single-exponential fit for ringdown (amplitude) data
    :param t: time axis
    :type t: numpy.ndarray
    :param cut: amplitude ringdown segment to fit
    :type cut: numpy.ndarray
    :returns: fitted parameters [A, y0, tau]
    :rtype: numpy.ndarray
    """
# Cost function to minimize. Faster than normal scipy optimize or lmfit
cost = lambda p: np.sum((cut_exp(t, *p) - cut) ** 2)
pinit = [cut.max() - cut.min(), cut.min(), 1e-4]
popt = minimize(cost, pinit, method='TNC', options={'disp': False},
bounds=[(0, 5 * (cut.max() - cut.min())),
(0, cut.min()),
(1e-8, 1)])
return popt.x
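# Example usage of fit_ringdown (a sketch with hypothetical values):
#
#   import numpy as np
#   t = np.linspace(0, 5e-3, 1000)
#   cut = cut_exp(t, A=30.0, y0=5.0, tau=1e-3) + np.random.normal(0, 0.1, t.size)
#   A, y0, tau = fit_ringdown(t, cut)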
def fit_phase(Q, drive_freq, t, phase):
    """
    Bounded fit of the integrated product form to phase data
    :param Q: quality factor, used for the initial relaxation-constant guess
    :type Q: float
    :param drive_freq: drive frequency (Hz)
    :type drive_freq: float
    :param t: time axis
    :type t: numpy.ndarray
    :param phase: phase data to fit
    :type phase: numpy.ndarray
    :returns: fitted parameters [A, tau1, tau2]
    :rtype: numpy.ndarray
    """
# Initial guess for relaxation constant.
inv_beta = Q / (np.pi * drive_freq)
# Cost function to minimize.
cost = lambda p: np.sum((ddho_phase(t, *p) - phase) ** 2)
# bounded optimization using scipy.minimize
pinit = [phase.max() - phase.min(), 1e-4, inv_beta]
maxamp = phase[-1] / (1e-4 * (1 - inv_beta / (inv_beta + 1e-4)))
popt = minimize(cost, pinit, method='TNC', options={'disp': False},
bounds=[(0, 5 * maxamp),
(5e-7, 0.1),
(1e-5, 0.1)])
    return popt.x
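# Example usage of fit_phase (a sketch with hypothetical values; note the
# bounds above assume a positive phase amplitude):
#
#   import numpy as np
#   t = np.linspace(0, 5e-3, 1000)
#   phase = ddho_phase(t, A=150.0, tau1=2e-4, tau2=1e-3)
#   A, tau1, tau2 = fit_phase(Q=350, drive_freq=300e3, t=t, phase=phase)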
/Cacophony-0.1.tar.gz/Cacophony-0.1/flask_cacophony/slack.py |

import json
import uuid
import weakref
from .Cacophony import Command
from flask import request, make_response, abort
class Cacophony(object):
def __init__(self, cacophony, app=None):
self.commands = {}
self.app = app
self.cacophony = cacophony
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('CACOPHONY_SLACK_TOKEN', str(uuid.uuid4()))
app.config.setdefault('CACOPHONY_SLACK_EVENTS', '/slack/events')
if app.config.get('CACOPHONY_SLACK_VERIFICATION_TOKEN') is None:
raise ValueError('CACOPHONY_SLACK_VERIFICATION_TOKEN is not set')
app.add_url_rule(app.config['CACOPHONY_SLACK_EVENTS'], view_func=self.incoming_event, methods=['POST', ])
def command(self, name):
        def decorator(f):
            # Keep only a weak reference so handlers can be garbage-collected.
            self.commands[name] = weakref.ref(f)
            return f
return decorator
def incoming_event(self):
# Parse the request payload into JSON
if not request.data:
abort(404)
event_data = json.loads(request.data.decode('utf-8'))
# Echo the URL verification challenge code
if "challenge" in event_data:
return make_response(
event_data.get("challenge"), 200, {"content_type": "application/json"}
)
# Parse the Event payload and emit the event to the event listener
if "event" in event_data:
# Verify the request token
request_token = event_data.get("token")
if self.app.config['CACOPHONY_SLACK_VERIFICATION_TOKEN'] != request_token:
message = "Request contains invalid Slack verification token"
return make_response(message, 403)
event_type = event_data["event"]["type"]
# request.environ["appenlight.username"] = event_data['team_id']
            if event_type == 'message':
if 'subtype' in event_data["event"]:
return make_response("", 200)
self.app.logger.info(event_data)
command_data = event_data['event']['text'].strip()
command_name = command_data.split(' ', 2)[0]
command = Command(command_name, command_data, 'slack', event_data['team_id']) # eh
return self.cacophony.event(command)
        return make_response("", 200)
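# Example wiring (a minimal sketch; ``core`` stands for a Cacophony object
# exposing the ``event(command)`` method used in ``incoming_event`` above,
# and the handler signature is an assumption):
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   app.config['CACOPHONY_SLACK_VERIFICATION_TOKEN'] = 'hypothetical-token'
#   slack = Cacophony(core, app=app)
#
#   @slack.command('ping')
#   def ping(command):
#       return 'pong'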
/Gemtools-1.7.1.tar.gz/Gemtools-1.7.1/python/gem/reports.py |
from string import Template
import errno
import json
import locale
import os
import re
import shutil
import zipfile
#import matplotlib stuff
__plotlib_avail = False
try:
from pylab import *
from matplotlib.ticker import FuncFormatter
__plotlib_avail = True
except Exception, e:
pass
## set locale to get thousands separator easily in 2.6
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
## default colors
__colors = ['#3182bd', '#6baed6', '#9ecae1', '#c6dbef', '#e6550d', '#fd8d3c']
__default_color = __colors[1]
__default_template = '''
<html>
<head>
<link rel="stylesheet" type="text/css" href="style.css">
<style type="text/css">
/*RESET DEFAULTS*/
html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, font, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td {
border: 0;
font-family: inherit;
font-size: 100%;
font-style: inherit;
font-weight: inherit;
margin: 0;
padding: 0;
vertical-align: baseline;
}
:focus { /* remember to define focus styles! */
outline: 0;
}
body, input, textarea {
color: #373737;
font: 15px "Helvetica Neue", Helvetica, Arial, sans-serif;
font-weight: 300;
line-height: 1.625;
}
body {
background: #FFF;
}
/* Alignment */
.alignleft {
display: inline;
float: left;
margin-right: 2em;
}
.alignright {
display: inline;
float: right;
}
.aligncenter {
clear: both;
display: block;
margin-left: auto;
margin-right: auto;
}
strong {
font-weight: bold;
}
a {
color: #1C231C;
text-decoration: none;
}
a:focus,
a:active,
a:hover {
text-decoration: underline;
}
.page{
margin: 2em;
}
/*HEADLINES*/
.header{
border-bottom: 1px solid #DDD;
}
.header h1{
font-size: 2.5em;
text-align: center;
}
h1{
font-size: 1.6em;
text-decoration: underline;
}
.data{
border-bottom: 1px solid #DDD;
}
/*PLOTS*/
.plot{
width: 60em;
}
.general_plot{
width: 40em;
margin: -35px 0 0 0;
}
.transitions_plot{
width: 30em;
}
.transitions_1context_plot{
width: 35em;
}
</style>
</head>
<body>
<div class="page">
<div class="header">
<h1>${name} mapping stats</h1>
</div>
<div class="general data">
<h1>General Stats</h1>
<img src="general.png" class="general_plot plot"/>
<div class="alignleft">
<table>
<tr>
<td>#Reads</td>
<td>${reads}</td>
</tr>
<tr>
<td>Reads length (min, avg, max)</td>
<td>${min}, ${avg}, ${max}</td>
</tr>
<tr>
<td>Reads Mapped</td>
<td>${mapped} (${mapped_p})</td>
</tr>
<tr>
<td>#Alignments</td>
<td>${alignments}</td>
</tr>
<tr>
<td>#Maps</td>
<td>${maps} (${maps_p} map/alg)</td>
</tr>
</table>
</div>
</div>
<div class="errorprofile data">
<h1>Error Profile</h1>
<img src="error_profile.png" class="errors_plot plot"/>
</div>
<div class="ranges data">
<h1>Ranges</h1>
<img src="ranges.png" class="ranges_plot plot"/>
</div>
<div class="transitions data">
<h1>Transitions</h1>
<div>
<img src="transitions.png" class="transitions_plot plot"/>
</div>
<div>
<img src="transitions_1context.png" class="transitions_1context_plot plot"/>
</div>
</div>
<div class="junctions data">
<h1>Junctions Profile</h1>
<img src="junctions_profile.png" class="junctions_plot plot"/>
</div>
</div>
</body>
</html>
'''
def __zipfolder(foldername, target_dir):
zipobj = zipfile.ZipFile(foldername + '.zip', 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
bd = os.path.basename(target_dir)
for base, dirs, files in os.walk(target_dir):
for file in files:
fn = os.path.join(base, file)
zipobj.write(fn, os.path.join(bd, fn[rootlen:]))
# the tick formatter for percentage and reads
def __percent_format(x, pos=0):
"""Tick formatter to render as
percentage
"""
return '%1.0f%%' % (100 * x)
def __reads_format(x, pos=0):
"""Render number of reads with thousands
separator
"""
return locale.format("%.0f", x, True)
def write_general_stats(data, out_dir, paired=True):
"""Generate general stats plot and
save it to the given out_dir
data -- the stats data
out_dir -- the output directory
paired -- paired reads
"""
num_blocks = (float)(data["num_blocks"])
num_split_maps = data["splits_profile"]["num_mapped_with_splitmaps"]
num_mapped = data["num_mapped"]
if paired:
num_blocks = num_blocks / 2
num_unmapped = num_blocks - num_mapped
total = (float)(num_mapped + num_unmapped)
fig = plt.figure(figsize=(10, 10))
ax1 = fig.add_subplot(111)
ax1.tick_params(top=False, bottom=False)
plt.xticks(visible=False)
plt.xlim([0, 3])
ax1.bar([1.5], num_mapped + num_unmapped, 1, color=__colors[5], label="Unmapped")
ax1.bar([1.5], num_mapped, 1, color=__colors[1], label="Mapped")
ax1.bar([1.5], num_split_maps, 1, color=__colors[0], label="Split-Mapped")
ax1.yaxis.set_major_formatter(FuncFormatter(__reads_format))
ax1.set_ylabel("Reads")
ax2 = ax1.twinx()
ax2.set_ylabel("Percent")
ax2.yaxis.set_major_formatter(FuncFormatter(__percent_format))
# legend
lgd = ax1.legend(loc='lower center', bbox_to_anchor=(0.5, -0.1), ncol=3)
# add descriptor lines
percent = (num_split_maps / total)
ax1.axhline(y=num_split_maps, color=__colors[0])
ax1.text(0.1, num_split_maps, "Split-Maps %s (%.1f%%)" % (locale.format("%.0f", num_split_maps, True), (percent * 100.0)), verticalalignment='bottom')
percent = (num_mapped / total)
ax1.axhline(y=num_mapped, color=__colors[1])
ax1.text(0.1, num_mapped, "Mapped %s (%.1f%%)" % (locale.format("%.0f", num_mapped, True), (percent * 100.0)), verticalalignment='top')
percent = (num_unmapped / total)
ax1.axhline(y=total, color=__colors[5])
ax1.text(0.1, total, "Unmapped %s (%.1f%%)" % (locale.format("%.0f", num_unmapped, True), (percent * 100.0)), verticalalignment='top')
    fig.savefig('%s/general.png' % (out_dir), bbox_inches='tight')
def write_mmaps_and_uniq_ranges(data, out_dir):
fig = plt.figure(figsize=(20, 15))
# get data, add to list and transform to percent
mmap_ranges_values = data["mmap"]
alignments = (float)(data["num_alignments"])
rest = [alignments - sum(mmap_ranges_values)]
[rest.append(d) for d in mmap_ranges_values]
rest = [(d / alignments) * 100.0 for d in rest]
subplots_adjust(hspace=0.3)
## mmap ranges plot
subplot2grid((3, 2), (0, 0))
grid(True)
bar(xrange(9), rest, color=__default_color, align="center")
plt.xticks(xrange(10), ("0", "1", "(1,5]", "(5,10]", "(10,50]", "(50,100]", "(100,500]", "(500,1000]", "(1000,inf]"), rotation=45, )
ylim([0, 100])
title("Multi-Map Ranges")
xlabel("Ranges")
ylabel("% Alignments")
# get data and transform to percentage
uniq_ranges_values = data["uniq"]
rest = [uniq_ranges_values[-1]]
[rest.append(d) for d in uniq_ranges_values[:7]]
alignments = (float)(sum(rest))
rest = [(d / alignments) * 100.0 for d in rest]
# plot
## unique ranges
subplot2grid((3, 2), (0, 1))
grid(True)
ylim([0, 100])
bar(xrange(8), rest, color=__default_color, align="center")
xticks(xrange(8), ("X", "0", "1", "2", "3", "(3,10]", "(10,50]", "(50,inf]"), rotation=45, )
title("Unique Ranges")
xlabel("Ranges")
ylabel("% Alignments")
subplot2grid((3, 2), (1, 0), colspan=2)
inss = data["maps_profile"]["inss"]
labels = ["(-inf, 0)", "(-100, 0)", "(0, 100]", "(100, 200]", "(200, 300]", "(300, 400]", "(400, 500]", "(500, 600]", "(600, 700]", "(700, 800]", "(800, 900]", "(900, 1000]", "(1000, 2000]", "(2000, 5000]", "(5000, 10000]", "(10000, inf]"]
num_maps = (float)(data["num_maps"])
rest = [(d / num_maps) * 100.0 for d in inss]
grid(True)
ylim([0, 100])
bar(xrange(16), rest, color=__default_color)
xticks(xrange(16), labels, rotation=45, )
title("Insert sizes")
xlabel("Ranges")
ylabel("% Alignments")
subplot2grid((3, 2), (2, 0), colspan=2)
inss = data["maps_profile"]["inss_fine_grain"]
num_maps = (float)(data["num_maps"])
rest = [(d / num_maps) * 100.0 for d in inss]
grid(True)
xlim([-1000, 10000])
bar(xrange(-1000, 10000, 10), rest, color=__default_color)
# labels = ["<1000"]
# ticks = [0]
# for x in range(-900, 10000, 100):
# labels.append(str(x))
# ticks.append(x + 901)
# xticks(ticks, labels, rotation=0, )
title("Insert sizes (fine grained)")
xlabel("Ranges")
ylabel("% Alignments")
fig.savefig('%s/ranges.png' % (out_dir), bbox_inches='tight')
def write_error_profiles(data, out_dir, offset=33):
# ## error profiles
def plot_e_profile(da, _title, xlab="% Errors", ylab="% Alignments"):
labels = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "(10,20]", "(20,50]", "(50,100]")
num_maps = (float)(data["num_maps"])
rest = da
rest = [(d / num_maps) * 100.0 for d in rest]
grid(True)
bar(xrange(14), rest, color=__default_color)
xticks(xrange(14), labels[:len(rest)], rotation=45, )
title(_title)
xlabel(xlab)
ylabel(ylab)
ylim([0, 100])
xlim([0, len(rest)])
figure(figsize=(15, 20))
subplots_adjust(hspace=0.5)
subplot2grid((4, 2), (0, 0))
plot_e_profile(data["maps_profile"]["mismatches"], "Mismatch Profile")
subplot2grid((4, 2), (0, 1))
plot_e_profile(data["maps_profile"]["insertion_length"], "Insertion Profile")
subplot2grid((4, 2), (1, 0))
plot_e_profile(data["maps_profile"]["deletion_length"], "Deletion Profile")
subplot2grid((4, 2), (1, 1))
plot_e_profile(data["maps_profile"]["levenshtein"], "Levenshtein Profile")
# plot errors and mismatches
subplot2grid((4, 2), (2, 0), colspan=2)
max_len = 41
total = (float)(data["maps_profile"]["total_errors_events"])
da = data["maps_profile"]["qual_score_errors"][offset:offset + max_len]
da = [(d / total) * 100.0 for d in da]
plot(da, color="#FF5533", label="Errors")
fill_between(range(max_len), da[:max_len], color="#FF5533", alpha=0.5)
total = (float)(data["maps_profile"]["total_mismatches"])
da = data["maps_profile"]["qual_score_misms"][offset:offset + max_len]
da = [(d / total) * 100.0 for d in da]
plot(da, color=__default_color, label="Mismatches")
fill_between(range(max_len), da[:max_len], color=__default_color, alpha=0.5)
ylim(bottom=0)
title("Quality Errors/Mismatches Profile")
xlabel("Quality Score")
ylabel("Errors/Mismatches")
legend(loc="upper left")
subplot2grid((4, 2), (3, 0), colspan=2)
max_len = data["max_length"]
error_events = (float)(data["maps_profile"]["total_errors_events"])
da = data["maps_profile"]["error_position"]
da = [(d / error_events) * 100.0 for d in da]
plot(da[:max_len], color=__default_color)
fill_between(range(max_len), da[:max_len], color=__default_color, alpha=0.5)
ylim(bottom=0)
xlim([0, max_len])
grid(True)
title("Error events")
xlabel("Position")
ylabel("% Alignments")
savefig('%s/error_profile.png' % (out_dir), bbox_inches='tight')
def __exclude_zero(data, labels, delta=0.1):
"""Exclude values <= delta from data and labels
"""
d = []
l = []
for i, x in enumerate(data):
if x > delta:
d.append(x)
l.append(labels[i])
return (d, l)
def write_junctions_profile(data, out_dir):
# ## junctions
sp = data["splits_profile"]
max_len = data["max_length"]
total_junctions = (float)(sp["total_junctions"])
if total_junctions == 0:
total_junctions = 1.0
figure(figsize=(20, 12))
#subplots_adjust( hspace=0, wspace=0 )
subplot2grid((2, 3), (0, 0))
da = [(d / total_junctions) * 100.0 for d in sp["num_junctions"]]
labels = ["[1]", "[2]", "[3]", "(3, inf)"]
da, labels = __exclude_zero(da, labels)
pie(da, labels=labels, autopct="%1.1f%%", shadow=False, colors=__colors)
title("Number of Junctions")
subplot2grid((2, 3), (0, 1))
da = [(d / total_junctions) * 100.0 for d in sp["length_junctions"]]
labels = ["[0,100]", "(100, 1000]", "(1000, 5000]", "(5000, 10000]", "(10000, 50000]", "(50000, inf)"]
da, labels = __exclude_zero(da, labels)
pie(da, labels=labels, autopct="%1.1f%%", shadow=False, colors=__colors)
title("Junction Lengths")
subplot2grid((2, 3), (0, 2))
pe = [sp["pe_rm_rm"], sp["pe_sm_rm"], sp["pe_sm_sm"]]
sum_pe = (float)(sum(pe))
if sum_pe == 0:
sum_pe = 1.0
da = [(d / sum_pe) * 100.0 for d in pe]
labels = ["RM+RM", "SM+RM", "SM+SM"]
da, labels = __exclude_zero(da, labels)
pie(da, labels=labels, autopct="%1.1f%%", shadow=False, colors=__colors)
title("Pair combinations")
subplot2grid((2, 3), (1, 0), colspan=3)
da = sp["junction_position"]
da = [(d / total_junctions) * 100.0 for d in da]
max_len = min(max_len, len(da))
plot(da[:max_len], color="black")
fill_between(range(max_len), da[:max_len], color=__default_color)
xlim([0, max_len])
ylim([0, max(da) + 1])
grid(True)
title("Junction Positions")
xlabel("Position")
ylabel("% Junctions")
savefig('%s/junctions_profile.png' % (out_dir), bbox_inches='tight')
def write_transitions(data, out_dir):
figure(figsize=(10, 10))
# transisitons
total = (float)(data["maps_profile"]["total_mismatches"])
da = data["maps_profile"]["misms_transition"][:5 * 5]
da = [(d / total) * 100.0 for d in da]
da = array([da[5 * x:5 * x + 5] for x in range(5)])
column_labels = list('ACGTN')
row_labels = list('ACGTN')
subplot2grid((2, 1), (0, 0))
ax = gca()
pcolor(da, cmap=plt.cm.Blues, edgecolors="black")
colorbar()
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(da.shape[0]) + 0.5, minor=False)
ax.set_yticks(np.arange(da.shape[1]) + 0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False, family='monospace')
ax.set_yticklabels(column_labels, minor=False, family='monospace')
tick_params(top=False, left=False, right=False)
for x in range(5):
for y in range(5):
text(0.5 + x, 0.5 + y, "%.2f%%" % (da[y, x]), horizontalalignment='center', verticalalignment='center')
ylabel("Transitions")
savefig('%s/transitions.png' % (out_dir), bbox_inches='tight')
# context 1 transitions
raw = data["maps_profile"]["misms_1context"]
raw = [(d / total) * 100.0 for d in raw]
da = []
def __get_index(a, b, c, i):
return ((((a * 5 + b) * 5) + c) * 5 + i)
for b in range(4):
for a in range(4):
for c in range(4):
for i in range(5):
da.append(raw[__get_index(a, b, c, i)])
da = array([da[5 * x:5 * x + 5] for x in range(4 * 4 * 4)])
row_labels = list('ACGTN')
column_labels = list([a + b + c for b in "ACGT" for a in "ACGT" for c in "ACGT"])
figure(figsize=(10, 30))
pcolor(da, cmap=plt.cm.Blues, edgecolors="black")
colorbar()
ax = gca()
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(da.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(da.shape[0]) + 0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
tick_params(top=False, left=False, right=False)
ax.set_xticklabels(row_labels, minor=False, family='monospace')
ax.set_yticklabels(column_labels, minor=False, family='monospace')
for x in range(5):
for y in range((4 * 4 * 4)):
text(0.5 + x, 0.5 + y, "%.2f%%" % (da[y, x]), horizontalalignment='center', verticalalignment='center')
ylabel("Transitions")
savefig('%s/transitions_1context.png' % (out_dir), bbox_inches='tight')
def write_template(data, out, paired=True, name=None):
if paired:
avg_length = data["total_bases_aligned"] / float(data["num_mapped"] * 2)
else:
avg_length = data["total_bases_aligned"] / float(data["num_mapped"])
num_blocks = (float)(data["num_blocks"])
num_split_maps = data["splits_profile"]["num_mapped_with_splitmaps"]
num_mapped = data["num_mapped"]
if paired:
num_blocks = num_blocks / 2
num_unmapped = num_blocks - num_mapped
total = (float)(num_mapped + num_unmapped)
if name is None:
name = os.path.basename(out)
tmpl = Template(__default_template).safe_substitute({
"name": name,
"reads": data["num_blocks"],
"min": data["mapped_min_length"],
"max": data["mapped_max_length"],
"avg": "%.0f" % avg_length,
"mapped": data["num_mapped"],
"mapped_p": "%.2f%%" % ((num_mapped / total) * 100.0),
"alignments": data["num_blocks"],
"maps": data["num_maps"],
"maps_p": "%.3f" % (data["num_maps"] / float(data["num_mapped"])),
})
with open("%s/index.html" % (out), 'w') as f:
f.write(tmpl)
def create_report(input_file, output_name, paired=True, extract=False, name=None):
"""Create a stats report from a json stats file and store it in a zip
file using the given output name.
Parameters
----------
input_file: string or file handle
The input file either as a string pointing to the json report file
or as an open readable stream.
output_name: stirng
The output prefix is used to create a directory that hosts the
html report
paired: bool
Set this to false if the input is single end
extract: bool
Set this to true to keep the directory next to the zip file
name:
Name of the dataset
"""
if not __plotlib_avail:
raise Exception("""
Matplotlib could not be imported. We need the matplotlib library
to render the stats report!
Please install matplotlib and its dependencies. For example:
pip install numpy
pip install matplotlib
""")
if input_file is None:
raise ValueError("No input file specified")
if output_name is None:
raise ValueError("No output name specified")
# load input data
of = None
if isinstance(input_file, basestring):
of = open(input_file, 'r')
data = json.load(of)
of.close()
# guess name
if name is None:
m = re.match("(.*)(\.stats\.(all|best)\.json$)", input_file)
if m:
name = m.group(1)
else:
idx = input_file.rfind(".")
if idx > 0:
name = input_file[:idx]
# create output directory
try:
os.makedirs(output_name)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(output_name):
pass
else:
raise
# create plots
write_general_stats(data, output_name, paired=paired)
write_mmaps_and_uniq_ranges(data, output_name)
write_error_profiles(data, output_name)
write_junctions_profile(data, output_name)
write_transitions(data, output_name)
# print the data to the folder
with open("%s/stats.json" % output_name, 'w') as of:
json.dump(data, of, indent=2)
# write the html template
write_template(data, output_name, paired=paired, name=name)
# zip the folder
__zipfolder(output_name, output_name)
# remove folder
if not extract:
        shutil.rmtree(output_name)
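# Example usage (a minimal sketch; the file names below are hypothetical):
#
#   create_report("sample.stats.all.json", "sample_report", paired=True,
#                 extract=True)
#   # -> writes sample_report.zip and, because extract=True, also keeps
#   #    the sample_report/ folder with index.html and the PNG plots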
/LFake-18.9.0.tar.gz/lfake/providers/address/hi_IN/__init__.py |

from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = (
"{{first_name}} {{last_name}}",
"{{last_name}}",
)
street_address_formats = ("{{building_number}} {{street_name}}",)
address_formats = (
"{{street_address}}\n{{city}} {{postcode}}",
"{{street_address}}\n{{city}}-{{postcode}}",
)
building_number_formats = (
"####",
"###",
"##",
"#",
"#/#",
"##/##",
"##/###",
"##/####",
)
postcode_formats = ("######",)
cities = (
"आदिलाबाद",
"अगरतला",
"अहमदाबाद",
"अहमदनगर",
"अजमेर",
"अम्बाजी",
"अमरपुर",
"इलाहाबाद",
"अकोला",
"अखनूर",
"अन्तर्गत",
"अलांग",
"अलीगढ",
"दादरा और नगर हवेली",
"अमरावती",
"अमरोहा",
"अनन्तपुर",
"करना",
"जिससेबेलारी",
"अनंतनाग",
"भागलपुर",
"भद्रक",
"बचेली",
"बहादुरगंज",
"बहादुरगढ",
"चिरमिरी",
"चिराला",
"चित्रदुर्ग",
"चित्तूर",
"चित्रकूट",
"देवगढ़",
"दालखोला",
"देवास",
"चंडीगढ",
"चिपलुन",
"चक्रधरपुर",
"चंबा",
"फतहपुर",
"फतेहपुर",
"फतेहगढ",
"सभापतिने",
"देवगढ़",
"धर्मापुरी",
"पाकाला",
"धारवाड",
"असम",
"देहरा",
"रानीताल",
"खडगपुर",
"मोकामा",
"मोकोकचुंग",
"जिलोंपर",
"विस्तारण",
"मोतिहारी",
"लखनऊ",
"मुंबई",
"हैदराबाद",
)
states = (
"अरूणाचल प्रदेश",
"बिहार",
"असम",
"आंध्र प्रदेश",
"छत्तीसगढ",
"हरियाणा",
"गुजरात",
"हिमाचल प्रदेश",
"गोवा",
"मध्य प्रदेश",
"महाराष्ट्र",
"जम्मू और कश्मीर",
"केरल",
"कर्नाटक",
"मणिपुर",
"मिजोरम",
"मेघालय",
"सिक्किम",
"राजस्थान",
"पंजाब",
"उडीसा",
"उत्तरांचल",
"उत्तर प्रदेश",
"तमिलनाडु",
"त्रिपुरा",
"पश्चिमी बंगाल",
"अंडमान और निकोबार",
"दमन और दीव",
"दादरा और नगर हवेली",
"दिल्ली",
"पांडिचेरी",
"लक्षद्वीप",
)
countries = (
"आर्मीनिया",
"यू.के.",
"फ्रांस",
"फलस्तीन",
"मिस्र",
"ब्राज़ील",
"ईरान",
"यूनान",
"स्पेन",
"जॉर्जिया",
"लेबनान",
"सायप्रस",
"सीरिया",
"कनाडा",
"रूस",
"संयुक्त राज्य अमरीका",
"नेदर्लान्ड",
"ऑस्ट्रेलिया",
"एंटीगुआ",
"बार्बुडा",
"ऑस्ट्रिया",
"अज़रबाइजान",
"बारबाडोस",
"बेलारूस",
"बेल्जियम",
"बेलीज़",
"बेनिन",
"बहामास",
"बहरीन",
"बांग्लादेश",
"भूटान",
"बोलिविया",
"बोस्निया",
"हर्जेगोविना",
"बोत्सवाना",
"ब्रुनेई",
"बुल्गारिया",
"बुर्किना फ़ासो",
"बर्मा",
"बुरूंडी",
"डोमिनिकन रिपब्लिक",
"गिनिया",
"टीमोर",
"फ़िनलैंड",
"गेबोन",
"गाम्बिया",
"जर्मनी",
"ग्रेनेडा",
"घाना",
"ग्रेट ब्रिटेन",
"हंगरी",
"भारत",
"हिन्दुस्तान",
"इराक",
"आयरलैंड",
"इंडोनेशिया",
"इटली",
"जमैका",
"जॉर्डन",
"जापान",
"क़जाख़स्तान",
"केन्या",
"किरिबाती",
"दक्षिण कोरिया",
"लातविया",
"लाओस",
"उत्तर कोरिया",
"कोसोवो",
"कुवैत",
"लेबनान",
"लिचटीनस्टीन",
"लिथुआनिया",
"लक्समबर्ग",
"लीबिया",
"लाइबेरिया",
"लेसोथो",
"नेपाल",
"न्यूज़ीलैण्ड",
"निकारागुआ",
"नाइजर",
"नाउरू",
"सेंट लुसिया",
"रोमानिया",
"अरब अमीरात",
"यूएई",
"युगांडा",
"यूक्रेन",
"उरूग्वे",
"उज़बेकिस्तान",
"यूनाइटेड किंगडम",
"वानुआतू",
"वेटिकन सिटी",
"वेनेजुएला",
"पश्चिमी सहारा",
"वियतनाम",
"यमन",
"ज़ायर",
"ज़ाम्बिया",
"ज़िम्बाब्वे",
"पाकिस्तान",
"सउदी अरब",
"ओमान",
"क़तर",
"ट्यूनीशिया",
"मोरक्को",
"तुर्की",
"श्रीलंका",
"अफ़ग़ानिस्तान",
)
def city_name(self) -> str:
return self.random_element(self.cities)
def administrative_unit(self) -> str:
return self.random_element(self.states)
    state = administrative_unit
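# Example usage (a sketch; this assumes LFake mirrors the Faker API, where the
# locale code selects this provider):
#
#   from lfake import Faker
#
#   fake = Faker('hi_IN')
#   fake.city_name()   # one of the Devanagari city names above
#   fake.address()     # building number, street, city and postcode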
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/cache/serials.py |

import importlib
import itertools
import logging
import os
from django.conf import settings
logger = logging.getLogger(__name__)
def generate_media_serial():
"""Generate a media serial number for static media files.
The media serial number can be appended to a media filename in order to
make a URL that can be cached forever without fear of change. The next
time the file is updated and the server is restarted, a new path will be
accessed and cached.
This will crawl the media files (using directories in
:setting:`MEDIA_SERIAL_DIRS` if specified, or all of
:django:setting:`STATIC_ROOT` otherwise), figuring out the latest
timestamp, and return that value.
"""
MEDIA_SERIAL = getattr(settings, "MEDIA_SERIAL", 0)
if not MEDIA_SERIAL:
media_dirs = getattr(settings, "MEDIA_SERIAL_DIRS", ["."])
for media_dir in media_dirs:
media_path = os.path.join(settings.STATIC_ROOT, media_dir)
for root, dirs, files in os.walk(media_path):
for name in files:
mtime = int(os.stat(os.path.join(root, name)).st_mtime)
if mtime > MEDIA_SERIAL:
MEDIA_SERIAL = mtime
setattr(settings, "MEDIA_SERIAL", MEDIA_SERIAL)
def generate_ajax_serial():
"""Generate a template-based AJAX serial number for requests and ETags.
The serial number can be appended to filenames involving dynamic loads of
URLs in order to make a URL that can be cached forever without fear of
change.
This will crawl the template files (using directories in
:django:setting:`TEMPLATE_DIRS`), figuring out the latest timestamp, and
return that value.
"""
AJAX_SERIAL = getattr(settings, "AJAX_SERIAL", 0)
if not AJAX_SERIAL:
template_dirs = itertools.chain.from_iterable(
template_settings.get('DIRS', [])
for template_settings in getattr(settings, 'TEMPLATES', [])
)
for template_path in template_dirs:
for root, dirs, files in os.walk(template_path):
for name in files:
mtime = int(os.stat(os.path.join(root, name)).st_mtime)
if mtime > AJAX_SERIAL:
AJAX_SERIAL = mtime
setattr(settings, "AJAX_SERIAL", AJAX_SERIAL)
def generate_locale_serial(packages):
"""Generate a locale serial for the given set of packages.
This will be equal to the most recent mtime of all the .mo files that
contribute to the localization of the given packages.
Unlike the other serial-generation functions, this will return the
value, rather than setting it on ``settings``.
Args:
packages (list of unicode):
A list of Python module paths containing :file:`locale`
directories.
Returns:
int: The resulting serial number.
"""
serial = 0
paths = []
for package in packages:
try:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
except Exception as e:
logger.exception(
'Failed to import package %s to compute locale serial: %s',
package, e)
for locale_path in paths:
for root, dirs, files in os.walk(locale_path):
for name in files:
if name.endswith('.mo'):
mtime = int(os.stat(os.path.join(root, name)).st_mtime)
if mtime > serial:
serial = mtime
return serial
def generate_cache_serials():
"""Generate both static media and AJAX serial numbers.
This is a wrapper around :py:func:`generate_media_serial` and
:py:func:`generate_ajax_serial`, which generates all the serial numbers
in one go.
This should be called early in the startup, such as in the site's
main :file:`urls.py`.
"""
generate_media_serial()
    generate_ajax_serial()
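# Example usage (a sketch): call this early in site startup, e.g. in the
# site's main urls.py, and embed the serials in media URLs (the URL layout
# below is illustrative):
#
#   from djblets.cache.serials import generate_cache_serials
#   generate_cache_serials()
#
#   from django.conf import settings
#   url = '%sjs/app.js?%s' % (settings.STATIC_URL, settings.MEDIA_SERIAL)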
/OGRE-embed-0.0.3.tar.gz/OGRE-embed-0.0.3/src/OGRE/evaluation_tasks/link_prediction.py |

try:
import cPickle as pickle
except:
import pickle
import csv

import numpy as np
from numpy import linalg as LA
from sklearn import model_selection as sk_ms
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score
from sklearn.multiclass import OneVsRestClassifier as oneVr
from sklearn.linear_model import LogisticRegression as lr
import random
from .eval_utils import *
def choose_true_edges(edges, K):
"""
Randomly choose a fixed number of existing edges
:param edges: The graph's edges
:param K: Fixed number of edges to choose
:return: A list of K true edges
"""
indexes = random.sample(list(range(1, len(edges))), K)
true_edges = []
for i in indexes:
true_edges.append(edges[i])
return true_edges
def choose_false_edges(non_edges, K):
"""
Randomly choose a fixed number of non-existing edges
:param non_edges: Edges that are not in the graph
:param K: Fixed number of edges to choose
:return: A list of K false edges
"""
indexes = random.sample(range(1, len(non_edges)), K)
false_edges = []
for i in indexes:
false_edges.append(non_edges[i])
return false_edges
def calculate_classifier_value(dict_projections, true_edges, false_edges, K, mapping=None):
"""
    Create X and Y for a Logistic Regression classifier.
    :param dict_projections: A dictionary of all node embeddings, where keys are nodes and values are embeddings
    :param true_edges: A list of K true edges
    :param false_edges: A list of K false edges
    :param K: Fixed number of edges to choose
    :param mapping: Only for the yelp dataset, ignore otherwise
    :return: X - The feature matrix for the logistic regression classifier. Its size is 2K x 1 and the i'th row is the
             norm score calculated for the i'th edge, as explained in the attached pdf file.
             Y - The edge labels, 1 for true, 0 for false
"""
X = np.zeros(shape=(2 * K, 1))
Y = np.zeros(shape=(2 * K, 1))
count = 0
    node = list(dict_projections.keys())[0]
    # Embedding keys may be ints; if so, the false-edge endpoints (read from
    # csv as strings) must be cast back to int below.
    a = not isinstance(node, str)
for edge in true_edges:
if mapping is not None:
edge = (mapping[edge[0]], mapping[edge[1]])
if dict_projections.get(edge[0]) is None or dict_projections.get(edge[1]) is None:
continue
embd1 = dict_projections[edge[0]]
embd2 = dict_projections[edge[1]]
norm = LA.norm(embd1 - embd2, 2)
X[count, 0] = norm
Y[count, 0] = int(1)
count += 1
for edge in false_edges:
if a:
edge = (int(edge[0]), int(edge[1]))
if dict_projections.get(edge[0]) is None or dict_projections.get(edge[1]) is None:
continue
embd1 = dict_projections[edge[0]]
embd2 = dict_projections[edge[1]]
norm = LA.norm(embd1 - embd2, 2)
X[count, 0] = norm
Y[count, 0] = int(0)
count += 1
return X, Y.ravel()
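# Example (a toy sketch): build X and Y from a tiny embedding dictionary.
#
#   z = {0: np.zeros(4), 1: np.ones(4), 2: 2 * np.ones(4)}
#   X, Y = calculate_classifier_value(z, true_edges=[(0, 1)],
#                                     false_edges=[('0', '2')], K=1)
#   # X holds the L2 distance between each edge's endpoint embeddings;
#   # Y is 1 for the true edge and 0 for the false edge.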
def create_model(X, Y, test_ratio):
X_train, X_test, Y_train, Y_test = sk_ms.train_test_split(X, Y, test_size=test_ratio)
model = lr()
parameters = {"penalty": ["l2"], "C": [0.01, 0.1, 1]}
model = GridSearchCV(model, param_grid=parameters, cv=2, scoring='roc_auc', n_jobs=28, verbose=0,
pre_dispatch='n_jobs')
model.fit(X_train, Y_train)
train_prob_preds = model.predict_proba(X_train)[:, 1]
test_prob_preds = model.predict_proba(X_test)[:, 1]
del model
train_auc = roc_auc_score(Y_train, train_prob_preds)
test_auc = roc_auc_score(Y_test, test_prob_preds)
    # Only the AUC is actually computed here; micro/macro F1 and accuracy are
    # returned as placeholder zeros to keep the return signature uniform.
    micro = 0
    macro = 0
    accuracy = 0
    return micro, macro, accuracy, test_auc
def exp_lp(X, Y, test_ratio_arr, rounds):
"""
    The final link prediction task as explained in our git.
    :param X: The feature matrix (one norm score per edge)
    :param Y: The edge labels - 1 for true, 0 for false
    :param test_ratio_arr: To determine how to split the data into train and test. This is an array
                           with multiple options of how to split.
    :param rounds: How many times we're doing the mission. Scores will be the average.
    :return: Scores for all rounds and all splits - F1-micro, F1-macro, accuracy and AUC
"""
micro = [None] * rounds
macro = [None] * rounds
acc = [None] * rounds
auc = [None] * rounds
for round_id in range(rounds):
micro_round = [None] * len(test_ratio_arr)
macro_round = [None] * len(test_ratio_arr)
acc_round = [None] * len(test_ratio_arr)
auc_round = [None] * len(test_ratio_arr)
for i, test_ratio in enumerate(test_ratio_arr):
micro_round[i], macro_round[i], acc_round[i], auc_round[i] = create_model(X, Y, test_ratio)
micro[round_id] = micro_round
macro[round_id] = macro_round
acc[round_id] = acc_round
auc[round_id] = auc_round
micro = np.asarray(micro)
macro = np.asarray(macro)
acc = np.asarray(acc)
auc = np.asarray(auc)
return micro, macro, acc, auc
def calculate_avg_score(score, rounds):
"""
Given the lists of scores for every round of every split, calculate the average score of every split.
:param score: F1-micro / F1-macro / Accuracy / Auc
:param rounds: How many times the experiment has been applied for each split.
:return: Average score for every split
"""
all_avg_scores = []
for i in range(score.shape[1]):
avg_score = (np.sum(score[:, i])) / rounds
all_avg_scores.append(avg_score)
return all_avg_scores
def calculate_all_avg_scores_lp(micro, macro, acc, auc, rounds):
"""
    For all score types (micro-F1, macro-F1, accuracy, AUC), calculate the average score for every
    split over all rounds, returning one list per score type.
"""
all_avg_micro = calculate_avg_score(micro, rounds)
all_avg_macro = calculate_avg_score(macro, rounds)
all_avg_acc = calculate_avg_score(acc, rounds)
all_avg_auc = calculate_avg_score(auc, rounds)
return all_avg_micro, all_avg_macro, all_avg_acc, all_avg_auc
def initialize_scores():
"""
Helper function to initialize the scores for link prediction mission
"""
my_micro = [0, 0, 0, 0, 0]
my_macro = [0, 0, 0, 0, 0]
my_acc = [0, 0, 0, 0, 0]
my_auc = [0, 0, 0, 0, 0]
return my_micro, my_macro, my_acc, my_auc
def first_help_calculate_lp(score, avg_score):
"""
Helper function for scores calculation
"""
score = [x + y for x, y in zip(score, avg_score)]
return score
def second_help_calculate_lp(score, number_of_sub_graphs):
"""
Helper function for scores calculation
"""
score = [x / number_of_sub_graphs for x in score]
return score
def lp_mission(key, number_true_false, z, edges, non_edges, ratio_arr, rounds, number_choose):
"""
    Link prediction task where one wants the scores as a function of the size of the initial embedding. For each
    test ratio, scores are computed for every initial-embedding size. For more explanation, see our
    pdf file attached in our git.
    :param key: Name of the method
    :param number_true_false: Number of true (and false) edges to take
    :param z: Embedding dictionary of the given graph (with all types of our methods, no state-of-the-art)
    :param edges: List of edges of the given graph
    :param non_edges: List of non-existing edges of the given graph
    :param ratio_arr: Array of test ratios
    :param rounds: How many rounds to repeat the score calculation.
    :param number_choose: Number of times to choose random edges
    :return: Scores of the link prediction task for each test ratio - Micro-F1, Macro-F1, Accuracy and AUC. They
             are returned as lists over the initial-embedding sizes
dict_initial = {}
for r in ratio_arr:
all_micro = []
all_macro = []
all_acc = []
all_auc = []
if " + " in key:
list_dict_projections = z[key].list_dicts_embedding
else:
list_dict_projections = [z[key][1]]
for j in range(len(list_dict_projections)):
my_micro, my_macro, my_acc, my_auc = initialize_scores()
for i in range(number_choose):
true_edges = choose_true_edges(edges, number_true_false)
false_edges = choose_false_edges(non_edges, number_true_false)
X, Y = calculate_classifier_value(list_dict_projections[j], true_edges, false_edges, number_true_false)
micro, macro, acc, auc = exp_lp(X, Y, [r], rounds)
avg_micro, avg_macro, avg_acc, avg_auc = calculate_all_avg_scores_lp(micro, macro, acc, auc, rounds)
my_micro = first_help_calculate_lp(my_micro, avg_micro)
my_macro = first_help_calculate_lp(my_macro, avg_macro)
my_acc = first_help_calculate_lp(my_acc, avg_acc)
my_auc = first_help_calculate_lp(my_auc, avg_auc)
my_micro = second_help_calculate_lp(my_micro, number_choose)
my_macro = second_help_calculate_lp(my_macro, number_choose)
my_acc = second_help_calculate_lp(my_acc, number_choose)
my_auc = second_help_calculate_lp(my_auc, number_choose)
print(my_micro)
print(my_macro)
print(my_acc)
print(my_auc)
all_micro.append(my_micro[0])
all_macro.append(my_macro[0])
all_acc.append(my_acc[0])
all_auc.append(my_auc[0])
dict_initial.update({r: [all_micro, all_macro, all_acc, all_auc]})
return dict_initial
def final_link_prediction(dict_all_embeddings, params_lp, file, mapping=None):
"""
Link Prediction Task
    :param dict_all_embeddings: Dictionary with all dict embeddings for all applied embedding methods
    :param params_lp: Parameters for the link prediction task
    :param file: Path to a csv file of non-existing edges (one node pair per row)
    :param mapping: Only for the yelp dataset, ignore otherwise
    :return: Dict where keys are the applied methods and values are dicts of scores for each test ratio.
"""
dict_lp_mission = {}
number_true_false = params_lp["number_true_false"]
rounds = params_lp["rounds"]
ratio_arr = params_lp["test_ratio"]
number_choose = params_lp["number_choose"]
keys = list(dict_all_embeddings.keys())
G = dict_all_embeddings[keys[0]].graph
edges = list(G.edges())
non_edges = []
    with open(file, 'r', newline='') as csvfile:
        obj = csv.reader(csvfile)
        for row in obj:
            non_edges.append((row[0], row[1]))
for key in keys:
dict_initial = lp_mission(key, number_true_false, dict_all_embeddings, edges, non_edges, ratio_arr, rounds,
number_choose)
dict_lp_mission.update({key: dict_initial})
    return dict_lp_mission
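# Example usage (a sketch; the numbers and file name are illustrative):
#
#   params_lp = {"number_true_false": 1000, "rounds": 3,
#                "test_ratio": [0.2, 0.5], "number_choose": 5}
#   scores = final_link_prediction(dict_all_embeddings, params_lp,
#                                  "non_edges.csv")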
/LDB_Inventory_Barcode-0.14.1.tar.gz/LDB_Inventory_Barcode-0.14.1/docs/getting-started.rst |

Getting started
===============
Installation
------------
The usual way is to use pip:
.. code:: shell
pip install python-barcode
Don't forget to add this to your app's dependencies.
If you'll be exporting to images (e.g., not just SVG), you'll need the "images" extras:
.. code:: shell
pip install "python-barcode[images]"
# Note: keep the quotes, most shells don't play nice with square brackets.
Usage
-----
Let's start off with some code samples.
Keep in mind that checksums are calculated automatically -- you don't need to do the
math before passing the value for the barcode.
In some systems (Code 39) the checksum is optional. For these, you can provide the
``add_checksum=False`` keyword argument.
Generating SVG files
~~~~~~~~~~~~~~~~~~~~
.. code:: python
from io import BytesIO
from barcode import EAN13
from barcode.writer import SVGWriter
# Write to a file-like object:
rv = BytesIO()
    EAN13("100000902922", writer=SVGWriter()).write(rv)
# Or to an actual file:
with open("somefile.svg", "wb") as f:
        EAN13("100000011111", writer=SVGWriter()).write(f)
Generating image files
~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.4b1
.. attention::
Keep in mind that SVG files are vectorized, so they will scale a lot better than
images. It's recommended to use images only if your medium or target usages does not
support SVG.
.. code:: python
from io import BytesIO
from barcode import EAN13
from barcode.writer import ImageWriter
# Write to a file-like object:
rv = BytesIO()
    EAN13("100000902922", writer=ImageWriter()).write(rv)
# Or to an actual file:
with open("somefile.jpeg", "wb") as f:
EAN13("100000011111", writer=ImageWriter()).write(f)
Interactive generating an SVG
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using an interactive python interpreter to generate SVG files.
.. code:: pycon
>>> import barcode
>>> barcode.PROVIDED_BARCODES
['code128', 'code39', 'ean', 'ean13', 'ean14', 'ean8', 'gs1', 'gs1_128', 'gtin', 'isbn', 'isbn10', 'isbn13', 'issn', 'itf', 'jan', 'pzn', 'upc', 'upca']
>>> EAN = barcode.get_barcode_class('ean13')
>>> EAN
<class 'barcode.ean.EuropeanArticleNumber13'>
>>> my_ean = EAN('5901234123457')
>>> my_ean
<EuropeanArticleNumber13('5901234123457')>
>>> fullname = my_ean.save('ean13_barcode')
>>> fullname
'ean13_barcode.svg'
>>>
You can check the generated files (e.g.: ``ean13_barcode.svg``) by opening them with
any graphical app (e.g.: Firefox).
Interactive generating a PNG
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using an interactive python interpreter to generate PNG files.
.. code:: pycon
>>> import barcode
>>> from barcode.writer import ImageWriter
>>> EAN = barcode.get_barcode_class('ean13')
>>> my_ean = EAN('5901234123457', writer=ImageWriter())
>>> fullname = my_ean.save('ean13_barcode')
>>> fullname
'ean13_barcode.png'
>>> from io import BytesIO
>>> fp = BytesIO()
>>> my_ean.write(fp)
>>> my_ean
<EuropeanArticleNumber13('5901234123457')>
>>> with open("path/to/file", "wb") as f:
... my_ean.write(f) # Pillow (ImageWriter) produces RAW format here
...
>>> from barcode import generate
>>> name = generate('EAN13', '5901234123457', output='barcode_svg')
>>> name
'barcode_svg.svg'
>>> fp = BytesIO()
>>> generate('EAN13', '5901234123457', writer=ImageWriter(), output=fp)
>>>
You can check the generated files (e.g.: ``ean13_barcode.png``) by opening them with
any graphical app (e.g.: Firefox).
Command Line usage
~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.7beta4
This library also includes a cli app for quickly generating barcodes from the command
line or from shell scripts:
.. code:: console
$ # Save a barcode to outfile.svg:
$ python-barcode create "123456789000" outfile -b ean --text "text to appear under barcode"
$ # Generate a PNG (Require Pillow):
$ python-barcode create -t png "My Text" outfile
$ python-barcode --help
usage: python-barcode [-h] [-v] {create,list} ...
Create standard barcodes via cli.
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
Actions:
{create,list}
create Create a barcode with the given options.
list List available image and code types.
Image output enabled, use --type option to give image format (png, jpeg, ...).
$
/django-chuck-0.2.3.tar.gz/django-chuck/modules/django-cms/project/apps/filer/migrations/0001_initial.py |

from south.db import db
from django.db import models
from filer.models import *
class Migration:
def forwards(self, orm):
# Adding model 'Image'
db.create_table('filer_image', (
('file_ptr', orm['filer.Image:file_ptr']),
('_height', orm['filer.Image:_height']),
('_width', orm['filer.Image:_width']),
('date_taken', orm['filer.Image:date_taken']),
('default_alt_text', orm['filer.Image:default_alt_text']),
('default_caption', orm['filer.Image:default_caption']),
('author', orm['filer.Image:author']),
('must_always_publish_author_credit', orm['filer.Image:must_always_publish_author_credit']),
('must_always_publish_copyright', orm['filer.Image:must_always_publish_copyright']),
('subject_location', orm['filer.Image:subject_location']),
))
db.send_create_signal('filer', ['Image'])
# Adding model 'ClipboardItem'
db.create_table('filer_clipboarditem', (
('id', orm['filer.ClipboardItem:id']),
('file', orm['filer.ClipboardItem:file']),
('clipboard', orm['filer.ClipboardItem:clipboard']),
))
db.send_create_signal('filer', ['ClipboardItem'])
# Adding model 'File'
db.create_table('filer_file', (
('id', orm['filer.File:id']),
('folder', orm['filer.File:folder']),
('file_field', orm['filer.File:file_field']),
('_file_type_plugin_name', orm['filer.File:_file_type_plugin_name']),
('_file_size', orm['filer.File:_file_size']),
('has_all_mandatory_data', orm['filer.File:has_all_mandatory_data']),
('original_filename', orm['filer.File:original_filename']),
('name', orm['filer.File:name']),
('owner', orm['filer.File:owner']),
('uploaded_at', orm['filer.File:uploaded_at']),
('modified_at', orm['filer.File:modified_at']),
))
db.send_create_signal('filer', ['File'])
# Adding model 'Folder'
db.create_table('filer_folder', (
('id', orm['filer.Folder:id']),
('parent', orm['filer.Folder:parent']),
('name', orm['filer.Folder:name']),
('owner', orm['filer.Folder:owner']),
('uploaded_at', orm['filer.Folder:uploaded_at']),
('created_at', orm['filer.Folder:created_at']),
('modified_at', orm['filer.Folder:modified_at']),
('lft', orm['filer.Folder:lft']),
('rght', orm['filer.Folder:rght']),
('tree_id', orm['filer.Folder:tree_id']),
('level', orm['filer.Folder:level']),
))
db.send_create_signal('filer', ['Folder'])
# Adding model 'Clipboard'
db.create_table('filer_clipboard', (
('id', orm['filer.Clipboard:id']),
('user', orm['filer.Clipboard:user']),
))
db.send_create_signal('filer', ['Clipboard'])
# Adding model 'FolderPermission'
db.create_table('filer_folderpermission', (
('id', orm['filer.FolderPermission:id']),
('folder', orm['filer.FolderPermission:folder']),
('type', orm['filer.FolderPermission:type']),
('user', orm['filer.FolderPermission:user']),
('group', orm['filer.FolderPermission:group']),
('everybody', orm['filer.FolderPermission:everybody']),
('can_edit', orm['filer.FolderPermission:can_edit']),
('can_read', orm['filer.FolderPermission:can_read']),
('can_add_children', orm['filer.FolderPermission:can_add_children']),
))
db.send_create_signal('filer', ['FolderPermission'])
# Creating unique_together for [parent, name] on Folder.
db.create_unique('filer_folder', ['parent_id', 'name'])
def backwards(self, orm):
# Deleting unique_together for [parent, name] on Folder.
db.delete_unique('filer_folder', ['parent_id', 'name'])
# Deleting model 'Image'
db.delete_table('filer_image')
# Deleting model 'ClipboardItem'
db.delete_table('filer_clipboarditem')
# Deleting model 'File'
db.delete_table('filer_file')
# Deleting model 'Folder'
db.delete_table('filer_folder')
# Deleting model 'Clipboard'
db.delete_table('filer_clipboard')
# Deleting model 'FolderPermission'
db.delete_table('filer_folderpermission')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.clipboard': {
'files': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clipboards'", 'to': "orm['auth.User']"})
},
'filer.clipboarditem': {
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folderpermission': {
'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['filer']
/LDB_Inventory_Barcode-0.14.1.tar.gz/LDB_Inventory_Barcode-0.14.1/ldb/inventory/barcode/ean.py |

__docformat__ = "restructuredtext en"
from functools import reduce
from ldb.inventory.barcode.base import Barcode
from ldb.inventory.barcode.charsets import ean as _ean
from ldb.inventory.barcode.errors import (
IllegalCharacterError,
NumberOfDigitsError,
WrongCountryCodeError,
)
# EAN13 Specs (all sizes in mm)
SIZES = {
"SC0": 0.27,
"SC1": 0.297,
"SC2": 0.33,
"SC3": 0.363,
"SC4": 0.396,
"SC5": 0.445,
"SC6": 0.495,
"SC7": 0.544,
"SC8": 0.61,
"SC9": 0.66,
}
class EuropeanArticleNumber13(Barcode):
"""Initializes EAN13 object.
:parameters:
ean : String
The ean number as string.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = "EAN-13"
digits = 12
def __init__(self, ean, writer=None, no_checksum=False):
ean = ean[: self.digits]
if not ean.isdigit():
raise IllegalCharacterError("EAN code can only contain numbers.")
if len(ean) != self.digits:
raise NumberOfDigitsError(
"EAN must have {} digits, not {}.".format(
self.digits,
len(ean),
)
)
self.ean = ean
        # If no checksum was requested, append a thirteenth char if one was
        # given, otherwise pad with zero. (Note: the input was truncated to
        # self.digits above, so in practice this always pads with "0".)
        if no_checksum:
            self.ean = "{}{}".format(
                ean, ean[self.digits] if len(ean) > self.digits else 0
            )
else:
self.ean = "{}{}".format(ean, self.calculate_checksum())
self.writer = writer or Barcode.default_writer()
def __str__(self):
return self.ean
def get_fullcode(self):
return self.ean
def calculate_checksum(self):
"""Calculates the checksum for EAN13-Code.
:returns: The checksum for `self.ean`.
:rtype: Integer
"""
def sum_(x, y):
return int(x) + int(y)
evensum = reduce(sum_, self.ean[-2::-2])
oddsum = reduce(sum_, self.ean[-1::-2])
return (10 - ((evensum + oddsum * 3) % 10)) % 10
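    # Worked example for the payload "590123412345": ean[-2::-2] gives
    # 4+2+4+2+0+5 = 17 (evensum) and ean[-1::-2] gives 5+3+1+3+1+9 = 22
    # (oddsum), so the check digit is (10 - ((17 + 22 * 3) % 10)) % 10 = 7,
    # yielding the full code "5901234123457".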
def build(self):
"""Builds the barcode pattern from `self.ean`.
:returns: The pattern as string
:rtype: String
"""
code = _ean.EDGE[:]
pattern = _ean.LEFT_PATTERN[int(self.ean[0])]
for i, number in enumerate(self.ean[1:7]):
code += _ean.CODES[pattern[i]][int(number)]
code += _ean.MIDDLE
for number in self.ean[7:]:
code += _ean.CODES["C"][int(number)]
code += _ean.EDGE
return [code]
def to_ascii(self):
"""Returns an ascii representation of the barcode.
:rtype: String
"""
code = self.build()
for i, line in enumerate(code):
code[i] = line.replace("1", "|").replace("0", " ")
return "\n".join(code)
def render(self, writer_options=None, text=None):
options = {"module_width": SIZES["SC2"]}
options.update(writer_options or {})
return Barcode.render(self, options, text)
class JapanArticleNumber(EuropeanArticleNumber13):
"""Initializes JAN barcode.
:parameters:
jan : String
The jan number as string.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = "JAN"
valid_country_codes = list(range(450, 460)) + list(range(490, 500))
def __init__(self, jan, writer=None):
if int(jan[:3]) not in JapanArticleNumber.valid_country_codes:
raise WrongCountryCodeError(
"Country code isn't between 450-460 or 490-500."
)
EuropeanArticleNumber13.__init__(self, jan, writer)
class EuropeanArticleNumber8(EuropeanArticleNumber13):
"""Represents an EAN-8 barcode. See EAN13's __init__ for details.
:parameters:
ean : String
The ean number as string.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = "EAN-8"
digits = 7
def __init__(self, ean, writer=None):
EuropeanArticleNumber13.__init__(self, ean, writer)
def build(self):
"""Builds the barcode pattern from `self.ean`.
:returns: The pattern as string
:rtype: String
"""
code = _ean.EDGE[:]
for number in self.ean[:4]:
code += _ean.CODES["A"][int(number)]
code += _ean.MIDDLE
for number in self.ean[4:]:
code += _ean.CODES["C"][int(number)]
code += _ean.EDGE
return [code]
class EuropeanArticleNumber14(EuropeanArticleNumber13):
"""Represents an EAN-14 barcode. See EAN13's __init__ for details.
:parameters:
ean : String
The ean number as string.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = "EAN-14"
digits = 13
def calculate_checksum(self):
"""Calculates the checksum for EAN13-Code.
:returns: The checksum for `self.ean`.
:rtype: Integer
"""
def sum_(x, y):
return int(x) + int(y)
evensum = reduce(sum_, self.ean[::2])
oddsum = reduce(sum_, self.ean[1::2])
return (10 - (((evensum * 3) + oddsum) % 10)) % 10
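# Worked example (illustrative): for the 13-digit payload "0123456789012",
# evensum (indices 0, 2, 4, ...) = 22 and oddsum (indices 1, 3, 5, ...) = 26,
# so the check digit is (10 - (22*3 + 26) % 10) % 10 = 8 and the full
# EAN-14 is 01234567890128. Note the weights are swapped relative to EAN-13.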
# Shortcuts
EAN14 = EuropeanArticleNumber14
EAN13 = EuropeanArticleNumber13
EAN8 = EuropeanArticleNumber8
JAN = JapanArticleNumber | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/builtin/atomic/strings.py | import io
import re
import unicodedata
from binascii import hexlify, unhexlify
from heapq import heappop, heappush
from typing import Any, List
from mathics_scanner import TranslateError
from mathics.builtin.base import Builtin, Predefined, PrefixOperator, Test
from mathics.core.atoms import Integer, Integer0, Integer1, String
from mathics.core.attributes import A_LISTABLE, A_PROTECTED
from mathics.core.convert.expression import to_mathics_list
from mathics.core.convert.python import from_bool
from mathics.core.evaluation import Evaluation
from mathics.core.expression import Expression
from mathics.core.list import ListExpression
from mathics.core.parser import MathicsFileLineFeeder, parse
from mathics.core.symbols import Symbol, SymbolTrue
from mathics.core.systemsymbols import (
SymbolBlank,
SymbolDirectedInfinity,
SymbolFailed,
SymbolInputForm,
SymbolOutputForm,
)
from mathics.eval.strings import eval_ToString
from mathics.settings import SYSTEM_CHARACTER_ENCODING
SymbolToExpression = Symbol("ToExpression")
_regex_longest = {
"+": "+",
"*": "*",
}
_regex_shortest = {
"+": "+?",
"*": "*?",
}
# A better approach would be to write a pymathics module that
# covers all of the variations. Here we just give some minimal basics.
# Data taken from:
# https://unicode-org.github.io/cldr-staging/charts/37/summary/root.html
# The uppercase letters often don't have the accents that lower-case
# letters have. I don't understand why, or I may have interpreted the charts wrong.
#
alphabet_descriptions = {
"Cyrillic": {
"Lowercase": r"абвгґдђѓеёєжзѕиіїйјклљмнњопрстћќуўфхцчџшщъыьэюя",
"Uppercase": r"АБВГҐДЂЃЕЁЄЖЗЅИІЇЙЈКЛЉМНЊОПРСТЋЌУЎФХЦЧЏШЩЪЫЬЭЮЯ",
},
"English": {
"Lowercase": r"abcdefghijklmnopqrstuvwxyz",
"Uppercase": r"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
"French": {
"Lowercase": r"aàâæbcçdeéèêëfghiîïjklmnoôœpqrstuùûüvwxyÿz",
"Uppercase": r"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
"German": {
"Lowercase": r"aäbcdefghijklmnoöpqrsßtuüvwxyz",
"Uppercase": r"AÄBCDEFGHIJKLMNOÖPQRSTUÜVWXYZ",
},
"Greek": {
"Lowercase": "αβγδεζηθικλμνξοπρστυφχψω",
"Uppercase": "ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ",
},
"Italian": {
"Lowercase": "aàbcdeéèfghiìjklmnoóòpqrstuùvwxyz",
"Uppercase": r"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
"Spanish": {
"Lowercase": "aábcdeéfghiíjklmnñoópqrstuúüvwxyz",
"Uppercase": "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ",
},
"Swedish": {
"Lowercase": "aàbcdeéfghijklmnopqrstuvwxyzåäö",
"Uppercase": "ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ",
},
"Turkish": {
"Lowercase": "abcçdefgğhıiİjklmnoöprsştuüvyz",
"Uppercase": "ABCÇDEFGHIİJKLMNOÖPQRSŞTUÜVWXYZ",
},
}
alphabet_alias = {
"Russian": "Cyrillic",
}
def _encode_pname(name):
return "n" + hexlify(name.encode("utf8")).decode("utf8")
def _decode_pname(name):
return unhexlify(name[1:]).decode("utf8")
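# Illustrative example (not part of the original module): the round trip is
#   _encode_pname("x")   -> "n78"
#   _decode_pname("n78") -> "x"
# The "n" prefix keeps the encoded group name a valid Python identifier even
# when the hex digits begin with a number.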
def _evaluate_match(s, m, evaluation):
replace = dict(
(_decode_pname(name), String(value)) for name, value in m.groupdict().items()
)
return s.replace_vars(replace, in_scoping=False).evaluate(evaluation)
def _parallel_match(text, rules, flags, limit):
heap = []
def push(i, iter, form):
m = None
try:
m = next(iter)
except StopIteration:
pass
if m is not None:
heappush(heap, (m.start(), i, m, form, iter))
for i, (patt, form) in enumerate(rules):
push(i, re.finditer(patt, text, flags=flags), form)
k = 0
n = 0
while heap:
start, i, match, form, iter = heappop(heap)
if start >= k:
yield match, form
n += 1
if n >= limit > 0:
break
k = match.end()
push(i, iter, form)
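# Illustrative sketch (hypothetical arguments): given text "a1b2" and a single
# rule [(r"\d", form)], _parallel_match yields the matches for "1" and then "2"
# in order of start position. The heap merges the match streams of all rules;
# the "if start >= k" test drops any candidate that overlaps the previously
# accepted match, and a positive limit caps the number of results.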
def _pattern_search(name, string, patt, evaluation, options, matched):
# Get the pattern list and check validity for each
if patt.has_form("List", None):
patts = patt.elements
else:
patts = [patt]
re_patts = []
for p in patts:
py_p = to_regex(p, evaluation)
if py_p is None:
evaluation.message("StringExpression", "invld", p, patt)
return
re_patts.append(py_p)
flags = re.MULTILINE
if options["System`IgnoreCase"] is SymbolTrue:
flags = flags | re.IGNORECASE
def _search(patts, str, flags, matched):
if any(re.search(p, str, flags=flags) for p in patts):
return from_bool(matched)
return from_bool(not matched)
# Check string validity and perform regex searching
if string.has_form("List", None):
py_s = [s.get_string_value() for s in string.elements]
if any(s is None for s in py_s):
evaluation.message(
name, "strse", Integer1, Expression(Symbol(name), string, patt)
)
return
return to_mathics_list(*[_search(re_patts, s, flags, matched) for s in py_s])
else:
py_s = string.get_string_value()
if py_s is None:
evaluation.message(
name, "strse", Integer1, Expression(Symbol(name), string, patt)
)
return
return _search(re_patts, py_s, flags, matched)
def to_regex(
expr, evaluation, q=_regex_longest, groups=None, abbreviated_patterns=False
):
if expr is None:
return None
if groups is None:
groups = {}
def recurse(x, quantifiers=q):
return to_regex(x, evaluation, q=quantifiers, groups=groups)
if isinstance(expr, String):
result = expr.get_string_value()
if abbreviated_patterns:
pieces = []
i, j = 0, 0
while j < len(result):
c = result[j]
if c == "\\" and j + 1 < len(result):
pieces.append(re.escape(result[i:j]))
pieces.append(re.escape(result[j + 1]))
j += 2
i = j
elif c == "*":
pieces.append(re.escape(result[i:j]))
pieces.append("(.*)")
j += 1
i = j
elif c == "@":
pieces.append(re.escape(result[i:j]))
# one or more characters, excluding uppercase letters
pieces.append("([^A-Z]+)")
j += 1
i = j
else:
j += 1
pieces.append(re.escape(result[i:j]))
result = "".join(pieces)
else:
result = re.escape(result)
return result
if expr.has_form("RegularExpression", 1):
regex = expr.elements[0].get_string_value()
if regex is None:
return regex
try:
re.compile(regex)
# Don't return the compiled regex because it may need to be composed
# further e.g. StringExpression["abc", RegularExpression[regex2]].
return regex
except re.error:
return None # invalid regex
if isinstance(expr, Symbol):
return {
"System`NumberString": r"[-|+]?(\d+(\.\d*)?|\.\d+)?",
"System`Whitespace": r"(?u)\s+",
"System`DigitCharacter": r"\d",
"System`WhitespaceCharacter": r"(?u)\s",
"System`WordCharacter": r"(?u)[^\W_]",
"System`StartOfLine": r"^",
"System`EndOfLine": r"$",
"System`StartOfString": r"\A",
"System`EndOfString": r"\Z",
"System`WordBoundary": r"\b",
"System`LetterCharacter": r"(?u)[^\W_0-9]",
"System`HexadecimalCharacter": r"[0-9a-fA-F]",
}.get(expr.get_name())
if expr.has_form("CharacterRange", 2):
(start, stop) = (element.get_string_value() for element in expr.elements)
if all(x is not None and len(x) == 1 for x in (start, stop)):
return "[{0}-{1}]".format(re.escape(start), re.escape(stop))
if expr.has_form("Blank", 0):
return r"(.|\n)"
if expr.has_form("BlankSequence", 0):
return r"(.|\n)" + q["+"]
if expr.has_form("BlankNullSequence", 0):
return r"(.|\n)" + q["*"]
if expr.has_form("Except", 1, 2):
if len(expr.elements) == 1:
# TODO: Check if this shouldn't be SymbolBlank
# instead of SymbolBlank[]
elements = [expr.elements[0], Expression(SymbolBlank)]
else:
elements = [expr.elements[0], expr.elements[1]]
elements = [recurse(element) for element in elements]
if all(element is not None for element in elements):
return "(?!{0}){1}".format(*elements)
if expr.has_form("Characters", 1):
element = expr.elements[0].get_string_value()
if element is not None:
return "[{0}]".format(re.escape(element))
if expr.has_form("StringExpression", None):
elements = [recurse(element) for element in expr.elements]
if None in elements:
return None
return "".join(elements)
if expr.has_form("Repeated", 1):
element = recurse(expr.elements[0])
if element is not None:
return "({0})".format(element) + q["+"]
if expr.has_form("RepeatedNull", 1):
element = recurse(expr.elements[0])
if element is not None:
return "({0})".format(element) + q["*"]
if expr.has_form("Alternatives", None):
elements = [recurse(element) for element in expr.elements]
if all(element is not None for element in elements):
return "|".join(elements)
if expr.has_form("Shortest", 1):
return recurse(expr.elements[0], quantifiers=_regex_shortest)
if expr.has_form("Longest", 1):
return recurse(expr.elements[0], quantifiers=_regex_longest)
if expr.has_form("Pattern", 2) and isinstance(expr.elements[0], Symbol):
name = expr.elements[0].get_name()
patt = groups.get(name, None)
if patt is not None:
if expr.elements[1].has_form("Blank", 0):
pass # ok, no warnings
elif not expr.elements[1].sameQ(patt):
evaluation.message(
"StringExpression", "cond", expr.elements[0], expr, expr.elements[0]
)
return "(?P=%s)" % _encode_pname(name)
else:
groups[name] = expr.elements[1]
return "(?P<%s>%s)" % (_encode_pname(name), recurse(expr.elements[1]))
return None
def anchor_pattern(patt):
"""
anchors a regex in order to force matching against an entire string.
"""
if not patt.endswith(r"\Z"):
patt = patt + r"\Z"
if not patt.startswith(r"\A"):
patt = r"\A" + patt
return patt
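# Example (illustrative): anchor_pattern(r"\d+") returns r"\A\d+\Z", so that
# searching with the anchored pattern only succeeds on a full-string match.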
# FIXME: Generalize string.lower() and ord()
def letter_number(chars: List[str], start_ord) -> List["Integer"]:
# Note caller has verified that everything isalpha() and
# each char has length 1.
return [Integer(ord(char.lower()) - start_ord) for char in chars]
def mathics_split(patt, string, flags):
"""
Python's re.split includes the text of groups if they are capturing.
Furthermore, you can't split on empty matches. Trying to do this returns
the original string for Python < 3.5, raises a ValueError for
Python >= 3.5, <= X and works as expected for Python >= X, where 'X' is
some future version of Python (> 3.6).
For these reasons we implement our own split.
"""
# (start, end) indices of splits
indices = list((m.start(), m.end()) for m in re.finditer(patt, string, flags))
# (start, end) indices of stuff to keep
indices = [(None, 0)] + indices + [(len(string), None)]
indices = [(indices[i][1], indices[i + 1][0]) for i in range(len(indices) - 1)]
# slice up the string
return [string[start:stop] for start, stop in indices]
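# Worked example (illustrative): mathics_split(r",", "a,b,,c", 0) finds
# separators at (1, 2), (3, 4) and (4, 5), so the kept slices are
# (0, 1), (2, 3), (4, 4) and (5, 6), i.e. ["a", "b", "", "c"].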
_encodings = {
# see https://docs.python.org/2/library/codecs.html#standard-encodings
"ASCII": "ascii",
"CP949": "cp949",
"CP950": "cp950",
"EUC-JP": "euc_jp",
"IBM-850": "cp850",
"ISOLatin1": "iso8859_1",
"ISOLatin2": "iso8859_2",
"ISOLatin3": "iso8859_3",
"ISOLatin4": "iso8859_4",
"ISOLatinCyrillic": "iso8859_5",
"ISO8859-1": "iso8859_1",
"ISO8859-2": "iso8859_2",
"ISO8859-3": "iso8859_3",
"ISO8859-4": "iso8859_4",
"ISO8859-5": "iso8859_5",
"ISO8859-6": "iso8859_6",
"ISO8859-7": "iso8859_7",
"ISO8859-8": "iso8859_8",
"ISO8859-9": "iso8859_9",
"ISO8859-10": "iso8859_10",
"ISO8859-13": "iso8859_13",
"ISO8859-14": "iso8859_14",
"ISO8859-15": "iso8859_15",
"ISO8859-16": "iso8859_16",
"koi8-r": "koi8_r",
"MacintoshCyrillic": "mac_cyrillic",
"MacintoshGreek": "mac_greek",
"MacintoshIcelandic": "mac_iceland",
"MacintoshRoman": "mac_roman",
"MacintoshTurkish": "mac_turkish",
"ShiftJIS": "shift_jis",
"Unicode": "utf_16",
"UTF-8": "utf_8",
"UTF8": "utf_8",
"WindowsANSI": "cp1252",
"WindowsBaltic": "cp1257",
"WindowsCyrillic": "cp1251",
"WindowsEastEurope": "cp1250",
"WindowsGreek": "cp1253",
"WindowsTurkish": "cp1254",
}
def to_python_encoding(encoding):
return _encodings.get(encoding)
class Alphabet(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/Alphabet.html</url>
<dl>
<dt>'Alphabet'[]
<dd>gives the list of lowercase letters a-z in the English alphabet.
<dt>'Alphabet[$type$]'
<dd>gives the alphabet for the language or class $type$.
</dl>
>> Alphabet[]
= {a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z}
>> Alphabet["German"]
= {a, ä, b, c, d, e, f, g, h, i, j, k, l, m, n, o, ö, p, q, r, s, ß, t, u, ü, v, w, x, y, z}
Some languages are aliases. "Russian" is the same letter set as "Cyrillic":
>> Alphabet["Russian"] == Alphabet["Cyrillic"]
= True
"""
messages = {
"nalph": "The alphabet `` is not known or not available.",
}
rules = {
"Alphabet[]": """Alphabet["English"]""",
}
summary_text = "lowercase letters in an alphabet"
def eval(self, alpha, evaluation):
"""Alphabet[alpha_String]"""
alphakey = alpha.value
alphakey = alphabet_alias.get(alphakey, alphakey)
if alphakey is None:
evaluation.message("Alphabet", "nalph", alpha)
return
alphabet = alphabet_descriptions.get(alphakey, None)
if alphabet is None:
evaluation.message("Alphabet", "nalph", alpha)
return
return to_mathics_list(*alphabet["Lowercase"], elements_conversion_fn=String)
class CharacterEncoding(Predefined):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/$CharacterEncoding.html</url>
<dl>
<dt>'$CharacterEncoding'
<dd>specifies the default raw character encoding to use for input and \
output when no encoding is explicitly specified. \
Initially this is set to '$SystemCharacterEncoding'.
</dl>
See the character encoding currently in effect, which is used by input and \
output functions like 'OpenRead[]':
>> $CharacterEncoding
= ...
See also <url>
:$SystemCharacterEncoding:
/doc/reference-of-built-in-symbols/atomic-elements-of-expressions/string-manipulation/$systemcharacterencoding/</url>.
"""
name = "$CharacterEncoding"
value = f'"{SYSTEM_CHARACTER_ENCODING}"'
rules = {
"$CharacterEncoding": value,
}
summary_text = "default character encoding"
class CharacterEncodings(Predefined):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/$CharacterEncodings.html</url>
<dl>
<dt>'$CharacterEncodings'
<dd>stores the list of available character encodings.
</dl>
>> $CharacterEncodings
= ...
"""
name = "$CharacterEncodings"
value = "{%s}" % ",".join(map(lambda s: '"%s"' % s, _encodings.keys()))
rules = {
"$CharacterEncodings": value,
}
summary_text = "available character encodings"
class HexadecimalCharacter(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/HexadecimalCharacter.html</url>
<dl>
<dt>'HexadecimalCharacter'
<dd>represents the characters 0-9, a-f and A-F.
</dl>
>> StringMatchQ[#, HexadecimalCharacter] & /@ {"a", "1", "A", "x", "H", " ", "."}
= {True, True, True, False, False, False, False}
"""
summary_text = "hexadecimal digits"
# This isn't your normal Box class. We'll keep this here rather than
# in mathics.builtin.box for now.
class InterpretedBox(PrefixOperator):
r"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/InterpretedBox.html</url>
<dl>
<dt>'InterpretedBox[$box$]'
<dd>is the ad hoc full form for \! $box$. Just for internal use...
</dl>
>> \! \(2+2\)
= 4
"""
operator = "\\!"
precedence = 670
summary_text = "interpret boxes as an expression"
def eval(self, boxes, evaluation: Evaluation):
"""InterpretedBox[boxes_]"""
# TODO: the following is a very raw and rudimentary way to
# handle these expressions.
# In the first place, this should handle different kinds
# of boxes in different ways.
reinput = boxes.boxes_to_text()
return Expression(SymbolToExpression, String(reinput)).evaluate(evaluation)
class LetterNumber(Builtin):
r"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/LetterNumber.html</url>
<dl>
<dt>'LetterNumber'[$c$]
<dd>returns the position of the character $c$ in the English alphabet.
<dt>'LetterNumber["string"]'
<dd>returns a list of the positions of characters in string.
<dt>'LetterNumber["string", $alpha$]'
<dd>returns a list of the positions of characters in string, regarding the alphabet $alpha$.
</dl>
>> LetterNumber["b"]
= 2
LetterNumber also works with uppercase characters
>> LetterNumber["B"]
= 2
>> LetterNumber["ss2!"]
= {19, 19, 0, 0}
Get positions of each of the letters in a string:
>> LetterNumber[Characters["Peccary"]]
= {16, 5, 3, 3, 1, 18, 25}
>> LetterNumber[{"P", "Pe", "P1", "eck"}]
= {16, {16, 5}, {16, 0}, {5, 3, 11}}
#> LetterNumber[4]
: The argument 4 is not a string.
= LetterNumber[4]
>> LetterNumber["\[Beta]", "Greek"]
= 2
"""
# FIXME: put the right unicode characters in a way that the
# following test works...
r"""
# #> LetterNumber["\[CapitalBeta]", "Greek"]
# = 2
"""
messages = {
"nalph": "The alphabet `` is not known or not available.",
"nas": ("The argument `1` is not a string."),
}
summary_text = "position of a letter in an alphabet"
def eval_alpha_str(self, chars: List[Any], alpha: String, evaluation):
"LetterNumber[chars_, alpha_String]"
alphakey = alpha.value
alphakey = alphabet_alias.get(alphakey, alphakey)
if alphakey is None:
evaluation.message("LetterNumber", "nalph", alpha)
return
if alphakey == "English":
return self.eval(chars, evaluation)
alphabet = alphabet_descriptions.get(alphakey, None)
if alphabet is None:
evaluation.message("LetterNumber", "nalph", alpha)
return
# TODO: handle Uppercase
if isinstance(chars, String):
py_chars = chars.value
if len(py_chars) == 1:
# FIXME generalize ord("a")
res = alphabet["Lowercase"].find(py_chars) + 1
if res == -1:
res = alphabet["Uppercase"].find(py_chars) + 1
return Integer(res)
else:
r = []
for c in py_chars:
cp = alphabet["Lowercase"].find(c) + 1
if cp == -1:
cp = alphabet["Uppercase"].find(c) + 1
r.append(cp)
return ListExpression(*r)
elif chars.has_form("List", 1, None):
result = []
for element in chars.elements:
result.append(self.eval_alpha_str(element, alpha, evaluation))
return ListExpression(*result)
else:
evaluation.message(self.__class__.__name__, "nas", chars)
return
return None
def eval(self, chars: List[Any], evaluation):
"LetterNumber[chars_]"
start_ord = ord("a") - 1
if isinstance(chars, String):
py_chars = chars.value
if len(py_chars) == 1:
# FIXME generalize ord("a")
return letter_number([py_chars[0]], start_ord)[0]
else:
r = [
letter_number(c, start_ord)[0] if c.isalpha() else 0
for c in py_chars
]
return to_mathics_list(*r)
elif chars.has_form("List", 1, None):
result = []
for element in chars.elements:
result.append(self.eval(element, evaluation))
return ListExpression(*result)
else:
evaluation.message(self.__class__.__name__, "nas", chars)
return None
class NumberString(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/NumberString.html</url>
<dl>
<dt>'NumberString'
<dd>represents the characters in a number.
</dl>
>> StringMatchQ["1234", NumberString]
= True
>> StringMatchQ["1234.5", NumberString]
= True
>> StringMatchQ["1.2`20", NumberString]
= False
"""
summary_text = "characters in string representation of a number"
class RemoveDiacritics(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/RemoveDiacritics.html</url>
<dl>
<dt>'RemoveDiacritics[$s$]'
<dd>returns a version of $s$ with all diacritics removed.
</dl>
>> RemoveDiacritics["en prononçant pêcher et pécher"]
= en prononcant pecher et pecher
>> RemoveDiacritics["piñata"]
= pinata
"""
summary_text = "remove diacritics"
def eval(self, s, evaluation: Evaluation):
"RemoveDiacritics[s_String]"
return String(
unicodedata.normalize("NFKD", s.value)
.encode("ascii", "ignore")
.decode("ascii")
)
class _StringFind(Builtin):
options = {
"IgnoreCase": "False",
"MetaCharacters": "None",
}
messages = {
"strse": "String or list of strings expected at position `1` in `2`.",
"srep": "`1` is not a valid string replacement rule.",
"innf": (
"Non-negative integer or Infinity expected at " "position `1` in `2`."
),
}
def _find(self, py_stri, py_rules, py_n, flags, evaluation):  # matches the call in _apply
raise NotImplementedError()
def _apply(self, string, rule, n, evaluation, options, cases):
if n.sameQ(Symbol("System`Private`Null")):
expr = Expression(Symbol(self.get_name()), string, rule)
n = None
else:
expr = Expression(Symbol(self.get_name()), string, rule, n)
# convert string
if string.has_form("List", None):
py_strings = [stri.get_string_value() for stri in string.elements]
if None in py_strings:
evaluation.message(self.get_name(), "strse", Integer1, expr)
return
else:
py_strings = string.get_string_value()
if py_strings is None:
evaluation.message(self.get_name(), "strse", Integer1, expr)
return
# convert rule
def convert_rule(r):
if r.has_form("Rule", None) and len(r.elements) == 2:
py_s = to_regex(r.elements[0], evaluation)
if py_s is None:
evaluation.message(
"StringExpression", "invld", r.elements[0], r.elements[0]
)
return
py_sp = r.elements[1]
return py_s, py_sp
elif cases:
py_s = to_regex(r, evaluation)
if py_s is None:
evaluation.message("StringExpression", "invld", r, r)
return
return py_s, None
evaluation.message(self.get_name(), "srep", r)
return
if rule.has_form("List", None):
py_rules = [convert_rule(r) for r in rule.elements]
else:
py_rules = [convert_rule(rule)]
if None in py_rules:
return None
# convert n
if n is None:
py_n = 0
elif n == Expression(SymbolDirectedInfinity, Integer1):
py_n = 0
else:
py_n = n.get_int_value()
if py_n is None or py_n < 0:
evaluation.message(self.get_name(), "innf", Integer(3), expr)
return
# flags
flags = re.MULTILINE
if options["System`IgnoreCase"] is SymbolTrue:
flags = flags | re.IGNORECASE
if isinstance(py_strings, list):
return to_mathics_list(
*[
self._find(py_stri, py_rules, py_n, flags, evaluation)
for py_stri in py_strings
]
)
else:
return self._find(py_strings, py_rules, py_n, flags, evaluation)
class String_(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/String.html</url>
<dl>
<dt>'String'
<dd>is the head of strings.
</dl>
>> Head["abc"]
= String
>> "abc"
= abc
Use 'InputForm' to display quotes around strings:
>> InputForm["abc"]
= "abc"
'FullForm' also displays quotes:
>> FullForm["abc" + 2]
= Plus[2, "abc"]
"""
name = "String"
summary_text = "head for strings"
class StringContainsQ(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/StringContainsQ.html</url>
<dl>
<dt>'StringContainsQ["$string$", $patt$]'
<dd>returns True if any part of $string$ matches $patt$, and returns False otherwise.
<dt>'StringContainsQ[{"s1", "s2", ...}, patt]'
<dd>returns the list of results for each element of string list.
<dt>'StringContainsQ[patt]'
<dd>represents an operator form of StringContainsQ that can be applied to an expression.
</dl>
>> StringContainsQ["mathics", "m" ~~ __ ~~ "s"]
= True
>> StringContainsQ["mathics", "a" ~~ __ ~~ "m"]
= False
#> StringContainsQ["Hello", "o"]
= True
#> StringContainsQ["a"]["abcd"]
= True
#> StringContainsQ["Mathics", "ma", IgnoreCase -> False]
= False
>> StringContainsQ["Mathics", "MA" , IgnoreCase -> True]
= True
#> StringContainsQ["", "Empty String"]
= False
#> StringContainsQ["", ___]
= True
#> StringContainsQ["Empty Pattern", ""]
= True
#> StringContainsQ[notastring, "n"]
: String or list of strings expected at position 1 in StringContainsQ[notastring, n].
= StringContainsQ[notastring, n]
#> StringContainsQ["Welcome", notapattern]
: Element notapattern is not a valid string or pattern element in notapattern.
= StringContainsQ[Welcome, notapattern]
>> StringContainsQ[{"g", "a", "laxy", "universe", "sun"}, "u"]
= {False, False, False, True, True}
#> StringContainsQ[{}, "list of string is empty"]
= {}
>> StringContainsQ["e" ~~ ___ ~~ "u"] /@ {"The Sun", "Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"}
= {True, True, True, False, False, False, False, False, True}
## special cases, Mathematica allows list of patterns
#> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}]
= {False, False, True, True, False}
#> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}, IgnoreCase -> True]
= {False, False, True, True, True}
#> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {}]
= {False, False, False, False, False}
#> StringContainsQ[{"A", Galaxy, "Far", "Far", Away}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}]
: String or list of strings expected at position 1 in StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}].
= StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}]
#> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {F ~~ __ ~~ "r", aw ~~ ___}]
: Element F ~~ __ ~~ r is not a valid string or pattern element in {F ~~ __ ~~ r, aw ~~ ___}.
= StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}]
## Mathematica can determine the invalid element in the pattern; it reports the error:
## Element F is not a valid string or pattern element in {F ~~ __ ~~ r, aw ~~ ___}.
"""
messages = {
"strse": "String or list of strings expected at position `1` in `2`.",
}
options = {
"IgnoreCase": "False",
}
rules = {
"StringContainsQ[patt_][expr_]": "StringContainsQ[expr, patt]",
}
summary_text = "test whether a pattern matches with a substring"
def eval(self, string, patt, evaluation: Evaluation, options: dict):
"StringContainsQ[string_, patt_, OptionsPattern[%(name)s]]"
return _pattern_search(
self.__class__.__name__, string, patt, evaluation, options, True
)
class StringQ(Test):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/StringQ.html</url>
<dl>
<dt>'StringQ[$expr$]'
<dd>returns 'True' if $expr$ is a 'String', or 'False' otherwise.
</dl>
>> StringQ["abc"]
= True
>> StringQ[1.5]
= False
>> Select[{"12", 1, 3, 5, "yz", x, y}, StringQ]
= {12, yz}
"""
summary_text = "test whether an expression is a string"
def test(self, expr):
return isinstance(expr, String)
class StringRepeat(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/StringRepeat.html</url>
<dl>
<dt>'StringRepeat["$string$", $n$]'
<dd>gives $string$ repeated $n$ times.
<dt>'StringRepeat["$string$", $n$, $max$]'
<dd>gives $string$ repeated $n$ times, but not more than $max$ characters.
</dl>
>> StringRepeat["abc", 3]
= abcabcabc
>> StringRepeat["abc", 10, 7]
= abcabca
#> StringRepeat["x", 0]
: A positive integer is expected at position 2 in StringRepeat[x, 0].
= StringRepeat[x, 0]
"""
messages = {
"intp": "A positive integer is expected at position `1` in `2`.",
}
summary_text = "build a string by concatenating repetitions"
def eval(self, s, n, expression, evaluation):
"StringRepeat[s_String, n_]"
py_n = n.value if isinstance(n, Integer) else 0
if py_n < 1:
evaluation.message("StringRepeat", "intp", 2, expression)
else:
return String(s.value * py_n)
def eval_truncated(self, s, n, m, expression, evaluation):
"StringRepeat[s_String, n_Integer, m_Integer]"
# The above rule ensures that n and m are both Integer type
py_n = n.value
py_m = m.value
if py_n < 1:
evaluation.message("StringRepeat", "intp", 2, expression)
elif py_m < 1:
evaluation.message("StringRepeat", "intp", 3, expression)
else:
py_s = s.value
py_n = min(1 + py_m // len(py_s), py_n)
return String((py_s * py_n)[:py_m])
class SystemCharacterEncoding(Predefined):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/$SystemCharacterEncoding.html</url>
<dl>
<dt>$SystemCharacterEncoding
<dd>gives the default character encoding of the system.
On startup, the value of the environment variable 'MATHICS_CHARACTER_ENCODING' \
sets this value. However, if that environment variable is not set, the value \
is set in Python using 'sys.getdefaultencoding()'.
</dl>
>> $SystemCharacterEncoding
= ...
"""
name = "$SystemCharacterEncoding"
rules = {
"$SystemCharacterEncoding": '"' + SYSTEM_CHARACTER_ENCODING + '"',
}
summary_text = "system's character encoding"
class ToExpression(Builtin):
r"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/ToExpression.html</url>
<dl>
<dt>'ToExpression[$input$]'
<dd>interprets a given string as Mathics input.
<dt>'ToExpression[$input$, $form$]'
<dd>reads the given input in the specified $form$.
<dt>'ToExpression[$input$, $form$, $h$]'
<dd>applies the head $h$ to the expression before evaluating it.
</dl>
>> ToExpression["1 + 2"]
= 3
>> ToExpression["{2, 3, 1}", InputForm, Max]
= 3
>> ToExpression["2 3", InputForm]
= 6
Note that newlines are like semicolons, not blanks, so the return value is the value of the second line.
>> ToExpression["2\[NewLine]3"]
= 3
#> ToExpression["log(x)", InputForm]
= log x
#> ToExpression["1+"]
: Incomplete expression; more input is needed (line 1 of "ToExpression['1+']").
= $Failed
#> ToExpression[]
: ToExpression called with 0 arguments; between 1 and 3 arguments are expected.
= ToExpression[]
"""
# TODO: Other forms
"""
>> ToExpression["log(x)", TraditionalForm]
= Log[x]
>> ToExpression["log(x)", TraditionalForm]
= Log[x]
#> ToExpression["log(x)", StandardForm]
= log x
"""
attributes = A_LISTABLE | A_PROTECTED
messages = {
"argb": (
"`1` called with `2` arguments; "
"between `3` and `4` arguments are expected."
),
"interpfmt": (
"`1` is not a valid interpretation format. "
"Valid interpretation formats include InputForm "
"and any member of $BoxForms."
),
"notstr": "The format type `1` is valid only for string input.",
}
summary_text = "build an expression from formatted text"
def eval(self, seq, evaluation: Evaluation):
"ToExpression[seq__]"
# Organise Arguments
py_seq = seq.get_sequence()
if len(py_seq) == 1:
(inp, form, head) = (py_seq[0], SymbolInputForm, None)
elif len(py_seq) == 2:
(inp, form, head) = (py_seq[0], py_seq[1], None)
elif len(py_seq) == 3:
(inp, form, head) = (py_seq[0], py_seq[1], py_seq[2])
else:
assert len(py_seq) > 3  # 0 case handled by eval_empty
evaluation.message(
"ToExpression",
"argb",
"ToExpression",
Integer(len(py_seq)),
Integer1,
Integer(3),
)
return
# Apply the different forms
if form is SymbolInputForm:
if isinstance(inp, String):
# TODO: turn the code below into a function and call that.
s = inp.value
short_s = s[:15] + "..." if len(s) > 16 else s
with io.StringIO(s) as f:
f.name = """ToExpression['%s']""" % short_s
feeder = MathicsFileLineFeeder(f)
while not feeder.empty():
try:
query = parse(evaluation.definitions, feeder)
except TranslateError:
return SymbolFailed
finally:
feeder.send_messages(evaluation)
if query is None: # blank line / comment
continue
result = query.evaluate(evaluation)
else:
result = inp
else:
evaluation.message("ToExpression", "interpfmt", form)
return
# Apply head if present
if head is not None:
result = Expression(head, result).evaluate(evaluation)
return result
def eval_empty(self, evaluation: Evaluation):
"ToExpression[]"
evaluation.message(
"ToExpression", "argb", "ToExpression", Integer0, Integer1, Integer(3)
)
return
class ToString(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/ToString.html</url>
<dl>
<dt>'ToString[$expr$]'
<dd>returns a string representation of $expr$.
<dt>'ToString[$expr$, $form$]'
<dd>returns a string representation of $expr$ in the form $form$.
</dl>
>> ToString[2]
= 2
>> ToString[2] // InputForm
= "2"
>> ToString[a+b]
= a + b
>> "U" <> 2
: String expected.
= U <> 2
>> "U" <> ToString[2]
= U2
>> ToString[Integrate[f[x],x], TeXForm]
= \\int f\\left[x\\right] \\, dx
"""
options = {
"CharacterEncoding": '"Unicode"',
"FormatType": "OutputForm",
"NumberMarks": "$NumberMarks",
"PageHeight": "Infinity",
"PageWidth": "Infinity",
"TotalHeight": "Infinity",
"TotalWidth": "Infinity",
}
summary_text = "format an expression and produce a string"
def eval_default(self, value, evaluation: Evaluation, options: dict):
"ToString[value_, OptionsPattern[ToString]]"
return self.eval_form(value, SymbolOutputForm, evaluation, options)
def eval_form(self, expr, form, evaluation: Evaluation, options: dict):
"ToString[expr_, form_, OptionsPattern[ToString]]"
encoding = options["System`CharacterEncoding"]
return eval_ToString(expr, form, encoding.value, evaluation)
class Transliterate(Builtin):
"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/Transliterate.html</url>
<dl>
<dt>'Transliterate[$s$]'
<dd>transliterates a text in some script into an ASCII string.
</dl>
ASCII transliteration examples:
<ul>
<li><url>:Russian language: https://en.wikipedia.org/wiki/Russian_language#Transliteration</url>
<li><url>:Hiragana: https://en.wikipedia.org/wiki/Hiragana#Table_of_hiragana</url>
</ul>
"""
# Causes XeTeX to barf. Put this inside a unit test.
# >> Transliterate["つかう"]
# = tsukau
# >> Transliterate["Алекса́ндр Пу́шкин"]
# = Aleksandr Pushkin
# > Transliterate["μήτηρ γάρ τέ μέ φησι θεὰ Θέτις ἀργυρόπεζα"]
# = meter gar te me phesi thea Thetis arguropeza
requires = ("unidecode",)
summary_text = "transliterate an UTF string in different alphabets to ASCII"
def eval(self, s, evaluation: Evaluation):
"Transliterate[s_String]"
from unidecode import unidecode
return String(unidecode(s.value))
class Whitespace(Builtin):
r"""
<url>
:WMA link:
https://reference.wolfram.com/language/ref/Whitespace.html</url>
<dl>
<dt>'Whitespace'
<dd>represents a sequence of whitespace characters.
</dl>
>> StringMatchQ["\r \n", Whitespace]
= True
>> StringSplit["a \n b \r\n c d", Whitespace]
= {a, b, c, d}
>> StringReplace[" this has leading and trailing whitespace \n ", (StartOfString ~~ Whitespace) | (Whitespace ~~ EndOfString) -> ""] <> " removed" // FullForm
= "this has leading and trailing whitespace removed"
"""
summary_text = "sequence of whitespace characters" | PypiClean |
/ChatSearch-2023.4.25.9.51.41-py3-none-any.whl/chatllm/utils.py | import torch
from transformers import AutoTokenizer, AutoModel, AutoConfig
from meutils.pipe import *
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
# transformer.word_embeddings occupies 1 layer
# transformer.final_layernorm and lm_head occupy 1 layer
# transformer.layers occupies 28 layers
# 30 layers in total, distributed across num_gpus GPUs
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
# bugfix: on Linux, torch.embedding can receive weight and input on different
# devices, which raises a RuntimeError
# on Windows, model.device is set to transformer.word_embeddings.device
# on Linux, model.device is set to lm_head.device
# when chat or stream_chat is called, input_ids are moved to model.device
# if transformer.word_embeddings.device differs from model.device, a RuntimeError follows
# therefore transformer.word_embeddings, transformer.final_layernorm and
# lm_head are all placed on the first GPU
device_map = {'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0, 'lm_head': 0}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
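# Illustrative example (derived by tracing the loop above): with num_gpus=2,
# per_gpu_layers is 15, so GPU 0 holds word_embeddings, final_layernorm,
# lm_head and transformer.layers.0-12, while GPU 1 holds layers 13-27:
#   auto_configure_device_map(2)["transformer.layers.13"]  -> 1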
def llm_load(model_name_or_path="THUDM/chatglm-6b", device='cpu', device_map: Optional[Dict[str, int]] = None,
**kwargs):
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True, **kwargs)
if torch.cuda.is_available() and device.lower().startswith("cuda"):
# decide whether to deploy across multiple GPUs based on how many are available
num_gpus = torch.cuda.device_count()
if num_gpus < 2 and device_map is None:
model = model.half().cuda()
else:
from accelerate import dispatch_model
# a custom device_map may be passed in to control per-GPU placement
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
model = dispatch_model(model, device_map=device_map).half().cuda()
else:
model = model.float().to(device)
return model.eval(), tokenizer
def llm_load4chat(model_name_or_path="THUDM/chatglm-6b", device='cpu', stream=True, **kwargs):
model, tokenizer = llm_load(model_name_or_path, device, **kwargs)
if stream:
return partial(model.stream_chat, tokenizer=tokenizer)
else:
return partial(model.chat, tokenizer=tokenizer)
if __name__ == '__main__':
a = llm_load("/Users/betterme/PycharmProjects/AI/CHAT_MODEL/chatglm") | PypiClean |
/ModelicaLanguage-0.0.0a6-py3-none-any.whl/modelicalang/types/primitive.py | __all__ = (
# base type
"PrimitiveModelicaObject",
# concrete types
"PrimitiveReal",
"PrimitiveInteger",
"PrimitiveBoolean",
"PrimitiveString",
)
import numpy
import enum
from .. import util
from .abc import AbstractModelicaScalarClass
RealType = numpy.double
IntegerType = numpy.intc
StringType = str
class ScalarNumberMeta(
AbstractModelicaScalarClass,
):
def __new__(mtcls, name, bases, namespace):
base_class_forward = util.Forward()
def newfunc(cls, *args, **kwrds):
with base_class_forward as base_class:
self = super(base_class, cls).__new__(
cls, *args, **kwrds
)
if not isinstance(self, cls):
raise ValueError(
f"{self}: {type(self)} is not instance of {cls}"
)
return self
namespace["__new__"] = newfunc
cls = base_class_forward << (
super(ScalarNumberMeta, mtcls).__new__(
mtcls,
name,
bases,
namespace,
)
)
return cls
class ModelicaEnumClassMeta(
enum.EnumMeta,
AbstractModelicaScalarClass,
):
def __getitem__(cls, indices):
if isinstance(indices, str):
return enum.EnumMeta.__getitem__(
cls, indices,
)
else:
return AbstractModelicaScalarClass.__getitem__(
cls, indices,
)
class PrimitiveModelicaObject(
metaclass=AbstractModelicaScalarClass,
):
pass
@PrimitiveModelicaObject.register
class PrimitiveReal(
RealType,
metaclass=ScalarNumberMeta
):
pass
@PrimitiveModelicaObject.register
class PrimitiveInteger(
IntegerType,
metaclass=ScalarNumberMeta,
):
pass
class PrimitiveBooleanMeta(
ModelicaEnumClassMeta,
):
def __call__(cls, value):
return super().__call__(bool(value))
@PrimitiveModelicaObject.register
class PrimitiveBoolean(
enum.Enum,
metaclass=PrimitiveBooleanMeta,
):
true = True
false = False
def __bool__(self):
return self.value
def __eq__(self, other):
return bool(self) == other
def __repr__(self):
return self.__str__()
def __format__(self, format_spec):
value = "true" if self else "false"
return f"{value:{format_spec}}"
class PrimitiveString(
str,
PrimitiveModelicaObject,
):
def __format__(self, format_spec):
replaced = util.replace_all(
self,
[
("\\", r"\\"),
('\"', r'\"'),
("\a", r"\a"),
("\b", r"\b"),
("\f", r"\f"),
("\n", r"\n"),
("\t", r"\t"),
("\v", r"\v"),
],
)
double_quoted = f'"{replaced}"'
return f"{double_quoted:{format_spec}}" | PypiClean |
/MikeT_messenger_server-0.4.1.tar.gz/MikeT_messenger_server-0.4.1/server/server/config_window.py | import os
from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
class ConfigWindow(QDialog):
"""Класс окно настроек."""
def __init__(self, config):
super().__init__()
self.config = config
self.initUI()
def initUI(self):
"""Настройки окна"""
self.setFixedSize(365, 260)
self.setWindowTitle('Настройки сервера')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
self.db_path_label = QLabel('Путь до файла базы данных: ', self)
self.db_path_label.move(10, 10)
self.db_path_label.setFixedSize(240, 15)
self.db_path = QLineEdit(self)
self.db_path.setFixedSize(250, 20)
self.db_path.move(10, 30)
self.db_path.setReadOnly(True)
self.db_path_select = QPushButton('Обзор...', self)
self.db_path_select.move(275, 28)
self.db_file_label = QLabel('Имя файла базы данных: ', self)
self.db_file_label.move(10, 68)
self.db_file_label.setFixedSize(180, 15)
self.db_file = QLineEdit(self)
self.db_file.move(200, 66)
self.db_file.setFixedSize(150, 20)
self.port_label = QLabel('Номер порта для соединений:', self)
self.port_label.move(10, 108)
self.port_label.setFixedSize(180, 15)
self.port = QLineEdit(self)
self.port.move(200, 108)
self.port.setFixedSize(150, 20)
self.ip_label = QLabel('С какого IP принимаем соединения:', self)
self.ip_label.move(10, 148)
self.ip_label.setFixedSize(180, 15)
self.ip_label_note = QLabel(
' оставьте это поле пустым, чтобы\n '
'принимать соединения с любых адресов.',
self)
self.ip_label_note.move(10, 168)
self.ip_label_note.setFixedSize(500, 30)
self.ip = QLineEdit(self)
self.ip.move(200, 148)
self.ip.setFixedSize(150, 20)
self.save_btn = QPushButton('Сохранить', self)
self.save_btn.move(190, 220)
self.close_button = QPushButton('Закрыть', self)
self.close_button.move(275, 220)
self.close_button.clicked.connect(self.close)
self.db_path_select.clicked.connect(self.open_file_dialog)
self.show()
self.db_path.insert(self.config['SETTINGS']['Database_path'])
self.db_file.insert(self.config['SETTINGS']['Database_file'])
self.port.insert(self.config['SETTINGS']['Default_port'])
self.ip.insert(self.config['SETTINGS']['Listen_Address'])
self.save_btn.clicked.connect(self.save_server_config)
def open_file_dialog(self):
"""Метод обработчик открытия окна выбора папки."""
global dialog
dialog = QFileDialog(self)
path = dialog.getExistingDirectory()
path = path.replace('/', '\\')
self.db_path.clear()
self.db_path.insert(path)
def save_server_config(self):
"""
Method for saving the settings.
Validates the entered data and, if everything
is correct, saves the ini file.
"""
global config_window
message = QMessageBox()
self.config['SETTINGS']['Database_path'] = self.db_path.text()
self.config['SETTINGS']['Database_file'] = self.db_file.text()
try:
port = int(self.port.text())
except ValueError:
message.warning(self, 'Ошибка', 'Порт должен быть числом')
else:
self.config['SETTINGS']['Listen_Address'] = self.ip.text()
if 1023 < port < 65536:
self.config['SETTINGS']['Default_port'] = str(port)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, '..')
with open(f"{dir_path}/{'server.ini'}", 'w') as conf:
self.config.write(conf)
message.information(
self, 'OK', 'Настройки успешно сохранены!')
else:
message.warning(
self, 'Ошибка', 'Порт должен быть от 1024 до 65536') | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/crm/model/encoding_enum.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
from MergePythonSDK.shared.model_utils import MergeEnumType
class EncodingEnum(ModelNormal, MergeEnumType):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'RAW': "RAW",
'BASE64': "BASE64",
'GZIP_BASE64': "GZIP_BASE64",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
defined_types = {
'value': (str,),
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, value, *args, **kwargs): # noqa: E501
"""EncodingEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, value, *args, **kwargs): # noqa: E501
"""EncodingEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value | PypiClean |
/KTC_API-1.1.0.tar.gz/KTC_API-1.1.0/src/ktc_api/__init__.py | from typing import List
from requests import Session
from pydantic import parse_obj_as
from .types import ActualVersion, Branch, Course, Timetable, Teachers, TeacherTimetable, News, Post, Grade
class KTCClient:
"""Provides working around KTC API
"""
API_URL = 'http://mob.kansk-tc.ru'
def __init__(self):
self.session = Session()
def actual_version(self) -> ActualVersion:
return parse_obj_as(ActualVersion, self.session.get(
f'{self.API_URL}/ktc-api/actual-version'
).json())
def branches(self) -> List[Branch]:
return parse_obj_as(List[Branch], self.session.get(
f'{self.API_URL}/ktc-api/branches'
).json())
def courses(self, branch_id: int = 1) -> List[Course]:
return parse_obj_as(List[Course], self.session.get(
f'{self.API_URL}/ktc-api/courses/{branch_id}'
).json())
def news(self) -> News:
return parse_obj_as(News, self.session.get(
f'{self.API_URL}/ktc-api/news/'
).json())
def news_by_id(self, nid: int) -> Post:
return parse_obj_as(Post, self.session.get(
f'{self.API_URL}/ktc-api/news/id{nid}'
).json())
def teachers_list(self) -> Teachers:
return parse_obj_as(Teachers, self.session.get(
f'{self.API_URL}/ktc-api/teachers-list'
).json())
def teacher_timetable(self, branch_id: int, teacher_id: int) -> TeacherTimetable:
return parse_obj_as(TeacherTimetable, self.session.get(
f'{self.API_URL}/ktc-api/teacher-timetable/{branch_id}/{teacher_id}'
).json())
def timetable(self, group_id: int, week: int = 0) -> Timetable:
return parse_obj_as(Timetable, self.session.get(
f'{self.API_URL}/ktc-api/timetable/{group_id}/{week}'
).json())
def grades(self, username: str, password: str) -> List[Grade]:
return parse_obj_as(List[Grade], self.session.get(
f'{self.API_URL}/ktc-api/pro/grades',
params={
'username': username,
'password': password
}
).json()) | PypiClean |
/CodeViking.Collections-0.10.tar.gz/CodeViking.Collections-0.10/codeviking/collections/dict/_multidict.py | from collections import namedtuple, Mapping, MutableMapping
__all__ = ['FrozenMultiDict', 'MultiDict']
class MultiDictBase(Mapping):
'''An abstract source. This is a read-only dict-like object that maps key
strings to a user-defined tuple of objects. The length of the tuple is the
same for all keys. Each element of the tuple is called a "part", and each
part is stored in a separate Mapping.
'''
def __init__(self, parts):
'''
:param parts: (name, Mapping) tuples
:type parts: sequence of (str, Mapping)
'''
self._order = [name for (name, _) in parts]
self._parts = {name: part for (name, part) in parts}
self.itype = namedtuple(self.__class__.__name__ + '_itype',
' '.join(self._order))
self._keys = set()
for p in self._parts.values():
self._keys.update(p.keys())
def get_part(self, part_name, key):
'''get one tuple part associated with a key.
:param part_name: the name of the part to read.
:type part_name: str
:param key: the key to look up.
:type key: str
:return: the part of the tuple associated with the given key.
'''
return self._parts[part_name][key]
def __len__(self):
return len(self._keys)
def __iter__(self):
for k in self.keys():
yield k
def __contains__(self, key):
return key in self._keys
def __eq__(self, other):
sk = self._keys
ok = set(other.keys())
if len(sk.difference(ok)) > 0:
return False
for k, v in self.items():
if v != other[k]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
result = []
for n in self._order:
try:
part = self.get_part(n, key)
except KeyError:
raise KeyError(repr(key))
result.append(part)
return self.itype(*result)
def keys(self):
for k in self._keys:
yield k
def items(self):
for k in self.keys():
yield (k, self.get(k))
def values(self):
for k in self.keys():
yield self.get(k)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
class FrozenMultiDict(MultiDictBase):
'''A read-only MultiDict'''
def __init__(self, parts):
super(FrozenMultiDict, self).__init__(parts)
self._h = None
def __hash__(self):
if self._h is None:
self._h = hash(frozenset(self.items()))
return self._h
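# Usage sketch (illustrative; names and values are hypothetical):
#   md = MultiDict([("color", {}), ("size", {})])
#   md["sku1"] = ("red", 42)
#   md["sku1"].color   -> "red"
#   md["sku1"].size    -> 42
# FrozenMultiDict offers the same read API but is hashable and read-only.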
class MultiDict(MultiDictBase, MutableMapping):
'''A read-write version of ItemSource
'''
def __setitem__(self, key, value):
self._keys.add(key)
for i, n in enumerate(self._order):
self._put_part(n, key, value[i])
def __delitem__(self, key):
for n in self._order:
self._del_part(n, key)
self._keys.remove(key)
def _put_part(self, part_name, key, value):
'''Put a value into a part.
:param part_name: name of the part to use for storage
:type part_name: str
:param key: the key to associate with this value
:param value: the value to store in the named part (a single tuple
element, not the whole tuple).
'''
self._parts[part_name][key] = value
def _del_part(self, part_name, key):
'''delete a key from a part.
:param part_name: part_name to use for storage
:type part_name: str
:param key: the key to delete
'''
del self._parts[part_name][key] | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/deployment/setuphandlers.py | from euphorie.content.passwordpolicy import EuphoriePasswordPolicy
from euphorie.content.utils import REGION_NAMES
from plone import api
from plone.app.layout.navigation.interfaces import INavigationRoot
from plone.dexterity.utils import createContentInContainer
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
from Products.PlonePAS.plugins.passwordpolicy import PasswordPolicyPlugin
from Products.PluggableAuthService.interfaces.plugins import IValidationPlugin
from zope.interface import alsoProvides
import logging
log = logging.getLogger(__name__)
def setupVarious(context):
site = api.portal.get()
disableRedirectTracking(site)
setupInitialContent(site)
setupVersioning(site)
registerPasswordPolicy(site)
setupSecureSessionCookie(site)
COUNTRIES = {
"at": ("Austria", "eu-member"),
"be": ("Belgium", "eu-member"),
"bg": ("Bulgaria", "eu-member"),
"se": ("Sweden", "eu-member"),
"cy": ("Cyprus", "eu-member"),
"cz": ("The Czech Republic", "eu-member"),
"de": ("Germany", "eu-member"),
"dk": ("Denmark", "eu-member"),
"ee": ("Estonia", "eu-member"),
"es": ("Spain", "eu-member"),
"fi": ("Finland", "eu-member"),
"fr": ("France", "eu-member"),
"gb": ("The United Kingdom", "eu-member"),
"gr": ("Greece", "eu-member"),
"hu": ("Hungary", "eu-member"),
"ie": ("Ireland", "eu-member"),
"it": ("Italy", "eu-member"),
"lt": ("Lithuania", "eu-member"),
"lu": ("Luxembourg", "eu-member"),
"lv": ("Latvia", "eu-member"),
"mt": ("Malta", "eu-member"),
"nl": ("The Netherlands", "eu-member"),
"pl": ("Poland", "eu-member"),
"pt": ("Portugal", "eu-member"),
"ro": ("Romania", "eu-member"),
"sk": ("Slovakia", "eu-member"),
"si": ("Slovenia", "eu-member"),
"li": ("Liechtenstein", "efta"),
"no": ("Norway", "efta"),
"ch": ("Switzerland", "efta"),
"hr": ("Republic of Croatia", "candidate-eu"),
"is": ("Republic of Iceland", "candidate-eu"),
"mk": ("F.Y.R. Macedonia", "candidate-eu"),
"me": ("Montenegro", "candidate-eu"),
"tr": ("Republic of Turkey", "candidate-eu"),
"al": ("Republic of Albania", "potential-candidate-eu"),
"ba": ("Bosnia and Herzegovina", "potential-candidate-eu"),
"cs": ("Kosovo", "potential-candidate-eu"),
"rs": ("Republic of Serbia", "potential-candidate-eu"),
}
for i in REGION_NAMES.items():
COUNTRIES[i[0]] = (i[1], "region")
def setupInitialContent(site):
present = site.objectIds()
for obj in ["Members", "events", "news"]:
if obj in present:
site.manage_delObjects([obj])
log.info("Removed default Plone %s folder", obj)
if "sectors" not in present:
site.invokeFactory(
"euphorie.sectorcontainer",
"sectors",
title="Surveys",
)
mt = getToolByName(site, "portal_membership")
mt.setMembersFolderById("sectors")
log.info("Added sectors folder")
sectors = site.sectors
for country_id, info in COUNTRIES.items():
(title, country_type) = info
if country_id not in sectors:
sectors.invokeFactory(
"euphorie.country",
country_id,
title=title,
country_type=country_type,
)
log.info("Added country %s (%s)", country_id, title)
country = sectors[country_id]
if "help" not in country:
createContentInContainer(
country,
"euphorie.page",
id="help",
title="Help",
checkConstraints=False,
)
log.info("Added help section for country %s (%s)", country_id, title)
help = country["help"]
if not INavigationRoot.providedBy(help):
alsoProvides(help, INavigationRoot)
log.info(
"Made help for country %s (%s) a navigation root.",
country_id,
title,
)
if "client" not in present:
site.invokeFactory("euphorie.client", "client", title="Client")
api.content.transition(site.client, to_state="published")
log.info("Added Euphorie client instance")
if "documents" not in present:
site.invokeFactory("euphorie.folder", "documents", title="Documents")
log.info("Added documents folder")
documents = site.documents
if not INavigationRoot.providedBy(documents):
alsoProvides(documents, INavigationRoot)
log.info("Made documentation folder a navigation root.")
lt = getToolByName(site, "portal_languages")
present_languages = documents.objectIds()
for code, name in lt.listSupportedLanguages():
if code not in present_languages:
documents.invokeFactory(
"euphorie.documentation",
code,
title=name,
)
log.info("Added documentation folder for %s (%s)", name, code)
docs = documents[code]
if "help" not in docs:
createContentInContainer(
docs,
"euphorie.help",
id="help",
checkConstraints=False,
)
log.info("Added online help text for language %s (%s)", name, code)
if "appendix" not in docs:
_createObjectByType(
"euphorie.page",
docs,
"appendix",
title="Appendix",
)
log.info("Added appendix folder for language %s (%s)", name, code)
def disableRedirectTracking(site):
    # Remove plone.app.redirector's storage utility so the site stops
    # tracking redirects for moved content.
from plone.app.redirector.interfaces import IRedirectionStorage
from zope.component import getSiteManager
from zope.interface.interfaces import IComponentRegistry
sm = getSiteManager(site)
if sm is None or not IComponentRegistry.providedBy(sm):
        log.warning(
            "Failed to find a site manager, cannot remove the "
            "IRedirectionStorage utility"
        )
return
sm.unregisterUtility(provided=IRedirectionStorage)
def setupVersioning(site):
repository = site.portal_repository
if "euphorie.survey" not in repository.getVersionableContentTypes():
repository.setVersionableContentTypes(["euphorie.survey"])
log.info("Enabled versioning for survey versions.")
def registerPasswordPolicy(site):
pas = api.portal.get_tool("acl_users")
# Deactivate the default policy
for oid in pas.objectIds([PasswordPolicyPlugin.meta_type]):
if oid in pas.plugins._getPlugins(IValidationPlugin):
pas.plugins.deactivatePlugin(
IValidationPlugin,
oid,
)
# Activate the Euphorie policy
if not pas.objectIds([EuphoriePasswordPolicy.meta_type]):
plugin = EuphoriePasswordPolicy(
EuphoriePasswordPolicy.id,
EuphoriePasswordPolicy.meta_type,
)
pas._setObject(plugin.getId(), plugin)
plugin = getattr(pas, plugin.getId())
infos = [
info
for info in pas.plugins.listPluginTypeInfo()
if plugin.testImplements(info["interface"])
]
plugin.manage_activateInterfaces([info["id"] for info in infos])
    # Move the Euphorie policy to the top of the plugin order for every
    # interface it implements, so it is consulted before any other plugin.
    for info in infos:
        for _ in range(len(pas.plugins.listPluginIds(info["interface"]))):
            pas.plugins.movePluginsUp(
                info["interface"],
                [plugin.getId()],
            )
def setupSecureSessionCookie(site):
session = api.portal.get_tool("acl_users").get("session")
if not session:
return
if not session.secure:
        session.secure = True
/Lentil-0.7.0.tar.gz/Lentil-0.7.0/lentil/wavefront.py

import copy
import numpy as np
import lentil.field
from lentil.field import Field
import lentil.fourier
import lentil.helper
class Wavefront:
"""A class representing a monochromatic wavefront.
Parameters
----------
wavelength : float
Wavelength in meters
pixelscale : float, optional
Physical sampling of wavefront
shape : (2,) array_like, optional
Wavefront shape. If ``shape`` is None (default), the wavefront is
assumed to be infinite (broadcastable to any shape).
data : list_like, optional
Wavefront data. Default is [1+0j] (a plane wave).
focal_length : float or np.inf
Wavefront focal length. A plane wave (default) has an infinite focal
length (``np.inf``).
"""
__slots__ = ('wavelength', '_pixelscale', 'focal_length',
'data', 'shape', 'planetype')
def __init__(self, wavelength, pixelscale=None, shape=None, planetype=None,
data=None, focal_length=None):
self.wavelength = wavelength
self._pixelscale = () if pixelscale is None else lentil.sanitize_shape(pixelscale)
self.shape = () if shape is None else shape
if data is None:
self.data = [Field(data=1, pixelscale=pixelscale, offset=[0, 0], tilt=[])]
else:
self.data = [*data]
self.focal_length = focal_length if focal_length else np.inf
self.planetype = planetype
def __mul__(self, plane):
return plane.multiply(self, inplace=False)
def __imul__(self, plane):
return plane.multiply(self, inplace=True)
def __rmul__(self, other):
return self.__mul__(other)
@property
def pixelscale(self):
"""Physical sampling of the wavefront"""
return self._pixelscale
@pixelscale.setter
def pixelscale(self, value):
self._pixelscale = lentil.sanitize_shape(value)
@property
def field(self):
"""Wavefront complex field"""
out = np.zeros(self.shape, dtype=complex)
for field in self.data:
out = lentil.field.insert(field, out)
return out
@property
def intensity(self):
"""Wavefront intensity"""
out = np.zeros(self.shape, dtype=float)
for field in lentil.field.reduce(*self.data):
out = lentil.field.insert(field, out, intensity=True)
return out
def copy(self):
return copy.deepcopy(self)
def insert(self, out, weight=1):
"""Directly insert wavefront intensity data into an output array.
This method can avoid repeatedly allocating large arrays of zeros
when accumulating :attr:`intensity`.
Parameters
----------
out : ndarray
Array to insert wavefront data into
weight : float
Scale factor applied to wavefront data
Returns
-------
out : ndarray
Array with wavefront data inserted into it at the appropriate location
"""
for field in lentil.field.reduce(*self.data):
out = lentil.field.insert(field, out, intensity=True, weight=weight)
return out
def propagate_image(self, pixelscale, npix, npix_prop=None, oversample=2,
inplace=True):
"""Propagate the Wavefront from a Pupil to an Image plane using
Fraunhofer diffraction.
Parameters
----------
pixelscale : float or (2,) float
Physical sampling of output (image) plane. If a single value is supplied,
the output is assumed to be uniformly sampled in both x and y.
npix : int or (2,) tuple of ints
Shape of output plane.
npix_prop : int or (2,) tuple of ints, optional
Shape of propagation output plane. If None (default),
``npix_prop = npix``. If ``npix_prop != npix``, the propagation
result is placed in the appropriate location in the output plane.
npix_prop cannot be larger than npix.
oversample : int, optional
Number of times to oversample the output plane. Default is 2.
inplace : bool, optional
If True (default) the wavefront is propagated in-place, otherwise
a copy is created and propagated.
Returns
-------
wavefront : :class:`~lentil.Wavefront`
A Wavefront propagated to the specified image plane
"""
if self.planetype != 'pupil':
raise ValueError("Wavefront must have planetype 'pupil'")
npix = np.asarray(lentil.sanitize_shape(npix))
npix_prop = npix if npix_prop is None else np.asarray(lentil.sanitize_shape(npix_prop))
prop_shape = npix_prop * oversample
dx = self.pixelscale
du = np.asarray(lentil.sanitize_shape(pixelscale))
z = self.focal_length
data = self.data
if inplace:
out = self
out.data = []
out.pixelscale = du / oversample
out.shape = npix * oversample
out.focal_length = np.inf
out.planetype = 'image'
else:
out = Wavefront(wavelength=self.wavelength, data=[],
pixelscale=du/oversample, shape=npix*oversample,
planetype='image')
for field in data:
# compute the field shift from any embedded tilts. note the return value
# is specified in terms of (r, c)
shift = field.shift(z=z, wavelength=self.wavelength,
pixelscale=du, oversample=oversample,
indexing='ij')
fix_shift = np.fix(shift)
dft_shift = shift - fix_shift
if _overlap(prop_shape, fix_shift, out.shape):
alpha = lentil.helper.dft_alpha(dx=dx, du=du,
wave=self.wavelength, z=z,
oversample=oversample)
data = lentil.fourier.dft2(f=field.data, alpha=alpha,
npix=prop_shape, shift=dft_shift,
offset=field.offset, unitary=True)
out.data.append(Field(data=data, pixelscale=du/oversample,
offset=fix_shift))
return out
def _overlap(field_shape, field_shift, output_shape):
# Return True if there's any overlap between a shifted field and the
# output shape
output_shape = np.asarray(output_shape)
field_shape = np.asarray(field_shape)
field_shift = np.asarray(field_shift)
# Output coordinates of the upper left corner of the shifted data array
field_shifted_ul = (output_shape / 2) - (field_shape / 2) + field_shift
if field_shifted_ul[0] > output_shape[0]:
return False
if field_shifted_ul[0] + field_shape[0] < 0:
return False
if field_shifted_ul[1] > output_shape[1]:
return False
if field_shifted_ul[1] + field_shape[1] < 0:
return False
    return True
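
if __name__ == '__main__':
    # Illustrative check only (not part of the original module): exercise the
    # _overlap() helper defined above. A 16x16 field shifted by 32 rows still
    # lands inside a 64x64 output plane; a 100-row shift pushes it completely
    # off the plane.
    assert _overlap((16, 16), (32, 0), (64, 64))
    assert not _overlap((16, 16), (100, 0), (64, 64))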
/Brainfeatures-0.0.4.tar.gz/Brainfeatures-0.0.4/brainfeatures/feature_generation/frequency_feature_generator.py

import numpy as np
from brainfeatures.feature_generation.abstract_feature_generator import (
AbstractFeatureGenerator)
from brainfeatures.feature_generation import features_frequency
class FrequencyFeatureGenerator(AbstractFeatureGenerator):
""" computes features in the frequency domain implemented in features_
frequency using fourier transform """
def get_feature_labels(self):
"""
:return: list of feature labels of the form
<fft>_<feature>_<lower-upperHz>_<channel>
"""
feature_labels = []
for freq_feat in self.freq_feats:
freq_feat = freq_feat.replace("_", "-")
for band_id, band in enumerate(self.bands):
lower, upper = band
for electrode in self.electrodes:
label = '_'.join([
self.domain,
freq_feat,
str(lower) + '-' + str(upper) + 'Hz',
str(electrode)])
feature_labels.append(label)
return feature_labels
def convert_with_fft(self, weighted_epochs):
epochs_amplitudes = np.abs(np.fft.rfft(weighted_epochs, axis=2))
epochs_amplitudes /= weighted_epochs.shape[-1]
return epochs_amplitudes
def generate_features(self, weighted_epochs):
""" computes all frequency domain features as implemented in module
features_frequency
        :param weighted_epochs: ndarray with split EEG data weighted by a
        window function in shape of n_epochs x n_elecs x n_samples_in_epoch
:return: ndarray of features in shape [n_epochs x] n_elecs x n_bands x
n_freq_features
"""
(n_epochs, n_elecs, n_samples_in_epoch) = weighted_epochs.shape
epochs_psds = self.convert_with_fft(weighted_epochs)
freq_bin_size = self.sfreq / n_samples_in_epoch
freqs = np.fft.fftfreq(int(n_samples_in_epoch), 1. / self.sfreq)
# extract frequency bands and generate features
        # n_epochs x n_feats x n_bands x n_elecs
freq_feats = np.ndarray(shape=(n_epochs, len(self.freq_feats),
len(self.bands), n_elecs))
for freq_feat_id, freq_feat_name in enumerate(self.freq_feats):
# assumes that "power" feature was already computed. which should
# be the case, since features are iterated alphabetically
if freq_feat_name == "power_ratio":
powers = freq_feats[:, self.freq_feats.index("power"), :, :]
# divide the power by the sum of powers in each band to gain
# power ratio feature
func = getattr(features_frequency, freq_feat_name)
ratio = func(powers, axis=-2)
freq_feats[:, freq_feat_id, :, :] = ratio
# assumes that "ratio" feature was already computed. which should
# be the case, since features are iterated alphabetically
elif freq_feat_name == "spectral_entropy":
func = getattr(features_frequency, freq_feat_name)
                idx = self.freq_feats.index("power_ratio")
                ratios = freq_feats[:, idx, :, :]
spec_entropy = func(ratios)
freq_feats[:, freq_feat_id, :, :] = spec_entropy
else:
func = getattr(features_frequency, freq_feat_name)
# amplitudes shape: epochs x electrodes x frequencies
band_psd_features = np.ndarray(shape=(n_epochs, len(self.bands),
n_elecs))
for band_id, (lower, upper) in enumerate(self.bands):
lower_bin, upper_bin = (int(lower / freq_bin_size),
int(upper / freq_bin_size))
# if upper_bin corresponds to nyquist frequency or higher,
# take last available frequency
if upper_bin >= len(freqs):
upper_bin = len(freqs) - 1
band_psds = np.take(epochs_psds,
range(lower_bin, upper_bin), axis=-1)
band_psd_features[:, band_id, :] = func(band_psds, axis=-1)
freq_feats[:, freq_feat_id, :, :] = band_psd_features
freq_feats = freq_feats.reshape(n_epochs, -1)
# aggregate over the dimension of epochs
if self.agg_mode:
freq_feats = self.agg_mode(freq_feats, axis=0)
return freq_feats
def __init__(self, elecs, agg, bands, sfreq, domain="fft"):
super(FrequencyFeatureGenerator, self).__init__(
domain=domain, electrodes=elecs, agg_mode=agg)
self.freq_feats = sorted([
feat_func
for feat_func in dir(features_frequency)
if not feat_func.startswith('_')])
self.bands = bands
        self.sfreq = sfreq
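
if __name__ == '__main__':
    # Illustrative usage only (not part of the original module). The
    # electrode names, bands and sampling rate below are made-up values;
    # agg=None keeps per-epoch features instead of aggregating over epochs.
    dummy_epochs = np.random.rand(5, 2, 100)  # n_epochs x n_elecs x n_samples
    generator = FrequencyFeatureGenerator(
        elecs=["C3", "C4"], agg=None,
        bands=[(0, 4), (4, 8), (8, 13)], sfreq=100)
    print(generator.generate_features(dummy_epochs).shape)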
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/docs/api/cmdline.rst

## Command Line Options
This package includes a module called `appd.cmdline` that provides a simple command-line parser for use
in your scripts. You're not required to use it, but it allows you to point your script at different controllers
without making any code changes, and if you use it consistently, your scripts will all have a common
command-line syntax, which is nice. It supports the following options:
- **-c** or **--url** for the controller URL. Required.
- **-a** or **--account** for the account name. Optional and defaults to "customer1", which is the account
name on single-tenant controllers.
- **-u** or **--username** for the user name. Required.
- **-p** or **--password** for the password. Required.
- **-v** or **--verbose** will print out the URLs before they are retrieved.
- **-h** or **--help** will display a summary of the command line options.
The example scripts all use the parser, so you can look at their source to see how to use it.
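
For example, a minimal script built on the parser might look like this (a
sketch that assumes the module's `parse_argv()` helper, which the bundled
example scripts use, and the `AppDynamicsClient` class from `appd.request`):

```python
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient

args = parse_argv()
client = AppDynamicsClient(args.url, args.username, args.password,
                           args.account, args.verbose)
for app in client.get_applications():
    print(app.name)
```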
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/app-layout/app-drawer/README.md

## <app-drawer>

![app-drawer](http://app-layout-assets.appspot.com/assets/docs/app-drawer/drawer.gif)
app-drawer is a navigation drawer that can slide in from the left or right.
Example:
Align the drawer at the start, which is left in LTR layouts (default):
```html
<app-drawer opened></app-drawer>
```
Align the drawer at the end:
```html
<app-drawer align="end" opened></app-drawer>
```
To make the contents of the drawer scrollable, create a wrapper for the scroll
content, and apply height and overflow styles to it.
```html
<app-drawer>
<div style="height: 100%; overflow: auto;"></div>
</app-drawer>
```
### Styling
Custom property | Description | Default
---------------------------------|----------------------------------------|--------------------
`--app-drawer-width` | Width of the drawer | 256px
`--app-drawer-content-container` | Mixin for the drawer content container | {}
`--app-drawer-scrim-background` | Background for the scrim | rgba(0, 0, 0, 0.5)
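
For example, to widen the drawer and darken the scrim (a minimal sketch; any
CSS rule that targets the element works):

```html
<style>
  app-drawer {
    --app-drawer-width: 320px;
    --app-drawer-scrim-background: rgba(0, 0, 0, 0.8);
  }
</style>
```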
/GNN4LP-0.1.0-py3-none-any.whl/src/vgae/model.py

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
def __init__(self, input_dim, output_dim, dropout, bias=False):
super(GraphConvolution, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.weight = Parameter(torch.FloatTensor(input_dim, output_dim))
self.reset_parameters()
if bias:
self.bias = Parameter(torch.FloatTensor(output_dim))
nn.init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.dropout = nn.Dropout(dropout)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
def forward(self, input, adj):
# inputs: (N, n_channels), adj: sparse_matrix (N, N)
input = self.dropout(input)
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
output = output + self.bias
return output
class GCNModelVAE(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, vae_bool=True):
super(GCNModelVAE, self).__init__()
self.vae_bool = vae_bool
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout)
self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)
self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)
self.ip = InnerProductDecoder(dropout)
self.relu = nn.ReLU()
def encode(self, input, adj):
hidden1 = self.relu(self.gc1(input, adj))
return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
def reparameterize(self, mu, logvar):
if self.vae_bool:
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)  # multiply by std, then add mu
else:
return mu
def forward(self, input, adj):
        mu, logvar = self.encode(input, adj)  # two GCN heads give mean and log-variance
        z = self.reparameterize(mu, logvar)  # sample z via the reparameterization trick
return self.ip(z), mu, logvar
class InnerProductDecoder(nn.Module):
'''
    Inner-product decoder, used to reconstruct the adjacency matrix.
'''
def __init__(self, dropout):
super(InnerProductDecoder, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, z):
z = self.dropout(z)
adj = torch.mm(z, z.t())
        return adj
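
if __name__ == '__main__':
    # Illustrative usage only (not part of the original module): a smoke test
    # on a tiny random graph. A real pipeline would pass a normalized sparse
    # adjacency matrix; a sparse identity is enough to exercise the code.
    n_nodes, n_feats = 6, 8
    features = torch.rand(n_nodes, n_feats)
    adj = torch.eye(n_nodes).to_sparse()
    model = GCNModelVAE(input_feat_dim=n_feats, hidden_dim1=16,
                        hidden_dim2=4, dropout=0.1)
    recovered, mu, logvar = model(features, adj)
    print(recovered.shape)  # torch.Size([6, 6]), reconstructed adjacency logits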
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/canvas/canvas/scene.py

import logging
import itertools
from operator import attrgetter
from xml.sax.saxutils import escape
from AnyQt.QtWidgets import QGraphicsScene, QGraphicsItem
from AnyQt.QtGui import QPainter, QColor, QFont
from AnyQt.QtCore import (
Qt, QPointF, QRectF, QSizeF, QLineF, QBuffer, QObject, QSignalMapper,
QT_VERSION
)
from AnyQt.QtSvg import QSvgGenerator
from AnyQt.QtCore import pyqtSignal as Signal
try:
from AnyQt.QtCore import PYQT_VERSION
USE_PYQT = True
except ImportError:
USE_PYQT, PYQT_VERSION = False, -1
from .. import scheme
from . import items
from .layout import AnchorLayout
from .items.utils import toGraphicsObjectIfPossible
log = logging.getLogger(__name__)
class CanvasScene(QGraphicsScene):
"""
A Graphics Scene for displaying an :class:`~.scheme.Scheme` instance.
"""
#: Signal emitted when a :class:`NodeItem` has been added to the scene.
node_item_added = Signal(object)
#: Signal emitted when a :class:`NodeItem` has been removed from the
#: scene.
node_item_removed = Signal(object)
#: Signal emitted when a new :class:`LinkItem` has been added to the
#: scene.
link_item_added = Signal(object)
#: Signal emitted when a :class:`LinkItem` has been removed.
link_item_removed = Signal(object)
#: Signal emitted when a :class:`Annotation` item has been added.
annotation_added = Signal(object)
#: Signal emitted when a :class:`Annotation` item has been removed.
annotation_removed = Signal(object)
#: Signal emitted when the position of a :class:`NodeItem` has changed.
node_item_position_changed = Signal(object, QPointF)
#: Signal emitted when an :class:`NodeItem` has been double clicked.
node_item_double_clicked = Signal(object)
#: An node item has been activated (clicked)
node_item_activated = Signal(object)
#: An node item has been hovered
node_item_hovered = Signal(object)
#: Link item has been hovered
link_item_hovered = Signal(object)
def __init__(self, *args, **kwargs):
QGraphicsScene.__init__(self, *args, **kwargs)
self.scheme = None
self.registry = None
# All node items
self.__node_items = []
# Mapping from SchemeNodes to canvas items
self.__item_for_node = {}
# All link items
self.__link_items = []
# Mapping from SchemeLinks to canvas items.
self.__item_for_link = {}
# All annotation items
self.__annotation_items = []
# Mapping from SchemeAnnotations to canvas items.
self.__item_for_annotation = {}
# Is the scene editable
self.editable = True
# Anchor Layout
self.__anchor_layout = AnchorLayout()
self.addItem(self.__anchor_layout)
self.__channel_names_visible = True
self.__node_animation_enabled = True
self.user_interaction_handler = None
self.activated_mapper = QSignalMapper(self)
self.activated_mapper.mapped[QObject].connect(
lambda node: self.node_item_activated.emit(node)
)
self.hovered_mapper = QSignalMapper(self)
self.hovered_mapper.mapped[QObject].connect(
lambda node: self.node_item_hovered.emit(node)
)
self.position_change_mapper = QSignalMapper(self)
self.position_change_mapper.mapped[QObject].connect(
self._on_position_change
)
log.info("'%s' intitialized." % self)
def clear_scene(self):
"""
Clear (reset) the scene.
"""
if self.scheme is not None:
self.scheme.node_added.disconnect(self.add_node)
self.scheme.node_removed.disconnect(self.remove_node)
self.scheme.link_added.disconnect(self.add_link)
self.scheme.link_removed.disconnect(self.remove_link)
self.scheme.annotation_added.disconnect(self.add_annotation)
self.scheme.annotation_removed.disconnect(self.remove_annotation)
self.scheme.node_state_changed.disconnect(
self.on_widget_state_change
)
self.scheme.channel_state_changed.disconnect(
self.on_link_state_change
)
# Remove all items to make sure all signals from scheme items
# to canvas items are disconnected.
for annot in self.scheme.annotations:
if annot in self.__item_for_annotation:
self.remove_annotation(annot)
for link in self.scheme.links:
if link in self.__item_for_link:
self.remove_link(link)
for node in self.scheme.nodes:
if node in self.__item_for_node:
self.remove_node(node)
self.scheme = None
self.__node_items = []
self.__item_for_node = {}
self.__link_items = []
self.__item_for_link = {}
self.__annotation_items = []
self.__item_for_annotation = {}
self.__anchor_layout.deleteLater()
self.user_interaction_handler = None
self.clear()
log.info("'%s' cleared." % self)
def set_scheme(self, scheme):
"""
Set the scheme to display. Populates the scene with nodes and links
already in the scheme. Any further change to the scheme will be
reflected in the scene.
Parameters
----------
scheme : :class:`~.scheme.Scheme`
"""
if self.scheme is not None:
# Clear the old scheme
self.clear_scene()
log.info("Setting scheme '%s' on '%s'" % (scheme, self))
self.scheme = scheme
if self.scheme is not None:
self.scheme.node_added.connect(self.add_node)
self.scheme.node_removed.connect(self.remove_node)
self.scheme.link_added.connect(self.add_link)
self.scheme.link_removed.connect(self.remove_link)
self.scheme.annotation_added.connect(self.add_annotation)
self.scheme.annotation_removed.connect(self.remove_annotation)
self.scheme.node_state_changed.connect(
self.on_widget_state_change
)
self.scheme.channel_state_changed.connect(
self.on_link_state_change
)
self.scheme.topology_changed.connect(self.on_scheme_change)
for node in scheme.nodes:
self.add_node(node)
for link in scheme.links:
self.add_link(link)
for annot in scheme.annotations:
self.add_annotation(annot)
def set_registry(self, registry):
"""
Set the widget registry.
"""
# TODO: Remove/Deprecate. Is used only to get the category/background
# color. That should be part of the SchemeNode/WidgetDescription.
log.info("Setting registry '%s on '%s'." % (registry, self))
self.registry = registry
def set_anchor_layout(self, layout):
"""
Set an :class:`~.layout.AnchorLayout`
"""
if self.__anchor_layout != layout:
if self.__anchor_layout:
self.__anchor_layout.deleteLater()
self.__anchor_layout = None
self.__anchor_layout = layout
def anchor_layout(self):
"""
Return the anchor layout instance.
"""
return self.__anchor_layout
def set_channel_names_visible(self, visible):
"""
Set the channel names visibility.
"""
self.__channel_names_visible = visible
for link in self.__link_items:
link.setChannelNamesVisible(visible)
def channel_names_visible(self):
"""
Return the channel names visibility state.
"""
return self.__channel_names_visible
def set_node_animation_enabled(self, enabled):
"""
Set node animation enabled state.
"""
if self.__node_animation_enabled != enabled:
self.__node_animation_enabled = enabled
for node in self.__node_items:
node.setAnimationEnabled(enabled)
def add_node_item(self, item):
"""
Add a :class:`.NodeItem` instance to the scene.
"""
if item in self.__node_items:
raise ValueError("%r is already in the scene." % item)
if item.pos().isNull():
if self.__node_items:
pos = self.__node_items[-1].pos() + QPointF(150, 0)
else:
pos = QPointF(150, 150)
item.setPos(pos)
item.setFont(self.font())
# Set signal mappings
self.activated_mapper.setMapping(item, item)
item.activated.connect(self.activated_mapper.map)
self.hovered_mapper.setMapping(item, item)
item.hovered.connect(self.hovered_mapper.map)
self.position_change_mapper.setMapping(item, item)
item.positionChanged.connect(self.position_change_mapper.map)
self.addItem(item)
self.__node_items.append(item)
self.node_item_added.emit(item)
log.info("Added item '%s' to '%s'" % (item, self))
return item
def add_node(self, node):
"""
Add and return a default constructed :class:`.NodeItem` for a
:class:`SchemeNode` instance `node`. If the `node` is already in
the scene do nothing and just return its item.
"""
if node in self.__item_for_node:
# Already added
return self.__item_for_node[node]
item = self.new_node_item(node.description)
if node.position:
pos = QPointF(*node.position)
item.setPos(pos)
item.setTitle(node.title)
item.setProcessingState(node.processing_state)
item.setProgress(node.progress)
for message in node.state_messages():
item.setStateMessage(message)
item.setStatusMessage(node.status_message())
self.__item_for_node[node] = item
node.position_changed.connect(self.__on_node_pos_changed)
node.title_changed.connect(item.setTitle)
node.progress_changed.connect(item.setProgress)
node.processing_state_changed.connect(item.setProcessingState)
node.state_message_changed.connect(item.setStateMessage)
node.status_message_changed.connect(item.setStatusMessage)
return self.add_node_item(item)
def new_node_item(self, widget_desc, category_desc=None):
"""
Construct an new :class:`.NodeItem` from a `WidgetDescription`.
Optionally also set `CategoryDescription`.
"""
item = items.NodeItem()
item.setWidgetDescription(widget_desc)
        # Look up the category description in the registry, if one is
        # available; tolerate categories that are not registered.
        if category_desc is None and self.registry is not None \
                and widget_desc.category:
            try:
                category_desc = self.registry.category(widget_desc.category)
            except KeyError:
                pass
if category_desc is not None:
item.setWidgetCategory(category_desc)
item.setAnimationEnabled(self.__node_animation_enabled)
return item
def remove_node_item(self, item):
"""
Remove `item` (:class:`.NodeItem`) from the scene.
"""
self.activated_mapper.removeMappings(item)
self.hovered_mapper.removeMappings(item)
self.position_change_mapper.removeMappings(item)
item.hide()
self.removeItem(item)
self.__node_items.remove(item)
self.node_item_removed.emit(item)
log.info("Removed item '%s' from '%s'" % (item, self))
def remove_node(self, node):
"""
Remove the :class:`.NodeItem` instance that was previously
constructed for a :class:`SchemeNode` `node` using the `add_node`
method.
"""
item = self.__item_for_node.pop(node)
node.position_changed.disconnect(self.__on_node_pos_changed)
node.title_changed.disconnect(item.setTitle)
node.progress_changed.disconnect(item.setProgress)
node.processing_state_changed.disconnect(item.setProcessingState)
node.state_message_changed.disconnect(item.setStateMessage)
self.remove_node_item(item)
def node_items(self):
"""
Return all :class:`.NodeItem` instances in the scene.
"""
return list(self.__node_items)
def add_link_item(self, item):
"""
Add a link (:class:`.LinkItem`) to the scene.
"""
if item.scene() is not self:
self.addItem(item)
item.setFont(self.font())
self.__link_items.append(item)
self.link_item_added.emit(item)
log.info("Added link %r -> %r to '%s'" % \
(item.sourceItem.title(), item.sinkItem.title(), self))
self.__anchor_layout.invalidateLink(item)
return item
def add_link(self, scheme_link):
"""
Create and add a :class:`.LinkItem` instance for a
:class:`SchemeLink` instance. If the link is already in the scene
do nothing and just return its :class:`.LinkItem`.
"""
if scheme_link in self.__item_for_link:
return self.__item_for_link[scheme_link]
source = self.__item_for_node[scheme_link.source_node]
sink = self.__item_for_node[scheme_link.sink_node]
item = self.new_link_item(source, scheme_link.source_channel,
sink, scheme_link.sink_channel)
item.setEnabled(scheme_link.enabled)
scheme_link.enabled_changed.connect(item.setEnabled)
if scheme_link.is_dynamic():
item.setDynamic(True)
item.setDynamicEnabled(scheme_link.dynamic_enabled)
scheme_link.dynamic_enabled_changed.connect(item.setDynamicEnabled)
item.setRuntimeState(scheme_link.runtime_state())
scheme_link.state_changed.connect(item.setRuntimeState)
self.add_link_item(item)
self.__item_for_link[scheme_link] = item
return item
def new_link_item(self, source_item, source_channel,
sink_item, sink_channel):
"""
Construct and return a new :class:`.LinkItem`
"""
item = items.LinkItem()
item.setSourceItem(source_item)
item.setSinkItem(sink_item)
def channel_name(channel):
if isinstance(channel, str):
return channel
else:
return channel.name
source_name = channel_name(source_channel)
sink_name = channel_name(sink_channel)
fmt = "<b>{0}</b> \u2192 <b>{1}</b>"
item.setToolTip(
fmt.format(escape(source_name),
escape(sink_name))
)
item.setSourceName(source_name)
item.setSinkName(sink_name)
item.setChannelNamesVisible(self.__channel_names_visible)
return item
def remove_link_item(self, item):
"""
Remove a link (:class:`.LinkItem`) from the scene.
"""
# Invalidate the anchor layout.
self.__anchor_layout.invalidateAnchorItem(
item.sourceItem.outputAnchorItem
)
self.__anchor_layout.invalidateAnchorItem(
item.sinkItem.inputAnchorItem
)
self.__link_items.remove(item)
# Remove the anchor points.
item.removeLink()
self.removeItem(item)
self.link_item_removed.emit(item)
log.info("Removed link '%s' from '%s'" % (item, self))
return item
def remove_link(self, scheme_link):
"""
Remove a :class:`.LinkItem` instance that was previously constructed
for a :class:`SchemeLink` instance `link` using the `add_link` method.
"""
item = self.__item_for_link.pop(scheme_link)
scheme_link.enabled_changed.disconnect(item.setEnabled)
if scheme_link.is_dynamic():
scheme_link.dynamic_enabled_changed.disconnect(
item.setDynamicEnabled
)
scheme_link.state_changed.disconnect(item.setRuntimeState)
self.remove_link_item(item)
def link_items(self):
"""
Return all :class:`.LinkItem`\s in the scene.
"""
return list(self.__link_items)
def add_annotation_item(self, annotation):
"""
Add an :class:`.Annotation` item to the scene.
"""
self.__annotation_items.append(annotation)
self.addItem(annotation)
self.annotation_added.emit(annotation)
return annotation
def add_annotation(self, scheme_annot):
"""
Create a new item for :class:`SchemeAnnotation` and add it
to the scene. If the `scheme_annot` is already in the scene do
nothing and just return its item.
"""
if scheme_annot in self.__item_for_annotation:
# Already added
return self.__item_for_annotation[scheme_annot]
if isinstance(scheme_annot, scheme.SchemeTextAnnotation):
item = items.TextAnnotation()
x, y, w, h = scheme_annot.rect
item.setPos(x, y)
item.resize(w, h)
item.setTextInteractionFlags(Qt.TextEditorInteraction)
font = font_from_dict(scheme_annot.font, item.font())
item.setFont(font)
item.setContent(scheme_annot.content, scheme_annot.content_type)
scheme_annot.content_changed.connect(item.setContent)
elif isinstance(scheme_annot, scheme.SchemeArrowAnnotation):
item = items.ArrowAnnotation()
start, end = scheme_annot.start_pos, scheme_annot.end_pos
item.setLine(QLineF(QPointF(*start), QPointF(*end)))
item.setColor(QColor(scheme_annot.color))
scheme_annot.geometry_changed.connect(
self.__on_scheme_annot_geometry_change
)
self.add_annotation_item(item)
self.__item_for_annotation[scheme_annot] = item
return item
def remove_annotation_item(self, annotation):
"""
Remove an :class:`.Annotation` instance from the scene.
"""
self.__annotation_items.remove(annotation)
self.removeItem(annotation)
self.annotation_removed.emit(annotation)
def remove_annotation(self, scheme_annotation):
"""
Remove an :class:`.Annotation` instance that was previously added
using :func:`add_anotation`.
"""
item = self.__item_for_annotation.pop(scheme_annotation)
scheme_annotation.geometry_changed.disconnect(
self.__on_scheme_annot_geometry_change
)
if isinstance(scheme_annotation, scheme.SchemeTextAnnotation):
scheme_annotation.content_changed.disconnect(item.setContent)
self.remove_annotation_item(item)
def annotation_items(self):
"""
Return all :class:`.Annotation` items in the scene.
"""
return self.__annotation_items
def item_for_annotation(self, scheme_annotation):
return self.__item_for_annotation[scheme_annotation]
def annotation_for_item(self, item):
        rev = {v: k for k, v in self.__item_for_annotation.items()}
        return rev[item]
def commit_scheme_node(self, node):
"""
Commit the `node` into the scheme.
"""
if not self.editable:
raise Exception("Scheme not editable.")
if node not in self.__item_for_node:
raise ValueError("No 'NodeItem' for node.")
item = self.__item_for_node[node]
try:
self.scheme.add_node(node)
except Exception:
log.error("An error occurred while committing node '%s'",
node, exc_info=True)
# Cleanup (remove the node item)
self.remove_node_item(item)
raise
log.info("Commited node '%s' from '%s' to '%s'" % \
(node, self, self.scheme))
def commit_scheme_link(self, link):
"""
Commit a scheme link.
"""
if not self.editable:
raise Exception("Scheme not editable")
if link not in self.__item_for_link:
raise ValueError("No 'LinkItem' for link.")
self.scheme.add_link(link)
log.info("Commited link '%s' from '%s' to '%s'" % \
(link, self, self.scheme))
def node_for_item(self, item):
"""
Return the `SchemeNode` for the `item`.
"""
        rev = {v: k for k, v in self.__item_for_node.items()}
return rev[item]
def item_for_node(self, node):
"""
Return the :class:`NodeItem` instance for a :class:`SchemeNode`.
"""
return self.__item_for_node[node]
def link_for_item(self, item):
"""
        Return the `SchemeLink` for `item` (:class:`LinkItem`).
"""
        rev = {v: k for k, v in self.__item_for_link.items()}
return rev[item]
def item_for_link(self, link):
"""
Return the :class:`LinkItem` for a :class:`SchemeLink`
"""
return self.__item_for_link[link]
def selected_node_items(self):
"""
Return the selected :class:`NodeItem`'s.
"""
return [item for item in self.__node_items if item.isSelected()]
def selected_annotation_items(self):
"""
Return the selected :class:`Annotation`'s
"""
return [item for item in self.__annotation_items if item.isSelected()]
def node_links(self, node_item):
"""
Return all links from the `node_item` (:class:`NodeItem`).
"""
return self.node_output_links(node_item) + \
self.node_input_links(node_item)
def node_output_links(self, node_item):
"""
Return a list of all output links from `node_item`.
"""
return [link for link in self.__link_items
if link.sourceItem == node_item]
def node_input_links(self, node_item):
"""
Return a list of all input links for `node_item`.
"""
return [link for link in self.__link_items
if link.sinkItem == node_item]
def neighbor_nodes(self, node_item):
"""
Return a list of `node_item`'s (class:`NodeItem`) neighbor nodes.
"""
neighbors = list(map(attrgetter("sourceItem"),
self.node_input_links(node_item)))
neighbors.extend(map(attrgetter("sinkItem"),
self.node_output_links(node_item)))
return neighbors
def on_widget_state_change(self, widget, state):
pass
def on_link_state_change(self, link, state):
pass
    def on_scheme_change(self):
pass
def _on_position_change(self, item):
# Invalidate the anchor point layout and schedule a layout.
self.__anchor_layout.invalidateNode(item)
self.node_item_position_changed.emit(item, item.pos())
def __on_node_pos_changed(self, pos):
node = self.sender()
item = self.__item_for_node[node]
item.setPos(*pos)
def __on_scheme_annot_geometry_change(self):
annot = self.sender()
item = self.__item_for_annotation[annot]
if isinstance(annot, scheme.SchemeTextAnnotation):
item.setGeometry(QRectF(*annot.rect))
elif isinstance(annot, scheme.SchemeArrowAnnotation):
p1 = item.mapFromScene(QPointF(*annot.start_pos))
p2 = item.mapFromScene(QPointF(*annot.end_pos))
item.setLine(QLineF(p1, p2))
else:
pass
def item_at(self, pos, type_or_tuple=None, buttons=0):
"""Return the item at `pos` that is an instance of the specified
type (`type_or_tuple`). If `buttons` (`Qt.MouseButtons`) is given
only return the item if it is the top level item that would
accept any of the buttons (`QGraphicsItem.acceptedMouseButtons`).
"""
rect = QRectF(pos, QSizeF(1, 1))
items = self.items(rect)
if buttons:
items = itertools.dropwhile(
lambda item: not item.acceptedMouseButtons() & buttons,
items
)
items = list(items)[:1]
if type_or_tuple:
items = [i for i in items if isinstance(i, type_or_tuple)]
return items[0] if items else None
if USE_PYQT and PYQT_VERSION < 0x40900:
# For QGraphicsObject subclasses items, itemAt ... return a
# QGraphicsItem wrapper instance and not the actual class instance.
def itemAt(self, *args, **kwargs):
item = QGraphicsScene.itemAt(self, *args, **kwargs)
return toGraphicsObjectIfPossible(item)
def items(self, *args, **kwargs):
items = QGraphicsScene.items(self, *args, **kwargs)
return list(map(toGraphicsObjectIfPossible, items))
def selectedItems(self, *args, **kwargs):
return list(map(toGraphicsObjectIfPossible,
QGraphicsScene.selectedItems(self, *args, **kwargs)))
def collidingItems(self, *args, **kwargs):
return list(map(toGraphicsObjectIfPossible,
QGraphicsScene.collidingItems(self, *args, **kwargs)))
def focusItem(self, *args, **kwargs):
item = QGraphicsScene.focusItem(self, *args, **kwargs)
return toGraphicsObjectIfPossible(item)
def mouseGrabberItem(self, *args, **kwargs):
item = QGraphicsScene.mouseGrabberItem(self, *args, **kwargs)
return toGraphicsObjectIfPossible(item)
def mousePressEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.mousePressEvent(event):
return
# Right (context) click on the node item. If the widget is not
# in the current selection then select the widget (only the widget).
# Else simply return and let customContextMenuRequested signal
# handle it
shape_item = self.item_at(event.scenePos(), items.NodeItem)
if shape_item and event.button() == Qt.RightButton and \
shape_item.flags() & QGraphicsItem.ItemIsSelectable:
if not shape_item.isSelected():
self.clearSelection()
shape_item.setSelected(True)
return QGraphicsScene.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.mouseMoveEvent(event):
return
return QGraphicsScene.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.mouseReleaseEvent(event):
return
return QGraphicsScene.mouseReleaseEvent(self, event)
def mouseDoubleClickEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.mouseDoubleClickEvent(event):
return
return QGraphicsScene.mouseDoubleClickEvent(self, event)
def keyPressEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.keyPressEvent(event):
return
return QGraphicsScene.keyPressEvent(self, event)
def keyReleaseEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.keyReleaseEvent(event):
return
return QGraphicsScene.keyReleaseEvent(self, event)
def contextMenuEvent(self, event):
if self.user_interaction_handler and \
self.user_interaction_handler.contextMenuEvent(event):
return
super().contextMenuEvent(event)
def set_user_interaction_handler(self, handler):
if self.user_interaction_handler and \
not self.user_interaction_handler.isFinished():
self.user_interaction_handler.cancel()
log.info("Setting interaction '%s' to '%s'" % (handler, self))
self.user_interaction_handler = handler
if handler:
handler.start()
def __str__(self):
return "%s(objectName=%r, ...)" % \
(type(self).__name__, str(self.objectName()))
def font_from_dict(font_dict, font=None):
if font is None:
font = QFont()
else:
font = QFont(font)
if "family" in font_dict:
font.setFamily(font_dict["family"])
if "size" in font_dict:
font.setPixelSize(font_dict["size"])
return font
if QT_VERSION >= 0x50900 and \
QSvgGenerator().metric(QSvgGenerator.PdmDevicePixelRatioScaled) == 1:
# QTBUG-63159
class QSvgGenerator(QSvgGenerator):
def metric(self, metric):
if metric == QSvgGenerator.PdmDevicePixelRatioScaled:
return int(1 * QSvgGenerator.devicePixelRatioFScale())
else:
return super().metric(metric)
def grab_svg(scene):
"""
Return a SVG rendering of the scene contents.
Parameters
----------
scene : :class:`CanvasScene`
"""
svg_buffer = QBuffer()
gen = QSvgGenerator()
gen.setOutputDevice(svg_buffer)
items_rect = scene.itemsBoundingRect().adjusted(-10, -10, 10, 10)
if items_rect.isNull():
items_rect = QRectF(0, 0, 10, 10)
width, height = items_rect.width(), items_rect.height()
rect_ratio = float(width) / height
# Keep a fixed aspect ratio.
aspect_ratio = 1.618
if rect_ratio > aspect_ratio:
height = int(height * rect_ratio / aspect_ratio)
else:
width = int(width * aspect_ratio / rect_ratio)
target_rect = QRectF(0, 0, width, height)
source_rect = QRectF(0, 0, width, height)
source_rect.moveCenter(items_rect.center())
gen.setSize(target_rect.size().toSize())
gen.setViewBox(target_rect)
painter = QPainter(gen)
# Draw background.
painter.setBrush(scene.palette().base())
painter.drawRect(target_rect)
# Render the scene
scene.render(painter, target_rect, source_rect)
painter.end()
buffer_str = bytes(svg_buffer.buffer())
    return buffer_str.decode("utf-8")
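
if __name__ == '__main__':
    # Illustrative usage only (not part of the original module): render an
    # empty scene to SVG. A QApplication must exist before any QGraphicsScene
    # is created; scheme and registry wiring is omitted for brevity.
    import sys
    from AnyQt.QtWidgets import QApplication

    app = QApplication(sys.argv)
    scene = CanvasScene()
    print(grab_svg(scene)[:60])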
/NURBS-Python-3.8.0.tar.gz/NURBS-Python-3.8.0/geomdl/Abstract.py

import abc
from warnings import warn
import math
class Curve(object):
""" Abstract class for all curves. """
__metaclass__ = abc.ABCMeta
def __init__(self):
self._rational = False # defines whether the curve is rational or not
self._degree = 0 # degree
self._knot_vector = None # knot vector
self._control_points = None # control points
self._delta = 0.1 # evaluation delta
self._sample_size = None # sample size
self._curve_points = None # evaluated points
self._dimension = 0 # dimension of the curve
self._vis_component = None # visualization component
self._bounding_box = None # bounding box
self._cache = {} # cache dictionary
@property
def dimension(self):
""" Dimension of the curve.
Dimension will be automatically estimated from the first element of the control points array.
:getter: Gets the dimension of the curve, e.g. 2D, 3D, etc.
:type: integer
"""
return self._dimension
@property
def order(self):
""" Curve order.
Defined as order = degree + 1
:getter: Gets the curve order
:setter: Sets the curve order
:type: integer
"""
return self._degree + 1
@order.setter
def order(self, value):
self._degree = value - 1
@property
def degree(self):
""" Curve degree.
:getter: Gets the curve degree
:setter: Sets the curve degree
:type: integer
"""
return self._degree
@degree.setter
def degree(self, value):
val = int(value)
if val < 0:
raise ValueError("Degree cannot be less than zero")
# Clean up the curve points list, if necessary
self._reset_evalpts()
# Set degree
self._degree = val
@property
def knotvector(self):
""" Knot vector.
:getter: Gets the knot vector
:setter: Sets the knot vector
"""
return self._knot_vector
@knotvector.setter
def knotvector(self, value):
self._knot_vector = value
@property
def ctrlpts(self):
""" Control points.
:getter: Gets the control points
:setter: Sets the control points
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
self._control_points = value
@property
def curvepts(self):
""" Evaluated curve points.
:getter: Gets the coordinates of the evaluated points
"""
if not self._curve_points:
self.evaluate()
return self._curve_points
@property
def sample_size(self):
""" Sample size.
Sample size defines the number of curve points to generate. It sets the ``delta`` property.
:getter: Gets sample size
:setter: Sets sample size
:type: int
"""
if self._sample_size is None:
if self._knot_vector is not None and len(self._knot_vector) != 0:
self._sample_size = int(1.0 / self.delta) + 1
else:
warn("Cannot determine the sample size.")
return 0
return self._sample_size
@sample_size.setter
def sample_size(self, value):
if self._knot_vector is None or len(self._knot_vector) == 0:
warn("Cannot determine the delta value. Please set knot vector before setting the sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start = self._knot_vector[self._degree]
stop = self._knot_vector[-(self._degree+1)]
# Clean up the curve points list, if necessary
self._reset_evalpts()
# Set delta value
self._delta = (stop - start) / float(value - 1)
# Set sample size
self._sample_size = value
@property
def delta(self):
""" Curve evaluation delta.
Evaluation delta corresponds to the *step size* while ``evaluate`` function iterates on the knot vector to
generate curve points. Decreasing step size results in generation of more curve points.
Therefore; smaller the delta value, smoother the curve.
.. note:: The delta value is 0.1 by default.
:getter: Gets the delta value
:setter: Sets the delta value
:type: float
"""
return self._delta
@delta.setter
def delta(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Curve evaluation delta should be between 0.0 and 1.0")
# Clean up the curve points list, if necessary
self._reset_evalpts()
# Set new delta value
self._delta = float(value)
@property
def vis(self):
""" Visualization component.
.. note::
The visualization component is completely optional to use.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, VisAbstract):
warn("Visualization component is NOT an instance of VisAbstract class")
return
self._vis_component = value
@property
def bbox(self):
""" Bounding box.
Evaluates the bounding box of the curve and returns the minimum and maximum coordinates.
:getter: Gets bounding box
:type: tuple
"""
if self._bounding_box is None or len(self._bounding_box) == 0:
self._eval_bbox()
return tuple(self._bounding_box)
def _eval_bbox(self):
""" Evaluates bounding box of the curve. """
# Find correct dimension of the control points
dim = self._dimension
if self._rational:
dim -= 1
# Evaluate bounding box
bbmin = [float('inf') for _ in range(0, dim)]
        bbmax = [float('-inf') for _ in range(0, dim)]
for cpt in self.ctrlpts:
for i, arr in enumerate(zip(cpt, bbmin)):
if arr[0] < arr[1]:
bbmin[i] = arr[0]
for i, arr in enumerate(zip(cpt, bbmax)):
if arr[0] > arr[1]:
bbmax[i] = arr[0]
self._bounding_box = [tuple(bbmin), tuple(bbmax)]
# Runs visualization component to render the surface
def render(self, **kwargs):
""" Renders the curve using the loaded visualization component
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Possible keyword arguments are
* ``cpcolor``: sets the color of the control points polygon
* ``curvecolor``: sets the color of the curve
"""
if not self._vis_component:
warn("No visualization component has set")
return
cpcolor = kwargs.get('cpcolor', 'blue')
curvecolor = kwargs.get('curvecolor', 'black')
# Check all parameters are set
self._check_variables()
# Check if the surface has been evaluated
if self._curve_points is None or len(self._curve_points) == 0:
self.evaluate()
# Run the visualization component
self._vis_component.clear()
self._vis_component.add(ptsarr=self.ctrlpts, name="Control Points", color=cpcolor, plot_type='ctrlpts')
self._vis_component.add(ptsarr=self.curvepts, name="Curve", color=curvecolor, plot_type='evalpts')
self._vis_component.render()
# Checks whether the curve evaluation is possible or not
def _check_variables(self):
works = True
param_list = []
if self._degree == 0:
works = False
param_list.append('degree')
if self._control_points is None or len(self._control_points) == 0:
works = False
param_list.append('ctrlpts')
if self._knot_vector is None or len(self._knot_vector) == 0:
works = False
param_list.append('knotvector')
if not works:
raise ValueError("Please set the following variables before evaluation: " + ",".join(param_list))
# Resets the control points
def _reset_ctrlpts(self):
self._control_points = None
self._bounding_box = None
# Resets the evaluated points
def _reset_evalpts(self):
self._curve_points = None
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Evaluates the curve. """
pass
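
# --- Illustrative subclass (not part of the original module) ---
# A minimal sketch of the contract Curve imposes on concrete classes: a
# subclass only needs to implement evaluate() and store its result in
# self._curve_points. Treating the control polygon itself as the "evaluated"
# curve below is a stand-in for real B-spline evaluation, e.g.:
#     crv = _DemoPolylineCurve()
#     crv.degree = 1
#     crv.ctrlpts = [[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]]
#     crv.knotvector = [0, 0, 1, 2, 3, 3]
#     print(crv.curvepts)  # evaluates on demand
class _DemoPolylineCurve(Curve):
    def evaluate(self, **kwargs):
        self._check_variables()
        # pretend evaluation: the control polygon is the curve
        self._curve_points = [list(pt) for pt in self._control_points]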
class Surface(object):
""" Abstract class for all surfaces. """
__metaclass__ = abc.ABCMeta
def __init__(self):
# U-direction
self._degree_u = 0 # degree
self._knot_vector_u = None # knot vector
self._control_points_size_u = 0 # control points array length
self._delta_u = 0.1 # evaluation delta
# V-direction
self._degree_v = 0 # degree
self._knot_vector_v = None # knot vector
self._control_points_size_v = 0 # control points array length
self._delta_v = 0.1 # evaluation delta
# Common
self._rational = False # defines whether the surface is rational or not
self._sample_size = None # defines sample size
self._control_points = None # control points, 1-D array (v-order)
self._control_points2D = None # control points, 2-D array [u][v]
self._surface_points = None # evaluated points
self._dimension = 0 # dimension of the surface
self._vis_component = None # visualization component
self._bounding_box = None # bounding box
self._cache = {} # cache dictionary
@property
def dimension(self):
""" Dimension of the surface.
Dimension will be automatically estimated from the first element of the control points array.
:getter: Gets the dimension of the surface
:type: integer
"""
return self._dimension
@property
def order_u(self):
""" Surface order for U direction.
Follows the following equality: order = degree + 1
:getter: Gets the surface order for U direction
:setter: Sets the surface order for U direction
:type: integer
"""
return self._degree_u + 1
@order_u.setter
def order_u(self, value):
self._degree_u = value - 1
@property
def order_v(self):
""" Surface order for V direction.
Follows the following equality: order = degree + 1
:getter: Gets the surface order for V direction
:setter: Sets the surface order for V direction
:type: integer
"""
return self._degree_v + 1
@order_v.setter
def order_v(self, value):
self._degree_v = value - 1
@property
def degree_u(self):
""" Surface degree for U direction.
:getter: Gets the surface degree for U direction
:setter: Sets the surface degree for U direction
:type: integer
"""
return self._degree_u
@degree_u.setter
def degree_u(self, value):
val = int(value)
        if val <= 0:
            raise ValueError("Degree should be greater than zero")
        # Clean up the surface points lists, if necessary
        self._reset_evalpts()
        # Set degree u
        self._degree_u = val
@property
def degree_v(self):
""" Surface degree for V direction.
:getter: Gets the surface degree for V direction
:setter: Sets the surface degree for V direction
:type: integer
"""
return self._degree_v
@degree_v.setter
def degree_v(self, value):
val = int(value)
        if val <= 0:
            raise ValueError("Degree should be greater than zero")
# Clean up the surface points lists, if necessary
self._reset_evalpts()
# Set degree v
self._degree_v = val
@property
def knotvector_u(self):
""" Knot vector for U direction.
:getter: Gets the knot vector for U direction
:setter: Sets the knot vector for U direction
"""
return self._knot_vector_u
@knotvector_u.setter
def knotvector_u(self, value):
self._knot_vector_u = value
@property
def knotvector_v(self):
""" Knot vector for V direction.
:getter: Gets the knot vector for V direction
:setter: Sets the knot vector for V direction
"""
return self._knot_vector_v
@knotvector_v.setter
def knotvector_v(self, value):
self._knot_vector_v = value
@property
def ctrlpts(self):
""" 1-D control points.
:getter: Gets the control points
:setter: Sets the control points
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
self._control_points = value
@property
def ctrlpts2d(self):
""" 2-D control points.
:getter: Gets the control points in U and V directions
:setter: Sets the control points in U and V directions
"""
return self._control_points2D
@ctrlpts2d.setter
def ctrlpts2d(self, value):
self._control_points2D = value
@property
def ctrlpts_size_u(self):
""" Size of the control points array in U-direction.
:getter: Gets number of control points in U-direction
:setter: Sets number of control points in U-direction
"""
return self._control_points_size_u
@ctrlpts_size_u.setter
def ctrlpts_size_u(self, value):
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size_u = value
@property
def ctrlpts_size_v(self):
""" Size of the control points array in V-direction.
:getter: Gets number of control points in V-direction
:setter: Sets number of control points in V-direction
"""
return self._control_points_size_v
@ctrlpts_size_v.setter
def ctrlpts_size_v(self, value):
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size_v = value
@property
def surfpts(self):
""" Evaluated surface points.
:getter: Gets the coordinates of the evaluated points
"""
if not self._surface_points:
self.evaluate()
return self._surface_points
@property
def sample_size(self):
""" Sample size.
Sample size defines the number of surface points to generate. It sets the ``delta`` property.
:getter: Gets sample size
:setter: Sets sample size
:type: int
"""
if self._sample_size is None:
if self._knot_vector_u is not None and len(self._knot_vector_u) != 0:
self._sample_size = int(1.0 / self.delta_u) + 1
elif self._knot_vector_v is not None and len(self._knot_vector_v) != 0:
self._sample_size = int(1.0 / self.delta_v) + 1
else:
warn("Cannot determine the sample size")
return 0
return self._sample_size
@sample_size.setter
def sample_size(self, value):
if (self._knot_vector_u is None or len(self._knot_vector_u) == 0) or\
(self._knot_vector_v is None or len(self._knot_vector_v) == 0):
warn("Cannot determine the delta value. Please set knot vectors before setting the sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_u = self._knot_vector_u[self._degree_u]
stop_u = self._knot_vector_u[-(self._degree_u+1)]
start_v = self._knot_vector_v[self._degree_v]
stop_v = self._knot_vector_v[-(self._degree_v+1)]
# Clean up the surface points lists, if necessary
self._reset_evalpts()
# Set delta values
self._delta_u = (stop_u - start_u) / float(value - 1)
self._delta_v = (stop_v - start_v) / float(value - 1)
# Set sample size
self._sample_size = value
@property
def delta_u(self):
""" Evaluation delta in U-direction.
Evaluation delta corresponds to the *step size* while ``evaluate`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
Therefore; smaller the delta value, smoother the surface.
.. note:: The delta value is 0.1 by default.
:getter: Gets the delta value
:setter: Sets the delta value
:type: float
"""
return self._delta_u
@delta_u.setter
def delta_u(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Surface evaluation delta should be between 0.0 and 1.0")
# Clean up the surface points lists, if necessary
self._reset_evalpts()
# Set a new delta value
self._delta_u = float(value)
@property
def delta_v(self):
""" Evaluation delta in V-direction.
Evaluation delta corresponds to the *step size* while ``evaluate`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
Therefore; smaller the delta value, smoother the surface.
.. note:: The delta value is 0.1 by default.
:getter: Gets the delta value
:setter: Sets the delta value
:type: float
"""
return self._delta_v
@delta_v.setter
def delta_v(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Surface evaluation delta should be between 0.0 and 1.0")
# Clean up the surface points lists, if necessary
self._reset_evalpts()
# Set a new delta value
self._delta_v = float(value)
@property
def delta(self):
""" Evaluation delta in U- and V-directions.
Evaluation delta corresponds to the *step size* while ``evaluate`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
Therefore; smaller the delta value, smoother the surface.
.. note:: The delta value is 0.1 by default.
:getter: Gets the delta value
:setter: Sets the delta value
:type: float
"""
return self.delta_u, self.delta_v
@delta.setter
def delta(self, value):
if isinstance(value, float):
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Surface evaluation delta should be between 0.0 and 1.0")
self._delta_u = value
self._delta_v = value
elif isinstance(value, (list, tuple)):
if len(value) == 2:
if float(value[0]) <= 0 or float(value[0]) >= 1 or float(value[1]) <= 0 or float(value[1]) >= 1:
raise ValueError("Surface evaluation delta should be between 0.0 and 1.0")
self._delta_u = value[0]
self._delta_v = value[1]
else:
raise ValueError("Surface requires 2 delta values")
else:
warn("Cannot set delta. Please use a float or a list with 2 elements")
@property
def vis(self):
""" Visualization component.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, VisAbstract):
warn("Visualization component is NOT an instance of VisAbstract class")
return
self._vis_component = value
@property
def bbox(self):
""" Bounding box.
Evaluates the bounding box of the surface and returns the minimum and maximum coordinates.
:getter: Gets bounding box
:type: tuple
"""
if self._bounding_box is None or len(self._bounding_box) == 0:
self._eval_bbox()
return tuple(self._bounding_box)
def _eval_bbox(self):
""" Evaluates bounding box of the surface. """
# Find correct dimension of the control points
dim = self._dimension
if self._rational:
dim -= 1
# Evaluate bounding box
bbmin = [float('inf') for _ in range(0, dim)]
        bbmax = [float('-inf') for _ in range(0, dim)]
for cpt in self.ctrlpts:
for i, arr in enumerate(zip(cpt, bbmin)):
if arr[0] < arr[1]:
bbmin[i] = arr[0]
for i, arr in enumerate(zip(cpt, bbmax)):
if arr[0] > arr[1]:
bbmax[i] = arr[0]
self._bounding_box = [tuple(bbmin), tuple(bbmax)]
# Runs visualization component to render the surface
def render(self, **kwargs):
""" Renders the surface using the loaded visualization component.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Possible keyword arguments are
* ``cpcolor``: sets the color of the control points grid
* ``surfcolor``: sets the color of the surface
"""
if not self._vis_component:
warn("No visualization component has set")
return
cpcolor = kwargs.get('cpcolor', 'blue')
surfcolor = kwargs.get('surfcolor', 'green')
# Check all parameters are set
self._check_variables()
# Check if the surface has been evaluated
if self._surface_points is None or len(self._surface_points) == 0:
self.evaluate()
# Run the visualization component
self._vis_component.clear()
self._vis_component.add(ptsarr=self.ctrlpts,
size=[self._control_points_size_u, self._control_points_size_v],
name="Control Points", color=cpcolor, plot_type='ctrlpts')
self._vis_component.add(ptsarr=self._surface_points,
size=[self.sample_size, self.sample_size],
name="Surface", color=surfcolor, plot_type='evalpts')
self._vis_component.render()
# Resets the control points
def _reset_ctrlpts(self):
self._control_points = None
self._control_points2D = None
self._bounding_box = None
# Resets the evaluated points
def _reset_evalpts(self):
self._surface_points = None
# Checks whether the surface evaluation is possible or not
def _check_variables(self):
works = True
param_list = []
if self._degree_u == 0:
works = False
param_list.append('degree_u')
if self._degree_v == 0:
works = False
param_list.append('degree_v')
if self._control_points is None or len(self._control_points) == 0:
works = False
param_list.append('ctrlpts')
if self._knot_vector_u is None or len(self._knot_vector_u) == 0:
works = False
param_list.append('knotvector_u')
if self._knot_vector_v is None or len(self._knot_vector_v) == 0:
works = False
param_list.append('knotvector_v')
if not works:
raise ValueError("Please set the following variables before evaluation: " + ",".join(param_list))
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Evaluates the surface. """
pass
class Multi(object):
""" Abstract class for curve and surface containers. """
__metaclass__ = abc.ABCMeta
def __init__(self):
self._elements = [] # elements contained
self._sample_size = 10 # sample size
self._vis_component = None # visualization component
self._iter_index = 0 # iterator index
self._instance = None # type of the initial element
def __iter__(self):
self._iter_index = 0
return self
def next(self):
return self.__next__()
def __next__(self):
try:
result = self._elements[self._iter_index]
except IndexError:
raise StopIteration
self._iter_index += 1
return result
def __reversed__(self):
return reversed(self._elements)
def __getitem__(self, index):
return self._elements[index]
def __len__(self):
return len(self._elements)
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Cannot add non-matching types of Multi containers")
ret = self.__class__()
new_elems = self._elements + other._elements
ret.add_list(new_elems)
return ret
@property
def sample_size(self):
""" Sample size.
Sample size defines the number of evaluated points to generate. It sets the ``delta`` property.
:getter: Gets sample size
:setter: Sets sample size
:type: int
"""
return self._sample_size
@sample_size.setter
def sample_size(self, value):
self._sample_size = value
@property
def vis(self):
""" Visualization component.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, VisAbstract):
warn("Visualization component is NOT an instance of the abstract class")
return
self._vis_component = value
def add(self, element):
""" Abstract method for adding surface or curve objects to the container.
        :param element: the curve or surface object to be added
        :type element: object compatible with the container's element type
"""
if not isinstance(element, self._instance):
warn("Cannot add, incompatible type.")
return
self._elements.append(element)
def add_list(self, elements):
""" Adds curve objects to the container.
:param elements: curve objects to be added
:type elements: list, tuple
"""
if not isinstance(elements, (list, tuple)):
warn("Input must be a list or a tuple")
return
for element in elements:
self.add(element)
def translate(self, vec=()):
""" Translates the elements in the container by the input vector.
:param vec: translation vector
:type vec: list, tuple
"""
for elem in self._elements:
elem.translate(vec)
# Runs visualization component to render the surface
@abc.abstractmethod
def render(self):
""" Abstract method for rendering plots using the visualization component. """
pass
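# A minimal sketch of a concrete container (the class and element type are
# assumptions for illustration; actual subclasses set ``self._instance`` to
# the element type they accept):
#
#   class SurfaceContainer(Multi):
#       def __init__(self):
#           super(SurfaceContainer, self).__init__()
#           self._instance = Surface  # only Surface objects may be added
#
#       def render(self):
#           for surf in self:
#               surf.render()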
class VisConfigAbstract(object):
""" Visualization configuration abstract class
Uses Python's *Abstract Base Class* implementation to define a base for all visualization configurations
    in the NURBS-Python package.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, **kwargs):
pass
class VisAbstract(object):
""" Visualization abstract class
Uses Python's *Abstract Base Class* implementation to define a base for all common visualization options
    in the NURBS-Python package.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, config=None):
self._plots = []
self._config = config
def clear(self):
""" Clears the points, colors and names lists. """
self._plots[:] = []
def add(self, ptsarr=(), size=0, name=None, color=None, plot_type=0):
""" Adds points sets to the visualization instance for plotting.
:param ptsarr: control, curve or surface points
:type ptsarr: list, tuple
:param size: size in all directions, e.g. in U- and V-direction
:type size: int, tuple, list
:param name: name of the point on the legend
:type name: str
:param color: color of the point on the legend
:type color: str
        :param plot_type: type of the plot, e.g. control points ('ctrlpts') or evaluated points ('evalpts')
        :type plot_type: int, str
"""
if ptsarr is None or len(ptsarr) == 0:
return
if not color or not name:
return
# Add points, size, plot color and name on the legend
elem = {'ptsarr': ptsarr, 'size': size, 'name': name, 'color': color, 'type': plot_type}
self._plots.append(elem)
@abc.abstractmethod
def render(self):
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstract`` class.
"""
pass
class VisAbstractSurf(VisAbstract):
""" Visualization abstract class for surfaces
    Implements the ``VisAbstract`` class and also uses Python's *Abstract Base Class* implementation to define a base
    for **surface** visualization options in the NURBS-Python package.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, config=None):
super(VisAbstractSurf, self).__init__(config=config)
self._ctrlpts_offset = 0.0
def set_ctrlpts_offset(self, offset_value):
""" Sets an offset for the control points grid plot.
:param offset_value: offset value
:type offset_value: float
"""
self._ctrlpts_offset = float(offset_value)
@abc.abstractmethod
def render(self):
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstractSurf`` class.
"""
        pass
/Allegra-0.63.zip/Allegra-0.63/lib/async_chat.py
# Copyright (C) 2005 Laurent A.V. Szyster
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# http://www.gnu.org/copyleft/gpl.html
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"http://laurentszyster.be/blog/async_chat/"
import collections, socket
from allegra import async_core
def find_prefix_at_end (haystack, needle):
"given 'haystack', see if any prefix of 'needle' is at its end."
l = len (needle) - 1
while l and not haystack.endswith (needle[:l]):
l -= 1
return l
def collect_chat (c, buffer):
"collect a buffer for a channel or collector"
lb = len (buffer)
while lb:
terminator = c.get_terminator ()
if terminator is None or terminator == '':
c.collect_incoming_data (buffer)
buffer = ''
elif isinstance (terminator, int):
if lb < terminator:
c.collect_incoming_data (buffer)
buffer = ''
c.set_terminator (terminator - lb)
else:
c.collect_incoming_data (buffer[:terminator])
buffer = buffer[terminator:]
c.set_terminator (0)
if c.found_terminator ():
c.collector_stalled = True
break
else:
tl = len (terminator)
index = buffer.find (terminator)
if index != -1:
if index > 0:
c.collect_incoming_data (
buffer[:index]
)
buffer = buffer[index+tl:]
if c.found_terminator ():
c.collector_stalled = True
break
else:
index = find_prefix_at_end (
buffer, terminator
)
if index:
if index != lb:
c.collect_incoming_data (
buffer[:-index]
)
buffer = buffer[-index:]
break
else:
c.collect_incoming_data (buffer)
buffer = ''
lb = len (buffer)
return buffer
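# For illustration, the three terminator modes handled above, on some channel
# or collector c (the values shown are hypothetical):
#
#   c.set_terminator ('\r\n\r\n')  # collect until the string is found
#   c.set_terminator (1024)        # collect exactly that many bytes
#   c.set_terminator (None)        # no terminator, collect everything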
class Dispatcher (async_core.Dispatcher):
ac_in_buffer_size = ac_out_buffer_size = 1 << 14
terminator = None
collector_stalled = False
collector_is_simple = False
collector_depth = 32
def __init__ (self):
self.ac_in_buffer = ''
self.ac_out_buffer = ''
self.output_fifo = collections.deque ()
def __repr__ (self):
return 'async-chat id="%x"' % id (self)
def readable (self):
"predicate for inclusion in the poll loop for input"
return not (
self.collector_stalled or
len (self.ac_in_buffer) > self.ac_in_buffer_size
)
def writable (self):
"predicate for inclusion in the poll loop for output"
try:
return not (
self.output_fifo[
0
].producer_stalled () and
self.connected
)
except:
return not (
(self.ac_out_buffer == '') and
not self.output_fifo and
self.connected
)
def handle_read (self):
"try to refill the input buffer and collect it"
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
self.handle_error ()
return
self.ac_in_buffer = collect_chat (
self, self.ac_in_buffer + data
)
def handle_write (self):
"maybe refill the output buffer and try to send it"
obs = self.ac_out_buffer_size
buffer = self.ac_out_buffer
if len (buffer) < obs:
fifo = self.output_fifo
while fifo:
p = fifo[0]
                if p is None:
if buffer == '':
fifo.popleft ()
self.handle_close ()
return
break
elif type (p) == str:
fifo.popleft ()
buffer += p
if len (buffer) < obs:
continue
break
if p.producer_stalled ():
break
data = p.more ()
if data:
buffer += data
break
fifo.popleft ()
if buffer:
sent = self.send (buffer[:obs])
if sent:
self.ac_out_buffer = buffer[sent:]
else:
self.ac_out_buffer = buffer
else:
self.ac_out_buffer = ''
def close (self):
"close the dispatcher and maybe terminate the collector"
async_core.Dispatcher.close (self)
if not self.collector_stalled:
depth = self.collector_depth
while depth and not self.found_terminator ():
depth -= 1
if depth < 1:
self.log (
'%d' % self.collector_depth,
'collector-leak'
)
def close_when_done (self):
"""automatically close this channel once the outgoing queue
is empty, or handle close now if it is allready empty"""
if self.output_fifo:
self.output_fifo.append (None)
else:
self.handle_close () # when done is now!
def async_chat_push (self, p):
"push a string or producer on the output deque"
assert type (p) == str or hasattr (p, 'more')
self.output_fifo.append (p)
# push_with_producer = push = async_chat_push
def async_chat_pull (self):
"stall no more and collect the input buffer"
self.collector_stalled = False
if self.ac_in_buffer:
self.ac_in_buffer = collect_chat (
self, self.ac_in_buffer
)
def set_terminator (self, terminator):
"set the channel's terminator"
self.terminator = terminator
def get_terminator (self):
"get the channel's terminator"
return self.terminator
def collect_incoming_data (self, data):
"assert debug log of collected data"
assert None == self.log (data, 'collect-incoming-data')
def found_terminator (self):
"assert debug log of terminator found"
assert None == self.log (
self.get_terminator (), 'found-terminator'
)
return True # do not pipeline
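# A minimal sketch of a concrete channel (the class and its behaviour are
# assumptions for illustration, not part of Allegra):
#
#   class LineChannel (Dispatcher):
#
#       def __init__ (self):
#           Dispatcher.__init__ (self)
#           self.data = ''
#           self.set_terminator ('\r\n')
#
#       def collect_incoming_data (self, data):
#           self.data += data
#
#       def found_terminator (self):
#           print 'line: %r' % self.data
#           self.data = ''
#           return False # keep collecting, do not stall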
# Note about this implementation
#
# This is a refactored version of asynchat.py as found in Python 2.4, and
# modified as to support stallable producers and collectors, loginfo and
# finalization.
#
# Stallable Producer and Collector
#
# In order to support non-blocking asynchronous and synchronized peers,
# the async_chat module introduces a stallable collector and generalizes
# the stallable producer of Medusa's proxy.
#
# Besides the fact that stallable reactors are a requirement for peers
# that do not block, they have other practical benefits. For instance,
# a channel with a stalled collector and an empty output_fifo will not
# be polled for I/O.
#
# This implementation uses collections.deque for output FIFO queues instead
# of a class wrapper, and the push () method actually does what it is
# supposed to do and pushes a string at the end of that output queue, not a
# Simple instance.
#
# The channel's method collect_incoming_data is called to collect data
# between terminators. Its found_terminator method is called whenever
# the current terminator is found, and if that method returns True, then
# no more buffer will be consumed until the channel's collector_stalled
# is not set to False by a call to async_chat_pull.
/ILThermoPy-1.0.0-py3-none-any.whl/ilthermopy/search.py
import typing as _typing
try:
from typing import Literal as _Literal
except ImportError:
from typing_extensions import Literal as _Literal
import pandas as _pd
import ilthermopy.errors as _err
import ilthermopy.requests as _req
import ilthermopy.data_structs as _ds
from ilthermopy.compound_list import _compounds as _cmp
def ShowPropertyList() -> None:
'''Prints list of properties available in ILThermo 2.0 database'''
_ds.PropertyList().Show()
return
def _SearchItemToRow(r: _typing.List) -> _typing.Dict:
    '''Transforms a row of the ILThermo search response into a dictionary
    formatted for the dataframe containing search results'''
# basic info
row = {'id': r[0],
'reference': r[1],
'property': r[2],
'phases': r[3],
'num_phases': r[3].count(';') + 1,
'num_components': len(r) - 8,
'num_data_points': int(r[7])}
# compounds
for i, j in enumerate(range(4, 7)):
code, name = (r[j], r[j+4]) if r[j] is not None else (None, None)
row[f'cmp{i+1}'] = name
row[f'cmp{i+1}_id'] = code
smiles = _cmp.id2smiles.get(code, None)
if not smiles:
smiles = _cmp.name2smiles.get(name, None)
row[f'cmp{i+1}_smiles'] = smiles
return row
def Search(compound: _typing.Optional[str] = None,
n_compounds: _Literal[None,1,2,3] = None,
prop: _typing.Optional[str] = None,
prop_key: _typing.Optional[str] = None,
year: _typing.Optional[int] = None,
author: _typing.Optional[str] = None,
keywords: _typing.Optional[str] = None) -> _pd.DataFrame:
'''Runs ILThermo search and returns results as a dataframe
Arguments:
compound: chemical formula, CAS registry number, or name (part or full)
n_compounds: number of mixture compounds
prop: name of physico-chemical property, only used if prop_key is not specified
        prop_key: key of physico-chemical property (view available properties via ShowPropertyList)
year: publication year
author: author's last name
keywords: keywords presumably specified in paper's title
Returns:
dataframe containing main info on found entries
'''
# get property key
if not prop_key and prop:
plist = _ds.PropertyList()
prop_key = plist.prop2key.get(prop, None)
if prop_key is None:
raise ValueError(f'Unknown property: {prop}\nCheck available properties via the ilt.ShowPropertyList function')
# run search API
data = _req.GetEntries(compound = compound,
n_compounds = n_compounds,
prop_key = prop_key,
year = year,
author = author,
keywords = keywords)
# process returned errors
errors = data.get('errors', [])
if errors:
raise _err.ILThermoSearchError(errors)
# transform to table
try:
rows = [_SearchItemToRow(r) for r in data['res']]
except (KeyError, IndexError, ValueError, TypeError):
raise _err.ILThermoResponseError('Search API', 'Unexpected JSON structure')
df = _pd.DataFrame(rows)
return df
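# A minimal usage sketch (assuming the package re-exports Search at the top
# level; the search terms below are hypothetical example values):
#
#   import ilthermopy as ilt
#   df = ilt.Search(compound='imidazolium', prop='Viscosity', n_compounds=2)
#   print(df[['id', 'reference', 'property', 'num_data_points']].head())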
def GetAllEntries() -> _pd.DataFrame:
'''Returns main info on all available ILThermo entries
Returns:
dataframe containing all currently available entries
'''
df = _pd.concat([Search(n_compounds = i) for i in (1,2,3)],
ignore_index = True)
    return df
/Analyzer-zero-0.3.tar.gz/Analyzer-zero-0.3/analyzerlibs/nerual/_nerual.py
from qlib.data.sql import SqlObjectEngine, Table
class Doc(Table):
title = 'title'
url = str
content = str
tag = str
class InLink(Table):
fromid = int
toid= int
strength = float
class OutLink(Table):
fromid = int
toid = int
strength = float
class Hidden(Table):
link_type = 'Doc'
linkid = int
desc = 'this is describle for hidden node . the hidden node can target to any obj. default is doc'
def __init__(self, handler=None, **kargs):
v = kargs['linkid']
if hasattr(v, '_table'):
kargs['linkid'] = v.id
kargs['link_type'] = v.__class__.__name__
kargs['desc'] = v.__class__.__name__
super().__init__(handler=handler, **kargs)
    def __call__(self, sqlhandler, Obj):
        return sqlhandler.find_one(Obj, ID=self.linkid)
class Neural:
layer_map =[InLink, Hidden, OutLink]
def __init__(self, database):
self._db = SqlObjectEngine(database=database)
self.database_path = database
if not ('InLink',) in self._db.sql.table_list():
self._db.create(InLink)
if not ('OutLink',) in self._db.sql.table_list():
self._db.create(OutLink)
if not ('Hidden',) in self._db.sql.table_list():
self._db.create(Hidden)
def __del__(self):
self._db.close()
def _get_from_db(self, table,**kargs):
if hasattr(table, '_table'):
return self._db.find_one(table, **kargs)
else:
return self._db.sql.first(table, **kargs)
def getStrength(self, fromid, toid, layer):
if isinstance(layer, int):
l = self.__class__.layer_map[layer]
elif hasattr(layer, '_table'):
l = layer
else:
raise Exception("not such layer : ",layer)
res = self._get_from_db(l, fromid=fromid, toid=toid)
if not res:
if l is InLink:
return -0.2
else:
return 0.0
return res.strength
def getAllHiddenIds(self, inids, outids):
res_ids = set()
for inid in inids:
rows = self._db.find(InLink, fromid=inid)
for r in rows:
res_ids.add(r.toid)
for outid in outids:
            rows = self._db.find(OutLink, toid=outid)
for r in rows:
res_ids.add(r.fromid)
return res_ids
def setUpNetwork(self, inids, outids):
self.inids = inids
self.outids = outids
self.hiddenids = self.getAllHiddenIds(inids, outids)
# node
self.ai = [1.0] * len(self.inids)
self.ah = [1.0] * len(self.hiddenids)
self.ao = [1.0] * len(self.outids)
# create weights matrix
self.wi = [[ self.getStrength(inid, hiddenid, 0) for hiddenid in self.hiddenids] for inid in self.inids ]
self.wo = [[ self.getStrength(hiddenid, outid, 2) for hiddenid in self.hiddenids] for outid in self.outids ]
def feedforward(self, activation_func):
# the only inputs are the query words
for i in range(len(self.inids)):
self.ai[i] = 1.0
# hidden activations
self.ah = activation_func(self.inids, self.hiddenids, self.ai, self.wi)
        self.ao = activation_func(self.hiddenids, self.outids, self.ah, self.wo)
return self.ao
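    # A minimal sketch of an ``activation_func`` compatible with the calls
    # above (the signature is inferred from feedforward; note that self.wi
    # is built indexed [from][to] while self.wo is built [to][from], so a
    # real implementation must pick one orientation -- this sketch assumes
    # weights[from][to]):
    #
    #   from math import tanh
    #
    #   def feed_layer(fromids, toids, activations, weights):
    #       out = []
    #       for j in range(len(toids)):
    #           total = sum(activations[i] * weights[i][j]
    #                       for i in range(len(fromids)))
    #           out.append(tanh(total))
    #       return out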
def getresult(self, inids, outids):
pass
    def setStrength(self, fromid, toid, layer, strength):
        if isinstance(layer, int):
            l = self.__class__.layer_map[layer]
        elif hasattr(layer, '_table'):
            l = layer
        else:
            raise Exception("no such layer: ", layer)
        # getStrength returns a plain float, so fetch the actual row here
        res = self._get_from_db(l, fromid=fromid, toid=toid)
        if not res:
            self._db.add(l(fromid=fromid, toid=toid, strength=strength))
        else:
            res['strength'] = strength
            self._db.save(res)
def __repr__(self):
        return '%d-layer-neural | stored in %s' % (len(self.layer_map), self.database_path)
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/methods/groups/leave_group_call.py
import asyncio
from typing import Union
from ...exceptions import NoActiveGroupCall
from ...exceptions import NodeJSNotRunning
from ...exceptions import NoMtProtoClientSet
from ...exceptions import NotInGroupCallError
from ...mtproto import BridgedClient
from ...scaffold import Scaffold
from ...types import NotInGroupCall
from ...types.session import Session
class LeaveGroupCall(Scaffold):
async def leave_group_call(
self,
chat_id: Union[int, str],
):
"""Leave a group call
        This method allows you to leave a group call
Parameters:
chat_id (``int`` | ``str``):
Unique identifier of the target chat.
Can be a direct id (int) or a username (str)
Raises:
NoMtProtoClientSet: In case you try
to call this method without any MtProto client
NodeJSNotRunning: In case you try
to call this method without do
:meth:`~newcalls.NewCalls.start` before
NoActiveGroupCall: In case you try
                to leave a group call that has not started
NotInGroupCallError: In case you try
to leave a non-joined group call
Example:
.. code-block:: python
:emphasize-lines: 10-12
                from newcalls import NewCalls
from newcalls import idle
...
app = NewCalls(client)
app.start()
... # Call API methods
app.leave_group_call(
-1001185324811,
)
idle()
"""
if self._app is not None:
if self._wait_until_run is not None:
try:
chat_id = int(chat_id)
except ValueError:
chat_id = BridgedClient.chat_id(
await self._app.resolve_peer(chat_id),
)
chat_call = await self._app.get_full_chat(
chat_id,
)
if chat_call is not None:
solver_id = Session.generate_session_id(24)
async def internal_sender():
if not self._wait_until_run.done():
await self._wait_until_run
await self._binding.send({
'action': 'leave_call',
'chat_id': chat_id,
'type': 'requested',
'solver_id': solver_id,
})
asyncio.ensure_future(internal_sender())
result = await self._wait_result.wait_future_update(
solver_id,
)
if isinstance(result, NotInGroupCall):
raise NotInGroupCallError()
else:
raise NoActiveGroupCall()
else:
raise NodeJSNotRunning()
else:
            raise NoMtProtoClientSet()
/BatchJAX-0.1.0-py3-none-any.whl/batchjax/batcher.py
from typing import Callable, List
import objax
import jax
import jax.numpy as np
def vc_to_dict(vc):
"""Convert an objax var colection to python dict"""
all_vars = {}
for key in vc.keys():
all_vars[key] = np.array(vc[key].value)
return all_vars
def remove_prefix_from_dict_keys(d: dict, prefix: str):
"""Assumes that all keys in d start with the same prefix"""
return {k[len(prefix) :]: v for k, v in d.items()}
def get_state_var_names(obj_list):
"""Get names of all state variables."""
sv_names = []
for obj in obj_list:
var_collection = obj.vars()
for key in var_collection.keys():
if type(var_collection[key]) != objax.TrainVar:
sv_names.append(key)
return sv_names
def get_batched_vars(obj_list):
"""
Stack all object var collections. We assume that each obj in obj_list is from the same class.
For each obj in obj_list we retrieve its var collection, which is a dictionary of parameters.
    These are stacked into a new dictionary with the same keys, where each value is
    an array stacked across all objects.
"""
all_vars = {}
# collect vars
for obj in obj_list:
var_collection = obj.vars()
for key in var_collection.keys():
if key not in all_vars:
all_vars[key] = []
all_vars[key].append(var_collection[key].value)
# convert to jax array
for obj in obj_list:
var_collection = obj.vars()
for key in var_collection.keys():
all_vars[key] = np.array(all_vars[key])
return all_vars
def dict_to_int(d, num):
"""Creates a new dict with same keys as d but values num"""
return {k: num for k, i in d.items()}
def get_objax_iter_index(vc):
"""Mimics the way objax iterates over variables and returns the index in the same order."""
seen = set()
idx = []
for i, v in enumerate(vc.values()):
if id(v) not in seen:
seen.add(id(v))
idx.append(i)
return idx
def list_index(a, idx):
"""
    Selects the elements of a at the positions given in idx.
    Args:
        a: array of elements to index
        idx: array of indexes into a
"""
new_a = list(map(a.__getitem__, idx))
return new_a
def bool_map(
items: list, true_fn: Callable, false_fn: Callable, bool_arr: List[bool]
) -> list:
"""
Iterates over items and applies either true_fn or false_fn depending
    on whether bool_arr is true or false respectively.
Both true_fn and false_fn take 2 arguments:
item , index
"""
return [
true_fn(items[i], i) if bool_arr[i] else false_fn(items[i], i)
for i in range(len(items))
]
def _batched_vmap_wrapper(fn, bool_arr, *args):
"""
A wrapper around fn that unpacks the jax.vmap arguments reorganises them so then can be passed to fn.
Args:
fn: original function passed by the user to be batched
bool_arr: a list that indicates which input is an objax array
args: an array of inputs that have been batched.
The first half are the objax objects that have been batched.
The second half refer to the batched variables.
"""
# The first half of args refer to modules
num_args = len(args)
num_m = int(num_args / 2)
# collect the reference modules
modules = [args[i] for i in range(num_m)]
# collect the batched variables
batched_vars = [args[i] for i in range(num_m, num_args)]
# modules is the array of referce variables which have not been batched
# if a module is not a ModuleList we need to replace with the corresonding tensor
# inside batched_vars
modules = bool_map(
modules,
true_fn=lambda x, i: x,
false_fn=lambda x, i: batched_vars[i],
bool_arr=bool_arr,
)
original_tensors = bool_map(
modules,
true_fn=lambda x, i: x.vars().tensors(),
false_fn=lambda x, i: x,
bool_arr=bool_arr,
)
# JAX does not ensure that dict will have same order after vmap
    # So we need to re-order the batched var collections to match that of the corresponding modules
# See https://github.com/google/jax/issues/4085
fix_order = lambda d, m: {a: d[a] for a in m.vars().keys()}
new_tensors = bool_map(
batched_vars,
true_fn=lambda bv, i: [i for k, i in fix_order(bv, modules[i]).items()],
false_fn=lambda x, i: x,
bool_arr=bool_arr,
)
# assign new tensors to modules
bool_map(
modules,
true_fn=lambda x, i: x.vars().assign(
list_index(new_tensors[i], get_objax_iter_index(x.vars()))
),
false_fn=lambda x, i: None,
bool_arr=bool_arr,
)
val = fn(*modules)
# assign old tensors back
bool_map(
modules,
true_fn=lambda x, i: x.vars().assign(original_tensors[i]),
false_fn=lambda x, i: None,
bool_arr=bool_arr,
)
return val
def _batched(fn, inputs, axes, out_dim, bool_arr, module_ref_fn, var_fn):
"""
This is the function where the batching is done.
Args:
fn: callable function to batch/loop over
        inputs: inputs to be passed to fn
        axes: corresponding axis for each input to batch/loop over
        out_dim: the number of values returned by fn
        bool_arr: a boolean list corresponding to each input indicating whether it is an objax object
module_ref_fn: a function that retrieves the underlying objax object.
This is required to make this function more general than just passing through the raw objax variables
var_fn: for an objax object return its variables
To implement the batching we:
1) For input corresponding to an objax input extract the base objax object
2) For each objax input collect its stacked variables
3) Organise inputs to _batched_vmap_wrapper so that it can be called with jax.vmap
"""
N = len(inputs)
# Step 1
# For each Batched obj we need to pass through the objax module that is being matched
ref_vmap_inputs = bool_map(
inputs,
true_fn=lambda x, i: module_ref_fn(x),
false_fn=lambda x, i: None,
bool_arr=bool_arr,
)
# Do not batch the reference objax.Modules
ref_vmap_inputs_axes = [None for i in range(N)]
# Step 2
batched_inputs = bool_map(
inputs,
true_fn=lambda x, i: var_fn(x),
false_fn=lambda x, i: x,
bool_arr=bool_arr,
)
# Step 3
in_axes_dict_list = bool_map(
batched_inputs,
true_fn=lambda x, i: dict_to_int(x, axes[i]),
false_fn=lambda x, i: axes[i],
bool_arr=bool_arr,
)
res = jax.vmap(
_batched_vmap_wrapper,
in_axes=[None, None, *ref_vmap_inputs_axes, *in_axes_dict_list],
out_axes=0,
)(fn, bool_arr, *ref_vmap_inputs, *batched_inputs)
return res
# Objax mode
def batch_over_objax_list(fn, inputs: list, axes: list, out_dim: int):
"""Entry point for batching over a list of inputs that can contain objax objects."""
input_batched_flag = [type(i) == objax.ModuleList for i in inputs]
return _batched(
fn,
inputs,
axes,
out_dim,
input_batched_flag,
lambda x: x[0],
lambda x: get_batched_vars(x),
)
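# A minimal usage sketch of the objax mode (the module class and shapes are
# assumptions for illustration):
#
#   models = objax.ModuleList([MyModule() for _ in range(8)])
#   xs = np.ones((8, 4))
#   # fn receives one module and one row of xs; jax.vmap runs it across all 8
#   ys = batch_over_objax_list(
#       fn=lambda m, x: m(x),
#       inputs=[models, xs],
#       axes=[0, 0],
#       out_dim=1,
#   )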
# Explicit Batched objects mode
class Batched(objax.Module):
"""
Turns an array of objax modules / modulelist into a single object with batch parameters.
    This is faster than objax mode (no iteration over objects is required when batching, and jax can
    handle everything). However, this changes the way variables are stored, so it should be used
    cautiously.
"""
def __init__(self, mod_list: list):
# use list to hide from objax
self.templ_m = [mod_list[0]]
mod_list = objax.ModuleList(mod_list)
# Collect batched versions of all variables across mod_list
var_list = get_batched_vars(mod_list)
# Set each variable as a trainable var so that objax can find them
sv_names = get_state_var_names(mod_list)
for k, v in var_list.items():
if k in sv_names:
setattr(self, k, objax.StateVar(v))
else:
setattr(self, k, objax.TrainVar(v))
def batch_over_batched_list(fn, inputs, axes: list, out_dim: int):
"""Entry point for batching over a list of inputs that can contain explicted batched objects."""
# Identify which inputs are of type Batched
input_batched_flag = [type(i) == Batched for i in inputs]
return _batched(
fn,
inputs,
axes,
out_dim,
input_batched_flag,
lambda x: x.templ_m[0],
lambda x: remove_prefix_from_dict_keys(vc_to_dict(x.vars()), "(Batched)."),
    )
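# A minimal usage sketch of the explicit mode (the module class and shapes
# are assumptions for illustration):
#
#   batched_models = Batched([MyModule() for _ in range(8)])
#   xs = np.ones((8, 4))
#   ys = batch_over_batched_list(
#       fn=lambda m, x: m(x),
#       inputs=[batched_models, xs],
#       axes=[0, 0],
#       out_dim=1,
#   )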
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/locale/lang/de.js
'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'OK',
clear: 'Leeren'
},
datepicker: {
now: 'Jetzt',
today: 'Heute',
cancel: 'Abbrechen',
clear: 'Leeren',
confirm: 'OK',
selectDate: 'Datum wählen',
selectTime: 'Uhrzeit wählen',
startDate: 'Startdatum',
startTime: 'Startzeit',
endDate: 'Enddatum',
endTime: 'Endzeit',
prevYear: 'Letztes Jahr',
nextYear: 'Nächtes Jahr',
prevMonth: 'Letzter Monat',
nextMonth: 'Nächster Monat',
day: 'Tag',
week: 'Woche',
month: 'Monat',
year: '',
month1: 'Januar',
month2: 'Februar',
month3: 'März',
month4: 'April',
month5: 'Mai',
month6: 'Juni',
month7: 'Juli',
month8: 'August',
month9: 'September',
month10: 'Oktober',
month11: 'November',
month12: 'Dezember',
weeks: {
sun: 'So',
mon: 'Mo',
tue: 'Di',
wed: 'Mi',
thu: 'Do',
fri: 'Fr',
sat: 'Sa'
},
months: {
jan: 'Jan',
feb: 'Feb',
mar: 'Mär',
apr: 'Apr',
may: 'Mai',
jun: 'Jun',
jul: 'Jul',
aug: 'Aug',
sep: 'Sep',
oct: 'Okt',
nov: 'Nov',
dec: 'Dez'
}
},
select: {
loading: 'Lädt.',
noMatch: 'Nichts gefunden.',
noData: 'Keine Daten',
placeholder: 'Daten wählen'
},
cascader: {
noMatch: 'Nichts gefunden.',
loading: 'Lädt.',
placeholder: 'Daten wählen',
noData: 'Keine Daten'
},
pagination: {
goto: 'Gehe zu',
pagesize: ' pro Seite',
total: 'Gesamt {total}',
pageClassifier: ''
},
messagebox: {
confirm: 'OK',
cancel: 'Abbrechen',
error: 'Fehler'
},
upload: {
deleteTip: 'Klicke löschen zum entfernen',
delete: 'Löschen',
preview: 'Vorschau',
continue: 'Fortsetzen'
},
table: {
emptyText: 'Keine Daten',
confirmFilter: 'Anwenden',
resetFilter: 'Zurücksetzen',
clearFilter: 'Alles ',
sumText: 'Summe'
},
tree: {
emptyText: 'Keine Einträge'
},
transfer: {
noMatch: 'Nichts gefunden.',
noData: 'Keine Einträge',
titles: ['Liste 1', 'Liste 2'],
filterPlaceholder: 'Einträge filtern',
noCheckedFormat: '{total} Einträge',
hasCheckedFormat: '{checked}/{total} ausgewählt'
},
image: {
error: 'FAILED' // to be translated
},
pageHeader: {
title: 'Back' // to be translated
}
}
};
/Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/web/flow.py
from collections import OrderedDict
import simplejson as json
from acolyte.web import (
BaseWebHandler,
check_token
)
from acolyte.core.service import Result
from acolyte.core.job import ActionArg
class FlowMetaHandler(BaseWebHandler):
@check_token
def get(self, flow_meta_name):
"""查询某个FlowMeta的详情
"""
flow_service = self._("FlowService")
rs = flow_service.get_flow_meta_info(flow_meta_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
flow_templates_rs = flow_service\
.get_flow_templates_by_flow_meta_name(flow_meta_name)
self.render(
"flow_meta_details.html",
flow_meta_name=flow_meta_name,
flow_meta_details=rs.data,
flow_templates=flow_templates_rs.data
)
class ViewTemplateHandler(BaseWebHandler):
@check_token
def get(self, flow_template_id):
"""查询某个FlowTemplate的详情
"""
flow_service = self._("FlowService")
rs = flow_service.get_flow_template(flow_template_id)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
        # Check whether any bind args are set
bind_args = rs.data.bind_args
action_num, empty_num = 0, 0
for step_name in bind_args:
for action in bind_args[step_name]:
action_num += 1
if not bind_args[step_name][action]:
empty_num += 1
bind_args_json = json.dumps(
bind_args, indent=4, ensure_ascii=False)
config_json = json.dumps(
rs.data.config, indent=4, ensure_ascii=False)
return self.render(
"flow_template_details.html",
details=rs.data,
bind_args_empty=action_num == empty_num,
bind_args_json=bind_args_json,
config_json=config_json
)
class CreateTemplateHandler(BaseWebHandler):
@check_token
def get(self):
"""显示创建flow template页面
"""
flow_meta_name = self.get_query_argument("meta")
flow_service = self._("FlowService")
rs = flow_service.get_flow_meta_info(flow_meta_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
flow_meta_details = rs.data
return self.render(
"create_flow_template.html",
flow_meta_details=flow_meta_details,
bind_args=self._render_bind_args_tpl(flow_meta_details)
)
def _render_bind_args_tpl(self, flow_meta_details):
"""渲染bind_args模板
"""
job_mgr = self._("job_manager")
bind_args = OrderedDict()
for job_ref in flow_meta_details.jobs:
job_name = job_ref.job_name
job_define = job_mgr.get(job_name)
bind_args[job_ref.step_name] = {
action: self._render_act_args_tpl(job_define.job_args[action])
for action in job_define.job_args}
return bind_args
def _render_act_args_tpl(self, action_args):
return {
a.name: {
"type": a.field_info.type.__name__,
"value": a.field_info.default,
"mark": a.mark,
"comment": a.comment
} for a in action_args
if a.mark != ActionArg.MARK_CONST
}
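    # For illustration, the rendered bind_args template roughly looks like
    # the following (the step, action and argument names are hypothetical):
    #
    #   {
    #       "build_step": {
    #           "trigger": {
    #               "branch": {"type": "str", "value": "master",
    #                          "mark": ..., "comment": "..."}
    #           }
    #       }
    #   }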
@check_token
def post(self):
"""执行创建, 需要Ajax请求
"""
(
follow_meta_name,
name,
max_run_instance,
config,
bind_args
) = self._form(
"flow_meta",
"name",
"max_run_instance",
"config",
"bind_args"
)
config = _parse_json(config)
        # Failed to parse the config JSON
if config is None:
self._output_result(Result.bad_request(
"invalid_config_fmt", msg="Config JSON格式有误"))
return
bind_args = _parse_json(bind_args)
        # Failed to parse the bind_args JSON
if bind_args is None:
self._output_result(Result.bad_request(
"invalid_bind_args_fmt", msg="Bind args JSON格式有误"))
return
rs = self._("FlowService").create_flow_template(
flow_meta_name=follow_meta_name,
name=name,
bind_args=bind_args,
max_run_instance=max_run_instance,
config=config,
creator=self.request.current_user.id
)
self._output_result(rs)
class ModifyTemplateHandler(BaseWebHandler):
@check_token
def post(self):
"""修改Flow template配置
"""
(
tpl_id,
name,
bind_args_json,
max_run_instance,
config_json
) = self._form(
"tpl_id",
"name",
"bind_args",
"max_run_instance",
"config"
)
config = _parse_json(config_json)
        # Config parse error
if config is None:
self._output_result(Result.bad_request(
"invalid_config_fmt", msg="Config JSON格式有误"))
return
bind_args = _parse_json(bind_args_json)
        # Bind args parse error
if bind_args is None:
self._output_result(Result.bad_request(
"invalid_bind_args_fmt", msg="Bind args JSON格式有误"))
return
        # Perform the modification
rs = self._("FlowService").modify_flow_template(
flow_tpl_id=tpl_id,
name=name,
bind_args=bind_args,
max_run_instance=max_run_instance,
config=config
)
self._output_result(rs)
class ViewFlowInstanceHandler(BaseWebHandler):
@check_token
def get(self, flow_instance_id):
"""FlowInstance终端页
"""
flow_service = self._("FlowService")
job_mgr = self._("job_manager")
rs = flow_service.get_flow_instance_details(flow_instance_id)
if rs.is_success():
flow_meta_info = flow_service.get_flow_meta_info(
rs.data.flow_tpl.flow_meta_name).data
jobs = {
job_ref.job_name: job_mgr.get(job_ref.job_name)
for job_ref in flow_meta_info.jobs
}
steps = {step.step_name: step for step in rs.data.steps}
self.render(
"flow_instance_details.html",
details=rs.data,
flow_meta_info=flow_meta_info,
jobs=jobs,
steps=steps
)
else:
self.render("tip.html", msg=rs.msg)
def _parse_json(json_str):
if not json_str:
return {}
else:
try:
return json.loads(json_str)
        except (ValueError, TypeError):
return None
class DiscardFlowInstanceHandler(BaseWebHandler):
@check_token
def post(self):
flow_exec_service = self._("FlowExecutorService")
flow_instance_id, reason = self._form("flow_instance_id", "reason")
actor_id = self.request.current_user.id
rs = flow_exec_service.discard_flow_instance(
int(flow_instance_id), actor_id, reason)
self._output_result(rs)
class ViewFlowGroupDetailsHandler(BaseWebHandler):
@check_token
def get(self, group_id):
flow_service = self._("FlowService")
rs = flow_service\
.get_flow_instance_group_details(group_id)
self.render("flow_group_details.html", details=rs.data) | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/eclib/pstatbar.py
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: pstatbar.py 66840 2011-02-03 21:05:28Z CJP $"
__revision__ = "$Revision: 66840 $"
__all__ = ["ProgressStatusBar",]
#--------------------------------------------------------------------------#
# Dependancies
import wx
#--------------------------------------------------------------------------#
# Globals
#--------------------------------------------------------------------------#
class ProgressStatusBar(wx.StatusBar):
"""Custom StatusBar with a built-in progress bar"""
def __init__(self, parent, id_=wx.ID_ANY,
style=wx.SB_FLAT,
name="ProgressStatusBar"):
"""Creates a status bar that can hide and show a progressbar
in the far right section. The size of the progressbar is also
determined by the size of the right most section.
@param parent: Frame this status bar belongs to
"""
super(ProgressStatusBar, self).__init__(parent, id_, style, name)
# Attributes
self._changed = False # position has changed ?
self.busy = False # Bar in busy mode ?
self.stop = False # Stop flag to stop progress from other threads
self.progress = 0 # Current progress value of the bar
self.range = 0 # Range of progress indicator
self.tmp = None # Temp for text that may be pushed when busy
self.timer = wx.Timer(self)
self.prog = wx.Gauge(self, style=wx.GA_HORIZONTAL)
self.prog.Hide()
# Layout
self.SetFieldsCount(2)
self.SetStatusWidths([-1, 155])
# Event Handlers
self.Bind(wx.EVT_IDLE, lambda evt: self.__Reposition())
self.Bind(wx.EVT_TIMER, self.OnTimer)
self.Bind(wx.EVT_SIZE, self.OnSize)
def __del__(self):
"""Make sure the timer is stopped
@postcondition: timer is cleaned up
"""
if self.timer.IsRunning():
self.timer.Stop()
def __Reposition(self):
"""Does the actual repositioning of progress bar
@postcondition: Progress bar is repostioned inside right most field
"""
if self._changed:
rect = self.GetFieldRect(self.GetFieldsCount() - 1)
self.prog.SetPosition((rect.x + 2, rect.y + 2))
self.prog.SetSize((rect.width - 8, rect.height - 4))
self._changed = False
def _UpdateRange(self, range):
"""Update the internal progress gauges range
@param range: int
"""
self.range = range
try:
self.prog.SetRange(range)
except OverflowError:
# range too large, scale everything to 100
self.prog.SetRange(100)
def _UpdateValue(self, value):
"""Update the internal progress gauges value
@param range: int
"""
# Ensure value is within range
range = self.prog.GetRange()
if range != self.range: # need to scale value
value = int((float(value) / float(range)) * 100)
self.progress = value
self.prog.SetValue(value)
#---- Public Methods ----#
def Destroy(self):
"""Destroy the control"""
if self.timer.IsRunning():
self.timer.Stop()
del self.timer
super(ProgressStatusBar, self).Destroy()
def DoStop(self):
"""Stop any progress indication action and hide the bar"""
self.timer.Stop()
self.ShowProgress(False)
self.prog.SetValue(0) # Reset progress value
self.busy = False
self.stop = False
# Restore any status text that was sent while busy
if self.tmp is not None:
self.SetStatusText(self.tmp, self.GetFieldsCount() - 1)
self.tmp = None
def GetGauge(self):
"""Return the wx.Gauge used by this window
@return: wx.Gauge
"""
return self.prog
def GetProgress(self):
"""Get the progress of the progress bar
@return: int
"""
return self.prog.GetValue()
def GetRange(self):
"""Get the what the range of the progress bar is
@return: int
"""
return self.prog.GetRange()
def IsBusy(self):
"""Is the progress indicator busy or not
@return: bool
"""
return self.timer.IsRunning()
def OnSize(self, evt):
"""Reposition progress bar on resize
@param evt: wx.EVT_SIZE
"""
self.__Reposition()
self._changed = True
evt.Skip()
def OnTimer(self, evt):
"""Update the progress bar while the timer is running
@param evt: wx.EVT_TIMER
"""
# Check stop flag that can be set from non main thread
if self.stop:
self.DoStop()
return
if not self.prog.IsShown():
self.Stop()
if self.busy or self.progress < 0:
self.prog.Pulse()
else:
# Update the Range if it has changed
if self.range >= 0 and self.range != self.prog.GetRange():
self._UpdateRange(self.range)
# Update the progress value if it is less than the range
if self.progress <= self.range:
self._UpdateValue(self.progress)
def Run(self, rate=100):
"""Start the bar's timer to check for updates to progress
@keyword rate: rate at which to check for updates in msec
"""
if not self.timer.IsRunning():
self.timer.Start(rate)
def SetProgress(self, val):
"""Set the controls internal progress value that is reflected in the
progress bar when the timer next updates. Be sure to call Start before
calling this method if you want the changes to be visible. This method
can be called from non gui threads.
@param val: int
"""
self.progress = val
if val > 0 and wx.Thread_IsMain():
self._UpdateValue(val)
def SetRange(self, val):
"""Set the what the range of the progress bar is. This method can safely
be called from non gui threads.
@param val: int
"""
self.range = val
if val > 0 and wx.Thread_IsMain():
self._UpdateRange(val)
def ShowProgress(self, show=True):
"""Manually show or hide the progress bar
@keyword show: bool
"""
# If showing make sure bar is positioned properly
if show:
self.__Reposition()
self.prog.Show(show)
wx.GetApp().ProcessPendingEvents()
def SetStatusText(self, txt, number=0):
"""Override wx.StatusBar method to prevent text from being
put in when the progress indicator is running. Any text that
        comes when it is running is buffered to be displayed afterwards.
@param txt: Text to put on status bar
@keyword number: Section number to put text in
"""
if number == self.GetFieldsCount() - 1 and self.IsBusy():
if self.tmp is None:
self.tmp = txt
else:
try:
super(ProgressStatusBar, self).SetStatusText(txt, number)
except wx.PyAssertionError:
pass
# Alias for SetStatusText
PushStatusText = SetStatusText
def Start(self, rate=100):
"""Show and the progress indicator and start the timer
@keyword rate: rate to update progress bar in msec
"""
self.__Reposition()
bfield = self.GetFieldsCount() - 1
self.tmp = self.GetStatusText(bfield)
# Clear the progress field so the text doesn't show behind
# the progress indicator.
super(ProgressStatusBar, self).SetStatusText(u'', bfield)
self.stop = False
self.ShowProgress(True)
self.Run(rate)
def StartBusy(self, rate=100):
"""Show and start the progress indicator in pulse mode
@keyword rate: interval to pulse indicator at in msec
"""
self.busy = True
self.Start(rate)
def Stop(self):
"""Stop and hide the progress bar. This method may safely be called
from background threads.
@precondition: Bar is already running
"""
if wx.Thread_IsMain():
self.DoStop()
else:
self.stop = True # Set flag from non main thread
self.progress = 0
def StopBusy(self):
"""Stop and hide the progress indicator
@postcondition: Progress bar is hidden from view
"""
self.busy = False
        self.Stop()
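# A minimal usage sketch (the surrounding frame/app wiring is an assumption,
# not part of this module):
#
#   app = wx.App(False)
#   frame = wx.Frame(None, wx.ID_ANY, "Demo")
#   sbar = ProgressStatusBar(frame)
#   frame.SetStatusBar(sbar)
#   sbar.SetRange(100)
#   sbar.Start()          # show the gauge and begin polling for updates
#   sbar.SetProgress(42)  # safe to call from a worker thread
#   frame.Show()
#   app.MainLoop()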
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/@types/node/fs.d.ts
declare module 'fs' {
import * as stream from 'node:stream';
import { Abortable, EventEmitter } from 'node:events';
import { URL } from 'node:url';
import * as promises from 'node:fs/promises';
export { promises };
/**
* Valid types for path values in "fs".
*/
export type PathLike = string | Buffer | URL;
export type PathOrFileDescriptor = PathLike | number;
export type TimeLike = string | number | Date;
export type NoParamCallback = (err: NodeJS.ErrnoException | null) => void;
export type BufferEncodingOption =
| 'buffer'
| {
encoding: 'buffer';
};
export interface ObjectEncodingOptions {
encoding?: BufferEncoding | null | undefined;
}
export type EncodingOption = ObjectEncodingOptions | BufferEncoding | undefined | null;
export type OpenMode = number | string;
export type Mode = number | string;
export interface StatsBase<T> {
isFile(): boolean;
isDirectory(): boolean;
isBlockDevice(): boolean;
isCharacterDevice(): boolean;
isSymbolicLink(): boolean;
isFIFO(): boolean;
isSocket(): boolean;
dev: T;
ino: T;
mode: T;
nlink: T;
uid: T;
gid: T;
rdev: T;
size: T;
blksize: T;
blocks: T;
atimeMs: T;
mtimeMs: T;
ctimeMs: T;
birthtimeMs: T;
atime: Date;
mtime: Date;
ctime: Date;
birthtime: Date;
}
export interface Stats extends StatsBase<number> {}
/**
* A `fs.Stats` object provides information about a file.
*
* Objects returned from {@link stat}, {@link lstat} and {@link fstat} and
* their synchronous counterparts are of this type.
* If `bigint` in the `options` passed to those methods is true, the numeric values
* will be `bigint` instead of `number`, and the object will contain additional
* nanosecond-precision properties suffixed with `Ns`.
*
* ```console
* Stats {
* dev: 2114,
* ino: 48064969,
* mode: 33188,
* nlink: 1,
* uid: 85,
* gid: 100,
* rdev: 0,
* size: 527,
* blksize: 4096,
* blocks: 8,
* atimeMs: 1318289051000.1,
* mtimeMs: 1318289051000.1,
* ctimeMs: 1318289051000.1,
* birthtimeMs: 1318289051000.1,
* atime: Mon, 10 Oct 2011 23:24:11 GMT,
* mtime: Mon, 10 Oct 2011 23:24:11 GMT,
* ctime: Mon, 10 Oct 2011 23:24:11 GMT,
* birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
* ```
*
* `bigint` version:
*
* ```console
* BigIntStats {
* dev: 2114n,
* ino: 48064969n,
* mode: 33188n,
* nlink: 1n,
* uid: 85n,
* gid: 100n,
* rdev: 0n,
* size: 527n,
* blksize: 4096n,
* blocks: 8n,
* atimeMs: 1318289051000n,
* mtimeMs: 1318289051000n,
* ctimeMs: 1318289051000n,
* birthtimeMs: 1318289051000n,
* atimeNs: 1318289051000000000n,
* mtimeNs: 1318289051000000000n,
* ctimeNs: 1318289051000000000n,
* birthtimeNs: 1318289051000000000n,
* atime: Mon, 10 Oct 2011 23:24:11 GMT,
* mtime: Mon, 10 Oct 2011 23:24:11 GMT,
* ctime: Mon, 10 Oct 2011 23:24:11 GMT,
* birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
* ```
* @since v0.1.21
*/
export class Stats {}
export interface StatsFsBase<T> {
/** Type of file system. */
type: T;
/** Optimal transfer block size. */
bsize: T;
/** Total data blocks in file system. */
blocks: T;
/** Free blocks in file system. */
bfree: T;
/** Available blocks for unprivileged users */
bavail: T;
/** Total file nodes in file system. */
files: T;
/** Free file nodes in file system. */
ffree: T;
}
export interface StatsFs extends StatsFsBase<number> {}
/**
* Provides information about a mounted file system
*
* Objects returned from {@link statfs} and {@link statfsSync} are of this type.
* If `bigint` in the `options` passed to those methods is true, the numeric values
* will be `bigint` instead of `number`.
* @since v18.15.0
*/
export class StatsFs {}
export interface BigIntStatsFs extends StatsFsBase<bigint> {}
export interface StatFsOptions {
bigint?: boolean | undefined;
}
/**
* A representation of a directory entry, which can be a file or a subdirectory
* within the directory, as returned by reading from an `fs.Dir`. The
* directory entry is a combination of the file name and file type pairs.
*
* Additionally, when {@link readdir} or {@link readdirSync} is called with
     * the `withFileTypes` option set to `true`, the resulting array is filled with `fs.Dirent` objects, rather than strings or `Buffer`s.
* @since v10.10.0
*/
export class Dirent {
/**
* Returns `true` if the `fs.Dirent` object describes a regular file.
* @since v10.10.0
*/
isFile(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a file system
* directory.
* @since v10.10.0
*/
isDirectory(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a block device.
* @since v10.10.0
*/
isBlockDevice(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a character device.
* @since v10.10.0
*/
isCharacterDevice(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a symbolic link.
* @since v10.10.0
*/
isSymbolicLink(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a first-in-first-out
* (FIFO) pipe.
* @since v10.10.0
*/
isFIFO(): boolean;
/**
* Returns `true` if the `fs.Dirent` object describes a socket.
* @since v10.10.0
*/
isSocket(): boolean;
/**
* The file name that this `fs.Dirent` object refers to. The type of this
* value is determined by the `options.encoding` passed to {@link readdir} or {@link readdirSync}.
* @since v10.10.0
*/
name: string;
}
/**
* A class representing a directory stream.
*
* Created by {@link opendir}, {@link opendirSync}, or `fsPromises.opendir()`.
*
* ```js
* import { opendir } from 'fs/promises';
*
* try {
* const dir = await opendir('./');
* for await (const dirent of dir)
* console.log(dirent.name);
* } catch (err) {
* console.error(err);
* }
* ```
*
* When using the async iterator, the `fs.Dir` object will be automatically
* closed after the iterator exits.
* @since v12.12.0
*/
export class Dir implements AsyncIterable<Dirent> {
/**
         * The read-only path of this directory as was provided to {@link opendir}, {@link opendirSync}, or `fsPromises.opendir()`.
* @since v12.12.0
*/
readonly path: string;
/**
* Asynchronously iterates over the directory via `readdir(3)` until all entries have been read.
*/
[Symbol.asyncIterator](): AsyncIterableIterator<Dirent>;
/**
* Asynchronously close the directory's underlying resource handle.
* Subsequent reads will result in errors.
*
* A promise is returned that will be resolved after the resource has been
* closed.
* @since v12.12.0
*/
close(): Promise<void>;
close(cb: NoParamCallback): void;
/**
* Synchronously close the directory's underlying resource handle.
* Subsequent reads will result in errors.
* @since v12.12.0
*/
closeSync(): void;
/**
* Asynchronously read the next directory entry via [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) as an `fs.Dirent`.
*
         * A promise is returned that will be resolved with an `fs.Dirent`, or `null` if there are no more directory entries to read.
*
* Directory entries returned by this function are in no particular order as
* provided by the operating system's underlying directory mechanisms.
* Entries added or removed while iterating over the directory might not be
* included in the iteration results.
* @since v12.12.0
* @return containing {fs.Dirent|null}
*/
read(): Promise<Dirent | null>;
read(cb: (err: NodeJS.ErrnoException | null, dirEnt: Dirent | null) => void): void;
/**
* Synchronously read the next directory entry as an `fs.Dirent`. See the
* POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more detail.
*
* If there are no more directory entries to read, `null` will be returned.
*
* Directory entries returned by this function are in no particular order as
* provided by the operating system's underlying directory mechanisms.
* Entries added or removed while iterating over the directory might not be
* included in the iteration results.
* @since v12.12.0
*/
readSync(): Dirent | null;
}
/**
* Class: fs.StatWatcher
* @since v14.3.0, v12.20.0
* Extends `EventEmitter`
* A successful call to {@link watchFile} method will return a new fs.StatWatcher object.
*/
export interface StatWatcher extends EventEmitter {
/**
* When called, requests that the Node.js event loop _not_ exit so long as the `fs.StatWatcher` is active. Calling `watcher.ref()` multiple times will have
* no effect.
*
* By default, all `fs.StatWatcher` objects are "ref'ed", making it normally
* unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
* called previously.
* @since v14.3.0, v12.20.0
*/
ref(): this;
/**
* When called, the active `fs.StatWatcher` object will not require the Node.js
* event loop to remain active. If there is no other activity keeping the
* event loop running, the process may exit before the `fs.StatWatcher` object's
* callback is invoked. Calling `watcher.unref()` multiple times will have
* no effect.
* @since v14.3.0, v12.20.0
*/
unref(): this;
}
export interface FSWatcher extends EventEmitter {
/**
* Stop watching for changes on the given `fs.FSWatcher`. Once stopped, the `fs.FSWatcher` object is no longer usable.
* @since v0.5.8
*/
close(): void;
/**
* events.EventEmitter
* 1. change
* 2. error
*/
addListener(event: string, listener: (...args: any[]) => void): this;
addListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
addListener(event: 'error', listener: (error: Error) => void): this;
addListener(event: 'close', listener: () => void): this;
on(event: string, listener: (...args: any[]) => void): this;
on(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
on(event: 'error', listener: (error: Error) => void): this;
on(event: 'close', listener: () => void): this;
once(event: string, listener: (...args: any[]) => void): this;
once(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
once(event: 'error', listener: (error: Error) => void): this;
once(event: 'close', listener: () => void): this;
prependListener(event: string, listener: (...args: any[]) => void): this;
prependListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
prependListener(event: 'error', listener: (error: Error) => void): this;
prependListener(event: 'close', listener: () => void): this;
prependOnceListener(event: string, listener: (...args: any[]) => void): this;
prependOnceListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
prependOnceListener(event: 'error', listener: (error: Error) => void): this;
prependOnceListener(event: 'close', listener: () => void): this;
}
/**
* Instances of `fs.ReadStream` are created and returned using the {@link createReadStream} function.
* @since v0.1.93
*/
export class ReadStream extends stream.Readable {
close(callback?: (err?: NodeJS.ErrnoException | null) => void): void;
/**
* The number of bytes that have been read so far.
* @since v6.4.0
*/
bytesRead: number;
/**
* The path to the file the stream is reading from as specified in the first
         * argument to `fs.createReadStream()`. If `path` is passed as a string, then `readStream.path` will be a string. If `path` is passed as a `Buffer`, then `readStream.path` will be a
         * `Buffer`. If `fd` is specified, then `readStream.path` will be `undefined`.
* @since v0.1.93
*/
path: string | Buffer;
/**
* This property is `true` if the underlying file has not been opened yet,
* i.e. before the `'ready'` event is emitted.
* @since v11.2.0, v10.16.0
*/
pending: boolean;
/**
* events.EventEmitter
* 1. open
* 2. close
* 3. ready
*/
addListener(event: 'close', listener: () => void): this;
addListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
addListener(event: 'end', listener: () => void): this;
addListener(event: 'error', listener: (err: Error) => void): this;
addListener(event: 'open', listener: (fd: number) => void): this;
addListener(event: 'pause', listener: () => void): this;
addListener(event: 'readable', listener: () => void): this;
addListener(event: 'ready', listener: () => void): this;
addListener(event: 'resume', listener: () => void): this;
addListener(event: string | symbol, listener: (...args: any[]) => void): this;
on(event: 'close', listener: () => void): this;
on(event: 'data', listener: (chunk: Buffer | string) => void): this;
on(event: 'end', listener: () => void): this;
on(event: 'error', listener: (err: Error) => void): this;
on(event: 'open', listener: (fd: number) => void): this;
on(event: 'pause', listener: () => void): this;
on(event: 'readable', listener: () => void): this;
on(event: 'ready', listener: () => void): this;
on(event: 'resume', listener: () => void): this;
on(event: string | symbol, listener: (...args: any[]) => void): this;
once(event: 'close', listener: () => void): this;
once(event: 'data', listener: (chunk: Buffer | string) => void): this;
once(event: 'end', listener: () => void): this;
once(event: 'error', listener: (err: Error) => void): this;
once(event: 'open', listener: (fd: number) => void): this;
once(event: 'pause', listener: () => void): this;
once(event: 'readable', listener: () => void): this;
once(event: 'ready', listener: () => void): this;
once(event: 'resume', listener: () => void): this;
once(event: string | symbol, listener: (...args: any[]) => void): this;
prependListener(event: 'close', listener: () => void): this;
prependListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
prependListener(event: 'end', listener: () => void): this;
prependListener(event: 'error', listener: (err: Error) => void): this;
prependListener(event: 'open', listener: (fd: number) => void): this;
prependListener(event: 'pause', listener: () => void): this;
prependListener(event: 'readable', listener: () => void): this;
prependListener(event: 'ready', listener: () => void): this;
prependListener(event: 'resume', listener: () => void): this;
prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
prependOnceListener(event: 'close', listener: () => void): this;
prependOnceListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
prependOnceListener(event: 'end', listener: () => void): this;
prependOnceListener(event: 'error', listener: (err: Error) => void): this;
prependOnceListener(event: 'open', listener: (fd: number) => void): this;
prependOnceListener(event: 'pause', listener: () => void): this;
prependOnceListener(event: 'readable', listener: () => void): this;
prependOnceListener(event: 'ready', listener: () => void): this;
prependOnceListener(event: 'resume', listener: () => void): this;
prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
}
/**
* * Extends `stream.Writable`
*
* Instances of `fs.WriteStream` are created and returned using the {@link createWriteStream} function.
* @since v0.1.93
*/
export class WriteStream extends stream.Writable {
/**
 * Closes `writeStream`. Optionally accepts a
 * callback that will be executed once the `writeStream` is closed.
* @since v0.9.4
*/
close(callback?: (err?: NodeJS.ErrnoException | null) => void): void;
/**
* The number of bytes written so far. Does not include data that is still queued
* for writing.
* @since v0.4.7
*/
bytesWritten: number;
/**
* The path to the file the stream is writing to as specified in the first
 * argument to {@link createWriteStream}. If `path` is passed as a string, then `writeStream.path` will be a string. If `path` is passed as a `Buffer`, then `writeStream.path` will be a
* `Buffer`.
* @since v0.1.93
*/
path: string | Buffer;
/**
* This property is `true` if the underlying file has not been opened yet,
* i.e. before the `'ready'` event is emitted.
* @since v11.2.0
*/
pending: boolean;
/**
* events.EventEmitter
* 1. open
* 2. close
* 3. ready
*/
addListener(event: 'close', listener: () => void): this;
addListener(event: 'drain', listener: () => void): this;
addListener(event: 'error', listener: (err: Error) => void): this;
addListener(event: 'finish', listener: () => void): this;
addListener(event: 'open', listener: (fd: number) => void): this;
addListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
addListener(event: 'ready', listener: () => void): this;
addListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
addListener(event: string | symbol, listener: (...args: any[]) => void): this;
on(event: 'close', listener: () => void): this;
on(event: 'drain', listener: () => void): this;
on(event: 'error', listener: (err: Error) => void): this;
on(event: 'finish', listener: () => void): this;
on(event: 'open', listener: (fd: number) => void): this;
on(event: 'pipe', listener: (src: stream.Readable) => void): this;
on(event: 'ready', listener: () => void): this;
on(event: 'unpipe', listener: (src: stream.Readable) => void): this;
on(event: string | symbol, listener: (...args: any[]) => void): this;
once(event: 'close', listener: () => void): this;
once(event: 'drain', listener: () => void): this;
once(event: 'error', listener: (err: Error) => void): this;
once(event: 'finish', listener: () => void): this;
once(event: 'open', listener: (fd: number) => void): this;
once(event: 'pipe', listener: (src: stream.Readable) => void): this;
once(event: 'ready', listener: () => void): this;
once(event: 'unpipe', listener: (src: stream.Readable) => void): this;
once(event: string | symbol, listener: (...args: any[]) => void): this;
prependListener(event: 'close', listener: () => void): this;
prependListener(event: 'drain', listener: () => void): this;
prependListener(event: 'error', listener: (err: Error) => void): this;
prependListener(event: 'finish', listener: () => void): this;
prependListener(event: 'open', listener: (fd: number) => void): this;
prependListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
prependListener(event: 'ready', listener: () => void): this;
prependListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
prependOnceListener(event: 'close', listener: () => void): this;
prependOnceListener(event: 'drain', listener: () => void): this;
prependOnceListener(event: 'error', listener: (err: Error) => void): this;
prependOnceListener(event: 'finish', listener: () => void): this;
prependOnceListener(event: 'open', listener: (fd: number) => void): this;
prependOnceListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
prependOnceListener(event: 'ready', listener: () => void): this;
prependOnceListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
}
/**
* Asynchronously rename file at `oldPath` to the pathname provided
 * as `newPath`. If `newPath` already exists, it will
* be overwritten. If there is a directory at `newPath`, an error will
* be raised instead. No arguments other than a possible exception are
* given to the completion callback.
*
* See also: [`rename(2)`](http://man7.org/linux/man-pages/man2/rename.2.html).
*
* ```js
* import { rename } from 'fs';
*
* rename('oldFile.txt', 'newFile.txt', (err) => {
* if (err) throw err;
* console.log('Rename complete!');
* });
* ```
* @since v0.0.2
*/
export function rename(oldPath: PathLike, newPath: PathLike, callback: NoParamCallback): void;
export namespace rename {
/**
* Asynchronous rename(2) - Change the name or location of a file or directory.
* @param oldPath A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
* @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
*/
function __promisify__(oldPath: PathLike, newPath: PathLike): Promise<void>;
}
/**
* Renames the file from `oldPath` to `newPath`. Returns `undefined`.
*
* See the POSIX [`rename(2)`](http://man7.org/linux/man-pages/man2/rename.2.html) documentation for more details.
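 *
 * A minimal sketch (file names are hypothetical); unlike the callback form, errors are thrown:
 *
 * ```js
 * import { renameSync } from 'fs';
 *
 * renameSync('oldFile.txt', 'newFile.txt');
 * ```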
* @since v0.1.21
*/
export function renameSync(oldPath: PathLike, newPath: PathLike): void;
/**
* Truncates the file. No arguments other than a possible exception are
* given to the completion callback. A file descriptor can also be passed as the
* first argument. In this case, `fs.ftruncate()` is called.
*
* ```js
* import { truncate } from 'fs';
* // Assuming that 'path/file.txt' is a regular file.
* truncate('path/file.txt', (err) => {
* if (err) throw err;
* console.log('path/file.txt was truncated');
* });
* ```
*
* Passing a file descriptor is deprecated and may result in an error being thrown
* in the future.
*
* See the POSIX [`truncate(2)`](http://man7.org/linux/man-pages/man2/truncate.2.html) documentation for more details.
* @since v0.8.6
* @param [len=0]
*/
export function truncate(path: PathLike, len: number | undefined | null, callback: NoParamCallback): void;
/**
* Asynchronous truncate(2) - Truncate a file to a specified length.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function truncate(path: PathLike, callback: NoParamCallback): void;
export namespace truncate {
/**
* Asynchronous truncate(2) - Truncate a file to a specified length.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param len If not specified, defaults to `0`.
*/
function __promisify__(path: PathLike, len?: number | null): Promise<void>;
}
/**
* Truncates the file. Returns `undefined`. A file descriptor can also be
* passed as the first argument. In this case, `fs.ftruncateSync()` is called.
*
* Passing a file descriptor is deprecated and may result in an error being thrown
* in the future.
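 *
 * A minimal sketch (the path is hypothetical); keeps only the first 4 bytes:
 *
 * ```js
 * import { truncateSync } from 'fs';
 *
 * truncateSync('path/file.txt', 4);
 * ```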
* @since v0.8.6
* @param [len=0]
*/
export function truncateSync(path: PathLike, len?: number | null): void;
/**
* Truncates the file descriptor. No arguments other than a possible exception are
* given to the completion callback.
*
* See the POSIX [`ftruncate(2)`](http://man7.org/linux/man-pages/man2/ftruncate.2.html) documentation for more detail.
*
* If the file referred to by the file descriptor was larger than `len` bytes, only
* the first `len` bytes will be retained in the file.
*
* For example, the following program retains only the first four bytes of the
* file:
*
* ```js
* import { open, close, ftruncate } from 'fs';
*
* function closeFd(fd) {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
*
* open('temp.txt', 'r+', (err, fd) => {
* if (err) throw err;
*
* try {
* ftruncate(fd, 4, (err) => {
* closeFd(fd);
* if (err) throw err;
* });
* } catch (err) {
* closeFd(fd);
* if (err) throw err;
* }
* });
* ```
*
 * If the file previously was shorter than `len` bytes, it is extended, and the
 * extended part is filled with null bytes (`'\0'`).
*
* If `len` is negative then `0` will be used.
* @since v0.8.6
* @param [len=0]
*/
export function ftruncate(fd: number, len: number | undefined | null, callback: NoParamCallback): void;
/**
* Asynchronous ftruncate(2) - Truncate a file to a specified length.
* @param fd A file descriptor.
*/
export function ftruncate(fd: number, callback: NoParamCallback): void;
export namespace ftruncate {
/**
* Asynchronous ftruncate(2) - Truncate a file to a specified length.
* @param fd A file descriptor.
* @param len If not specified, defaults to `0`.
*/
function __promisify__(fd: number, len?: number | null): Promise<void>;
}
/**
* Truncates the file descriptor. Returns `undefined`.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link ftruncate}.
* @since v0.8.6
* @param [len=0]
*/
export function ftruncateSync(fd: number, len?: number | null): void;
/**
* Asynchronously changes owner and group of a file. No arguments other than a
* possible exception are given to the completion callback.
*
* See the POSIX [`chown(2)`](http://man7.org/linux/man-pages/man2/chown.2.html) documentation for more detail.
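 *
 * A minimal sketch (the path, uid and gid are hypothetical; changing ownership usually
 * requires elevated privileges):
 *
 * ```js
 * import { chown } from 'fs';
 *
 * chown('my_file.txt', 1000, 1000, (err) => {
 *   if (err) throw err;
 * });
 * ```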
* @since v0.1.97
*/
export function chown(path: PathLike, uid: number, gid: number, callback: NoParamCallback): void;
export namespace chown {
/**
* Asynchronous chown(2) - Change ownership of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(path: PathLike, uid: number, gid: number): Promise<void>;
}
/**
* Synchronously changes owner and group of a file. Returns `undefined`.
* This is the synchronous version of {@link chown}.
*
* See the POSIX [`chown(2)`](http://man7.org/linux/man-pages/man2/chown.2.html) documentation for more detail.
* @since v0.1.97
*/
export function chownSync(path: PathLike, uid: number, gid: number): void;
/**
* Sets the owner of the file. No arguments other than a possible exception are
* given to the completion callback.
*
* See the POSIX [`fchown(2)`](http://man7.org/linux/man-pages/man2/fchown.2.html) documentation for more detail.
* @since v0.4.7
*/
export function fchown(fd: number, uid: number, gid: number, callback: NoParamCallback): void;
export namespace fchown {
/**
* Asynchronous fchown(2) - Change ownership of a file.
* @param fd A file descriptor.
*/
function __promisify__(fd: number, uid: number, gid: number): Promise<void>;
}
/**
* Sets the owner of the file. Returns `undefined`.
*
* See the POSIX [`fchown(2)`](http://man7.org/linux/man-pages/man2/fchown.2.html) documentation for more detail.
* @since v0.4.7
* @param uid The file's new owner's user id.
* @param gid The file's new group's group id.
*/
export function fchownSync(fd: number, uid: number, gid: number): void;
/**
* Set the owner of the symbolic link. No arguments other than a possible
* exception are given to the completion callback.
*
* See the POSIX [`lchown(2)`](http://man7.org/linux/man-pages/man2/lchown.2.html) documentation for more detail.
*/
export function lchown(path: PathLike, uid: number, gid: number, callback: NoParamCallback): void;
export namespace lchown {
/**
* Asynchronous lchown(2) - Change ownership of a file. Does not dereference symbolic links.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(path: PathLike, uid: number, gid: number): Promise<void>;
}
/**
* Set the owner for the path. Returns `undefined`.
*
* See the POSIX [`lchown(2)`](http://man7.org/linux/man-pages/man2/lchown.2.html) documentation for more details.
* @param uid The file's new owner's user id.
* @param gid The file's new group's group id.
*/
export function lchownSync(path: PathLike, uid: number, gid: number): void;
/**
* Changes the access and modification times of a file in the same way as {@link utimes}, with the difference that if the path refers to a symbolic
* link, then the link is not dereferenced: instead, the timestamps of the
* symbolic link itself are changed.
*
* No arguments other than a possible exception are given to the completion
* callback.
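 *
 * A minimal sketch (the link path is hypothetical):
 *
 * ```js
 * import { lutimes } from 'fs';
 *
 * const now = new Date();
 * // Update the timestamps of the symlink itself, not its target.
 * lutimes('./some-symlink', now, now, (err) => {
 *   if (err) throw err;
 * });
 * ```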
* @since v14.5.0, v12.19.0
*/
export function lutimes(path: PathLike, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace lutimes {
/**
* Changes the access and modification times of a file in the same way as `fsPromises.utimes()`,
* with the difference that if the path refers to a symbolic link, then the link is not
* dereferenced: instead, the timestamps of the symbolic link itself are changed.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param atime The last access time. If a string is provided, it will be coerced to number.
* @param mtime The last modified time. If a string is provided, it will be coerced to number.
*/
function __promisify__(path: PathLike, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
* Change the file system timestamps of the symbolic link referenced by `path`.
* Returns `undefined`, or throws an exception when parameters are incorrect or
* the operation fails. This is the synchronous version of {@link lutimes}.
* @since v14.5.0, v12.19.0
*/
export function lutimesSync(path: PathLike, atime: TimeLike, mtime: TimeLike): void;
/**
* Asynchronously changes the permissions of a file. No arguments other than a
* possible exception are given to the completion callback.
*
* See the POSIX [`chmod(2)`](http://man7.org/linux/man-pages/man2/chmod.2.html) documentation for more detail.
*
* ```js
* import { chmod } from 'fs';
*
* chmod('my_file.txt', 0o775, (err) => {
* if (err) throw err;
* console.log('The permissions for file "my_file.txt" have been changed!');
* });
* ```
* @since v0.1.30
*/
export function chmod(path: PathLike, mode: Mode, callback: NoParamCallback): void;
export namespace chmod {
/**
* Asynchronous chmod(2) - Change permissions of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param mode A file mode. If a string is passed, it is parsed as an octal integer.
*/
function __promisify__(path: PathLike, mode: Mode): Promise<void>;
}
/**
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link chmod}.
*
* See the POSIX [`chmod(2)`](http://man7.org/linux/man-pages/man2/chmod.2.html) documentation for more detail.
* @since v0.6.7
*/
export function chmodSync(path: PathLike, mode: Mode): void;
/**
* Sets the permissions on the file. No arguments other than a possible exception
* are given to the completion callback.
*
* See the POSIX [`fchmod(2)`](http://man7.org/linux/man-pages/man2/fchmod.2.html) documentation for more detail.
* @since v0.4.7
*/
export function fchmod(fd: number, mode: Mode, callback: NoParamCallback): void;
export namespace fchmod {
/**
* Asynchronous fchmod(2) - Change permissions of a file.
* @param fd A file descriptor.
* @param mode A file mode. If a string is passed, it is parsed as an octal integer.
*/
function __promisify__(fd: number, mode: Mode): Promise<void>;
}
/**
* Sets the permissions on the file. Returns `undefined`.
*
* See the POSIX [`fchmod(2)`](http://man7.org/linux/man-pages/man2/fchmod.2.html) documentation for more detail.
* @since v0.4.7
*/
export function fchmodSync(fd: number, mode: Mode): void;
/**
* Changes the permissions on a symbolic link. No arguments other than a possible
* exception are given to the completion callback.
*
* This method is only implemented on macOS.
*
* See the POSIX [`lchmod(2)`](https://www.freebsd.org/cgi/man.cgi?query=lchmod&sektion=2) documentation for more detail.
* @deprecated Since v0.4.7
*/
export function lchmod(path: PathLike, mode: Mode, callback: NoParamCallback): void;
/** @deprecated */
export namespace lchmod {
/**
* Asynchronous lchmod(2) - Change permissions of a file. Does not dereference symbolic links.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param mode A file mode. If a string is passed, it is parsed as an octal integer.
*/
function __promisify__(path: PathLike, mode: Mode): Promise<void>;
}
/**
* Changes the permissions on a symbolic link. Returns `undefined`.
*
* This method is only implemented on macOS.
*
* See the POSIX [`lchmod(2)`](https://www.freebsd.org/cgi/man.cgi?query=lchmod&sektion=2) documentation for more detail.
* @deprecated Since v0.4.7
*/
export function lchmodSync(path: PathLike, mode: Mode): void;
/**
 * Asynchronous [`stat(2)`](http://man7.org/linux/man-pages/man2/stat.2.html). The callback gets two arguments `(err, stats)` where `stats` is an `fs.Stats` object.
*
* In case of an error, the `err.code` will be one of `Common System Errors`.
*
 * Using `fs.stat()` to check for the existence of a file before calling `fs.open()`, `fs.readFile()` or `fs.writeFile()` is not recommended.
* Instead, user code should open/read/write the file directly and handle the
* error raised if the file is not available.
*
* To check if a file exists without manipulating it afterwards, {@link access} is recommended.
*
* For example, given the following directory structure:
*
* ```text
* - txtDir
* -- file.txt
* - app.js
* ```
*
 * The following program checks the stats of the given paths:
*
* ```js
* import { stat } from 'fs';
*
* const pathsToCheck = ['./txtDir', './txtDir/file.txt'];
*
* for (let i = 0; i < pathsToCheck.length; i++) {
* stat(pathsToCheck[i], (err, stats) => {
* console.log(stats.isDirectory());
* console.log(stats);
* });
* }
* ```
*
* The resulting output will resemble:
*
* ```console
* true
* Stats {
* dev: 16777220,
* mode: 16877,
* nlink: 3,
* uid: 501,
* gid: 20,
* rdev: 0,
* blksize: 4096,
* ino: 14214262,
* size: 96,
* blocks: 0,
* atimeMs: 1561174653071.963,
* mtimeMs: 1561174614583.3518,
* ctimeMs: 1561174626623.5366,
* birthtimeMs: 1561174126937.2893,
* atime: 2019-06-22T03:37:33.072Z,
* mtime: 2019-06-22T03:36:54.583Z,
* ctime: 2019-06-22T03:37:06.624Z,
* birthtime: 2019-06-22T03:28:46.937Z
* }
* false
* Stats {
* dev: 16777220,
* mode: 33188,
* nlink: 1,
* uid: 501,
* gid: 20,
* rdev: 0,
* blksize: 4096,
* ino: 14214074,
* size: 8,
* blocks: 8,
* atimeMs: 1561174616618.8555,
* mtimeMs: 1561174614584,
* ctimeMs: 1561174614583.8145,
* birthtimeMs: 1561174007710.7478,
* atime: 2019-06-22T03:36:56.619Z,
* mtime: 2019-06-22T03:36:54.584Z,
* ctime: 2019-06-22T03:36:54.584Z,
* birthtime: 2019-06-22T03:26:47.711Z
* }
* ```
* @since v0.0.2
*/
export function stat(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
export function stat(
path: PathLike,
options:
| (StatOptions & {
bigint?: false | undefined;
})
| undefined,
callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
export function stat(
path: PathLike,
options: StatOptions & {
bigint: true;
},
callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function stat(path: PathLike, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace stat {
/**
* Asynchronous stat(2) - Get file status.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(
path: PathLike,
options?: StatOptions & {
bigint?: false | undefined;
}
): Promise<Stats>;
function __promisify__(
path: PathLike,
options: StatOptions & {
bigint: true;
}
): Promise<BigIntStats>;
function __promisify__(path: PathLike, options?: StatOptions): Promise<Stats | BigIntStats>;
}
export interface StatSyncFn extends Function {
(path: PathLike, options?: undefined): Stats;
(
path: PathLike,
options?: StatSyncOptions & {
bigint?: false | undefined;
throwIfNoEntry: false;
}
): Stats | undefined;
(
path: PathLike,
options: StatSyncOptions & {
bigint: true;
throwIfNoEntry: false;
}
): BigIntStats | undefined;
(
path: PathLike,
options?: StatSyncOptions & {
bigint?: false | undefined;
}
): Stats;
(
path: PathLike,
options: StatSyncOptions & {
bigint: true;
}
): BigIntStats;
(
path: PathLike,
options: StatSyncOptions & {
bigint: boolean;
throwIfNoEntry?: false | undefined;
}
): Stats | BigIntStats;
(path: PathLike, options?: StatSyncOptions): Stats | BigIntStats | undefined;
}
/**
* Synchronous stat(2) - Get file status.
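 *
 * A minimal sketch (the path is hypothetical); with `throwIfNoEntry: false`, a missing
 * path yields `undefined` instead of throwing:
 *
 * ```js
 * import { statSync } from 'fs';
 *
 * const stats = statSync('maybe-missing.txt', { throwIfNoEntry: false });
 * if (stats) {
 *   console.log(stats.size);
 * }
 * ```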
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export const statSync: StatSyncFn;
/**
* Invokes the callback with the `fs.Stats` for the file descriptor.
*
* See the POSIX [`fstat(2)`](http://man7.org/linux/man-pages/man2/fstat.2.html) documentation for more detail.
* @since v0.1.95
*/
export function fstat(fd: number, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
export function fstat(
fd: number,
options:
| (StatOptions & {
bigint?: false | undefined;
})
| undefined,
callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
export function fstat(
fd: number,
options: StatOptions & {
bigint: true;
},
callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function fstat(fd: number, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace fstat {
/**
* Asynchronous fstat(2) - Get file status.
* @param fd A file descriptor.
*/
function __promisify__(
fd: number,
options?: StatOptions & {
bigint?: false | undefined;
}
): Promise<Stats>;
function __promisify__(
fd: number,
options: StatOptions & {
bigint: true;
}
): Promise<BigIntStats>;
function __promisify__(fd: number, options?: StatOptions): Promise<Stats | BigIntStats>;
}
/**
* Retrieves the `fs.Stats` for the file descriptor.
*
* See the POSIX [`fstat(2)`](http://man7.org/linux/man-pages/man2/fstat.2.html) documentation for more detail.
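 *
 * A minimal sketch (the path is hypothetical); `bigint: true` returns a `BigIntStats`:
 *
 * ```js
 * import { openSync, fstatSync, closeSync } from 'fs';
 *
 * const fd = openSync('file.txt', 'r');
 * try {
 *   const stats = fstatSync(fd, { bigint: true });
 *   console.log(stats.size); // e.g. 8n
 * } finally {
 *   closeSync(fd);
 * }
 * ```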
* @since v0.1.95
*/
export function fstatSync(
fd: number,
options?: StatOptions & {
bigint?: false | undefined;
}
): Stats;
export function fstatSync(
fd: number,
options: StatOptions & {
bigint: true;
}
): BigIntStats;
export function fstatSync(fd: number, options?: StatOptions): Stats | BigIntStats;
/**
* Retrieves the `fs.Stats` for the symbolic link referred to by the path.
 * The callback gets two arguments `(err, stats)` where `stats` is an `fs.Stats` object. `lstat()` is identical to `stat()`, except that if `path` is a symbolic
* link, then the link itself is stat-ed, not the file that it refers to.
*
* See the POSIX [`lstat(2)`](http://man7.org/linux/man-pages/man2/lstat.2.html) documentation for more details.
* @since v0.1.30
*/
export function lstat(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
export function lstat(
path: PathLike,
options:
| (StatOptions & {
bigint?: false | undefined;
})
| undefined,
callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
export function lstat(
path: PathLike,
options: StatOptions & {
bigint: true;
},
callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function lstat(path: PathLike, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace lstat {
/**
* Asynchronous lstat(2) - Get file status. Does not dereference symbolic links.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(
path: PathLike,
options?: StatOptions & {
bigint?: false | undefined;
}
): Promise<Stats>;
function __promisify__(
path: PathLike,
options: StatOptions & {
bigint: true;
}
): Promise<BigIntStats>;
function __promisify__(path: PathLike, options?: StatOptions): Promise<Stats | BigIntStats>;
}
/**
 * Asynchronous statfs(2). Returns information about the mounted file system which contains `path`. The callback gets two arguments `(err, stats)` where `stats` is an `fs.StatFs` object.
 * In case of an error, the `err.code` will be one of the `Common System Errors`.
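 *
 * A minimal sketch querying the root file system:
 *
 * ```js
 * import { statfs } from 'fs';
 *
 * statfs('/', (err, stats) => {
 *   if (err) throw err;
 *   // Approximate free space in bytes: block size times free blocks.
 *   console.log(stats.bsize * stats.bfree);
 * });
 * ```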
* @param path A path to an existing file or directory on the file system to be queried.
*/
export function statfs(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: StatsFs) => void): void;
export function statfs(
path: PathLike,
options:
| (StatFsOptions & {
bigint?: false | undefined;
})
| undefined,
callback: (err: NodeJS.ErrnoException | null, stats: StatsFs) => void
): void;
export function statfs(
path: PathLike,
options: StatFsOptions & {
bigint: true;
},
callback: (err: NodeJS.ErrnoException | null, stats: BigIntStatsFs) => void
): void;
export function statfs(path: PathLike, options: StatFsOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: StatsFs | BigIntStatsFs) => void): void;
export namespace statfs {
/**
 * Asynchronous statfs(2) - Returns information about the mounted file system which contains `path`. The callback gets two arguments `(err, stats)` where `stats` is an `fs.StatFs` object.
* @param path A path to an existing file or directory on the file system to be queried.
*/
function __promisify__(
path: PathLike,
options?: StatFsOptions & {
bigint?: false | undefined;
}
): Promise<StatsFs>;
function __promisify__(
path: PathLike,
options: StatFsOptions & {
bigint: true;
}
): Promise<BigIntStatsFs>;
function __promisify__(path: PathLike, options?: StatFsOptions): Promise<StatsFs | BigIntStatsFs>;
}
/**
 * Synchronous statfs(2). Returns information about the mounted file system which contains `path` as an `fs.StatFs` object.
 * In case of an error, the thrown error's `code` will be one of the `Common System Errors`.
 * @param path A path to an existing file or directory on the file system to be queried.
*/
export function statfsSync(
path: PathLike,
options?: StatFsOptions & {
bigint?: false | undefined;
}
): StatsFs;
export function statfsSync(
path: PathLike,
options: StatFsOptions & {
bigint: true;
}
): BigIntStatsFs;
export function statfsSync(path: PathLike, options?: StatFsOptions): StatsFs | BigIntStatsFs;
/**
* Synchronous lstat(2) - Get file status. Does not dereference symbolic links.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export const lstatSync: StatSyncFn;
/**
 * Creates a new link from the `existingPath` to the `newPath`. See the POSIX [`link(2)`](http://man7.org/linux/man-pages/man2/link.2.html) documentation for more detail. No arguments other than
 * a possible exception are given to the completion callback.
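 *
 * A minimal sketch (file names are hypothetical):
 *
 * ```js
 * import { link } from 'fs';
 *
 * // 'hardlink.txt' will refer to the same inode as 'file.txt'.
 * link('file.txt', 'hardlink.txt', (err) => {
 *   if (err) throw err;
 * });
 * ```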
* @since v0.1.31
*/
export function link(existingPath: PathLike, newPath: PathLike, callback: NoParamCallback): void;
export namespace link {
/**
* Asynchronous link(2) - Create a new link (also known as a hard link) to an existing file.
* @param existingPath A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(existingPath: PathLike, newPath: PathLike): Promise<void>;
}
/**
* Creates a new link from the `existingPath` to the `newPath`. See the POSIX [`link(2)`](http://man7.org/linux/man-pages/man2/link.2.html) documentation for more detail. Returns `undefined`.
* @since v0.1.31
*/
export function linkSync(existingPath: PathLike, newPath: PathLike): void;
/**
* Creates the link called `path` pointing to `target`. No arguments other than a
* possible exception are given to the completion callback.
*
* See the POSIX [`symlink(2)`](http://man7.org/linux/man-pages/man2/symlink.2.html) documentation for more details.
*
* The `type` argument is only available on Windows and ignored on other platforms.
* It can be set to `'dir'`, `'file'`, or `'junction'`. If the `type` argument is
* not set, Node.js will autodetect `target` type and use `'file'` or `'dir'`. If
* the `target` does not exist, `'file'` will be used. Windows junction points
 * require the destination path to be absolute. When using `'junction'`, the `target` argument will automatically be normalized to an absolute path.
*
* Relative targets are relative to the link’s parent directory.
*
* ```js
* import { symlink } from 'fs';
*
* symlink('./mew', './mewtwo', callback);
* ```
*
* The above example creates a symbolic link `mewtwo` which points to `mew` in the
* same directory:
*
* ```bash
* $ tree .
* .
* ├── mew
* └── mewtwo -> ./mew
* ```
* @since v0.1.31
*/
export function symlink(target: PathLike, path: PathLike, type: symlink.Type | undefined | null, callback: NoParamCallback): void;
/**
* Asynchronous symlink(2) - Create a new symbolic link to an existing file.
* @param target A path to an existing file. If a URL is provided, it must use the `file:` protocol.
* @param path A path to the new symlink. If a URL is provided, it must use the `file:` protocol.
*/
export function symlink(target: PathLike, path: PathLike, callback: NoParamCallback): void;
export namespace symlink {
/**
* Asynchronous symlink(2) - Create a new symbolic link to an existing file.
* @param target A path to an existing file. If a URL is provided, it must use the `file:` protocol.
* @param path A path to the new symlink. If a URL is provided, it must use the `file:` protocol.
* @param type May be set to `'dir'`, `'file'`, or `'junction'` (default is `'file'`) and is only available on Windows (ignored on other platforms).
* When using `'junction'`, the `target` argument will automatically be normalized to an absolute path.
*/
function __promisify__(target: PathLike, path: PathLike, type?: string | null): Promise<void>;
type Type = 'dir' | 'file' | 'junction';
}
/**
* Returns `undefined`.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link symlink}.
* @since v0.1.31
*/
export function symlinkSync(target: PathLike, path: PathLike, type?: symlink.Type | null): void;
/**
* Reads the contents of the symbolic link referred to by `path`. The callback gets
* two arguments `(err, linkString)`.
*
* See the POSIX [`readlink(2)`](http://man7.org/linux/man-pages/man2/readlink.2.html) documentation for more details.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the link path passed to the callback. If the `encoding` is set to `'buffer'`,
* the link path returned will be passed as a `Buffer` object.
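 *
 * A minimal sketch (assuming `'./mewtwo'` is a symbolic link, as in the {@link symlink} example):
 *
 * ```js
 * import { readlink } from 'fs';
 *
 * readlink('./mewtwo', (err, linkString) => {
 *   if (err) throw err;
 *   console.log(linkString); // e.g. './mew'
 * });
 * ```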
* @since v0.1.31
*/
export function readlink(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: string) => void): void;
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readlink(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: Buffer) => void): void;
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readlink(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: string | Buffer) => void): void;
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function readlink(path: PathLike, callback: (err: NodeJS.ErrnoException | null, linkString: string) => void): void;
export namespace readlink {
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options?: EncodingOption): Promise<string>;
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options: BufferEncodingOption): Promise<Buffer>;
/**
* Asynchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options?: EncodingOption): Promise<string | Buffer>;
}
/**
* Returns the symbolic link's string value.
*
* See the POSIX [`readlink(2)`](http://man7.org/linux/man-pages/man2/readlink.2.html) documentation for more details.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the link path returned. If the `encoding` is set to `'buffer'`,
* the link path returned will be passed as a `Buffer` object.
* @since v0.1.31
*/
export function readlinkSync(path: PathLike, options?: EncodingOption): string;
/**
* Synchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readlinkSync(path: PathLike, options: BufferEncodingOption): Buffer;
/**
* Synchronous readlink(2) - read value of a symbolic link.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readlinkSync(path: PathLike, options?: EncodingOption): string | Buffer;
/**
* Asynchronously computes the canonical pathname by resolving `.`, `..` and
* symbolic links.
*
* A canonical pathname is not necessarily unique. Hard links and bind mounts can
* expose a file system entity through many pathnames.
*
* This function behaves like [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html), with some exceptions:
*
* 1. No case conversion is performed on case-insensitive file systems.
* 2. The maximum number of symbolic links is platform-independent and generally
* (much) higher than what the native [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html) implementation supports.
*
 * The `callback` gets two arguments `(err, resolvedPath)`. May use `process.cwd` to resolve relative paths.
*
* Only paths that can be converted to UTF8 strings are supported.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the path passed to the callback. If the `encoding` is set to `'buffer'`,
* the path returned will be passed as a `Buffer` object.
*
* If `path` resolves to a socket or a pipe, the function will return a system
* dependent name for that object.
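 *
 * A minimal sketch (the path is hypothetical):
 *
 * ```js
 * import { realpath } from 'fs';
 *
 * realpath('./mewtwo', (err, resolvedPath) => {
 *   if (err) throw err;
 *   console.log(resolvedPath); // e.g. '/home/user/mew'
 * });
 * ```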
* @since v0.1.31
*/
export function realpath(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function realpath(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: Buffer) => void): void;
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function realpath(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string | Buffer) => void): void;
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function realpath(path: PathLike, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
export namespace realpath {
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options?: EncodingOption): Promise<string>;
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options: BufferEncodingOption): Promise<Buffer>;
/**
* Asynchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(path: PathLike, options?: EncodingOption): Promise<string | Buffer>;
/**
* Asynchronous [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html).
*
* The `callback` gets two arguments `(err, resolvedPath)`.
*
* Only paths that can be converted to UTF8 strings are supported.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the path passed to the callback. If the `encoding` is set to `'buffer'`,
* the path returned will be passed as a `Buffer` object.
*
* On Linux, when Node.js is linked against musl libc, the procfs file system must
* be mounted on `/proc` in order for this function to work. Glibc does not have
* this restriction.
* @since v9.2.0
*/
function native(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
function native(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: Buffer) => void): void;
function native(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string | Buffer) => void): void;
function native(path: PathLike, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
}
/**
* Returns the resolved pathname.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link realpath}.
* @since v0.1.31
*/
export function realpathSync(path: PathLike, options?: EncodingOption): string;
/**
* Synchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function realpathSync(path: PathLike, options: BufferEncodingOption): Buffer;
/**
* Synchronous realpath(3) - return the canonicalized absolute pathname.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function realpathSync(path: PathLike, options?: EncodingOption): string | Buffer;
export namespace realpathSync {
function native(path: PathLike, options?: EncodingOption): string;
function native(path: PathLike, options: BufferEncodingOption): Buffer;
function native(path: PathLike, options?: EncodingOption): string | Buffer;
}
/**
* Asynchronously removes a file or symbolic link. No arguments other than a
* possible exception are given to the completion callback.
*
* ```js
* import { unlink } from 'fs';
* // Assuming that 'path/file.txt' is a regular file.
* unlink('path/file.txt', (err) => {
* if (err) throw err;
* console.log('path/file.txt was deleted');
* });
* ```
*
* `fs.unlink()` will not work on a directory, empty or otherwise. To remove a
* directory, use {@link rmdir}.
*
* See the POSIX [`unlink(2)`](http://man7.org/linux/man-pages/man2/unlink.2.html) documentation for more details.
* @since v0.0.2
*/
export function unlink(path: PathLike, callback: NoParamCallback): void;
export namespace unlink {
/**
* Asynchronous unlink(2) - delete a name and possibly the file it refers to.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(path: PathLike): Promise<void>;
}
/**
* Synchronous [`unlink(2)`](http://man7.org/linux/man-pages/man2/unlink.2.html). Returns `undefined`.
* @since v0.1.21
*/
export function unlinkSync(path: PathLike): void;
export interface RmDirOptions {
/**
* If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
* `EPERM` error is encountered, Node.js will retry the operation with a linear
* backoff wait of `retryDelay` ms longer on each try. This option represents the
* number of retries. This option is ignored if the `recursive` option is not
* `true`.
* @default 0
*/
maxRetries?: number | undefined;
/**
 * @deprecated since v14.14.0. In future versions of Node.js, `fs.rmdir(path, { recursive: true })`
 * will throw if `path` does not exist or is a file; it currently triggers a deprecation warning.
 * Use `fs.rm(path, { recursive: true, force: true })` instead.
*
* If `true`, perform a recursive directory removal. In
* recursive mode, operations are retried on failure.
* @default false
*/
recursive?: boolean | undefined;
/**
* The amount of time in milliseconds to wait between retries.
* This option is ignored if the `recursive` option is not `true`.
* @default 100
*/
retryDelay?: number | undefined;
}
/**
* Asynchronous [`rmdir(2)`](http://man7.org/linux/man-pages/man2/rmdir.2.html). No arguments other than a possible exception are given
* to the completion callback.
*
* Using `fs.rmdir()` on a file (not a directory) results in an `ENOENT` error on
* Windows and an `ENOTDIR` error on POSIX.
*
* To get a behavior similar to the `rm -rf` Unix command, use {@link rm} with options `{ recursive: true, force: true }`.
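 *
 * A minimal sketch (assuming `'./empty-dir'` is an existing empty directory):
 *
 * ```js
 * import { rmdir } from 'fs';
 *
 * rmdir('./empty-dir', (err) => {
 *   if (err) throw err;
 * });
 * ```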
* @since v0.0.2
*/
export function rmdir(path: PathLike, callback: NoParamCallback): void;
export function rmdir(path: PathLike, options: RmDirOptions, callback: NoParamCallback): void;
export namespace rmdir {
/**
* Asynchronous rmdir(2) - delete a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
function __promisify__(path: PathLike, options?: RmDirOptions): Promise<void>;
}
/**
* Synchronous [`rmdir(2)`](http://man7.org/linux/man-pages/man2/rmdir.2.html). Returns `undefined`.
*
* Using `fs.rmdirSync()` on a file (not a directory) results in an `ENOENT` error
* on Windows and an `ENOTDIR` error on POSIX.
*
* To get a behavior similar to the `rm -rf` Unix command, use {@link rmSync} with options `{ recursive: true, force: true }`.
* @since v0.1.21
*/
export function rmdirSync(path: PathLike, options?: RmDirOptions): void;
export interface RmOptions {
/**
* When `true`, exceptions will be ignored if `path` does not exist.
* @default false
*/
force?: boolean | undefined;
/**
* If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
* `EPERM` error is encountered, Node.js will retry the operation with a linear
* backoff wait of `retryDelay` ms longer on each try. This option represents the
* number of retries. This option is ignored if the `recursive` option is not
* `true`.
* @default 0
*/
maxRetries?: number | undefined;
/**
* If `true`, perform a recursive directory removal. In
* recursive mode, operations are retried on failure.
* @default false
*/
recursive?: boolean | undefined;
/**
* The amount of time in milliseconds to wait between retries.
* This option is ignored if the `recursive` option is not `true`.
* @default 100
*/
retryDelay?: number | undefined;
}
/**
 * Asynchronously removes files and directories (modeled on the standard POSIX `rm` utility). No arguments other than a possible exception are given to the
 * completion callback.
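 *
 * A minimal sketch (the path is hypothetical); `force: true` suppresses errors for missing paths:
 *
 * ```js
 * import { rm } from 'fs';
 *
 * // Roughly equivalent to `rm -rf ./tmp-dir`.
 * rm('./tmp-dir', { recursive: true, force: true }, (err) => {
 *   if (err) throw err;
 * });
 * ```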
* @since v14.14.0
*/
export function rm(path: PathLike, callback: NoParamCallback): void;
export function rm(path: PathLike, options: RmOptions, callback: NoParamCallback): void;
export namespace rm {
/**
* Asynchronously removes files and directories (modeled on the standard POSIX `rm` utility).
*/
function __promisify__(path: PathLike, options?: RmOptions): Promise<void>;
}
/**
 * Synchronously removes files and directories (modeled on the standard POSIX `rm` utility). Returns `undefined`.
* @since v14.14.0
*/
export function rmSync(path: PathLike, options?: RmOptions): void;
export interface MakeDirectoryOptions {
/**
* Indicates whether parent folders should be created.
* If a folder was created, the path to the first created folder will be returned.
* @default false
*/
recursive?: boolean | undefined;
/**
 * A file mode. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
* @default 0o777
*/
mode?: Mode | undefined;
}
/**
* Asynchronously creates a directory.
*
 * The callback is given a possible exception and, if `recursive` is `true`, the
 * first directory path created, `(err[, path])`. `path` can still be `undefined` when `recursive` is `true`, if no directory was
 * created.
 *
 * The optional `options` argument can be an integer specifying `mode` (permission
 * and sticky bits), or an object with a `mode` property and a `recursive` property indicating whether parent directories should be created. Calling `fs.mkdir()` when `path` is a directory that
 * exists results in an error only when `recursive` is `false`.
*
* ```js
* import { mkdir } from 'fs';
*
* // Creates /tmp/a/apple, regardless of whether `/tmp` and /tmp/a exist.
* mkdir('/tmp/a/apple', { recursive: true }, (err) => {
* if (err) throw err;
* });
* ```
*
* On Windows, using `fs.mkdir()` on the root directory even with recursion will
* result in an error:
*
* ```js
* import { mkdir } from 'fs';
*
* mkdir('/', { recursive: true }, (err) => {
* // => [Error: EPERM: operation not permitted, mkdir 'C:\']
* });
* ```
*
* See the POSIX [`mkdir(2)`](http://man7.org/linux/man-pages/man2/mkdir.2.html) documentation for more details.
* @since v0.1.8
*/
export function mkdir(
path: PathLike,
options: MakeDirectoryOptions & {
recursive: true;
},
callback: (err: NodeJS.ErrnoException | null, path?: string) => void
): void;
/**
* Asynchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
export function mkdir(
path: PathLike,
options:
| Mode
| (MakeDirectoryOptions & {
recursive?: false | undefined;
})
| null
| undefined,
callback: NoParamCallback
): void;
/**
* Asynchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
export function mkdir(path: PathLike, options: Mode | MakeDirectoryOptions | null | undefined, callback: (err: NodeJS.ErrnoException | null, path?: string) => void): void;
/**
* Asynchronous mkdir(2) - create a directory with a mode of `0o777`.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function mkdir(path: PathLike, callback: NoParamCallback): void;
export namespace mkdir {
/**
* Asynchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
function __promisify__(
path: PathLike,
options: MakeDirectoryOptions & {
recursive: true;
}
): Promise<string | undefined>;
/**
* Asynchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
function __promisify__(
path: PathLike,
options?:
| Mode
| (MakeDirectoryOptions & {
recursive?: false | undefined;
})
| null
): Promise<void>;
/**
* Asynchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
function __promisify__(path: PathLike, options?: Mode | MakeDirectoryOptions | null): Promise<string | undefined>;
}
/**
 * Synchronously creates a directory. Returns `undefined`, or if `recursive` is `true`, the first directory path created.
* This is the synchronous version of {@link mkdir}.
*
* See the POSIX [`mkdir(2)`](http://man7.org/linux/man-pages/man2/mkdir.2.html) documentation for more details.
* @since v0.1.21
*/
export function mkdirSync(
path: PathLike,
options: MakeDirectoryOptions & {
recursive: true;
}
): string | undefined;
/**
* Synchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
export function mkdirSync(
path: PathLike,
options?:
| Mode
| (MakeDirectoryOptions & {
recursive?: false | undefined;
})
| null
): void;
/**
* Synchronous mkdir(2) - create a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
* should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
*/
export function mkdirSync(path: PathLike, options?: Mode | MakeDirectoryOptions | null): string | undefined;
/**
* Creates a unique temporary directory.
*
 * Generates six random characters to be appended behind a required `prefix` to create a unique temporary directory. Due to platform
* inconsistencies, avoid trailing `X` characters in `prefix`. Some platforms,
* notably the BSDs, can return more than six random characters, and replace
* trailing `X` characters in `prefix` with random characters.
*
* The created directory path is passed as a string to the callback's second
* parameter.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use.
*
* ```js
 * import { mkdtemp } from 'fs';
 * import { join } from 'path';
 * import { tmpdir } from 'os';
 *
 * mkdtemp(join(tmpdir(), 'foo-'), (err, directory) => {
* if (err) throw err;
* console.log(directory);
* // Prints: /tmp/foo-itXde2 or C:\Users\...\AppData\Local\Temp\foo-itXde2
* });
* ```
*
* The `fs.mkdtemp()` method will append the six randomly selected characters
* directly to the `prefix` string. For instance, given a directory `/tmp`, if the
 * intention is to create a temporary directory _within_ `/tmp`, the `prefix` must end with a trailing platform-specific path separator
* (`require('path').sep`).
*
* ```js
* import { tmpdir } from 'os';
* import { mkdtemp } from 'fs';
*
* // The parent directory for the new temporary directory
* const tmpDir = tmpdir();
*
* // This method is *INCORRECT*:
* mkdtemp(tmpDir, (err, directory) => {
* if (err) throw err;
* console.log(directory);
* // Will print something similar to `/tmpabc123`.
* // A new temporary directory is created at the file system root
* // rather than *within* the /tmp directory.
* });
*
* // This method is *CORRECT*:
* import { sep } from 'path';
* mkdtemp(`${tmpDir}${sep}`, (err, directory) => {
* if (err) throw err;
* console.log(directory);
* // Will print something similar to `/tmp/abc123`.
* // A new temporary directory is created within
* // the /tmp directory.
* });
* ```
* @since v5.10.0
*/
export function mkdtemp(prefix: string, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, folder: string) => void): void;
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function mkdtemp(
prefix: string,
options:
| 'buffer'
| {
encoding: 'buffer';
},
callback: (err: NodeJS.ErrnoException | null, folder: Buffer) => void
): void;
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function mkdtemp(prefix: string, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, folder: string | Buffer) => void): void;
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
*/
export function mkdtemp(prefix: string, callback: (err: NodeJS.ErrnoException | null, folder: string) => void): void;
export namespace mkdtemp {
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(prefix: string, options?: EncodingOption): Promise<string>;
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(prefix: string, options: BufferEncodingOption): Promise<Buffer>;
/**
* Asynchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(prefix: string, options?: EncodingOption): Promise<string | Buffer>;
}
/**
* Returns the created directory path.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link mkdtemp}.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use.
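 *
 * A minimal sketch (the `'foo-'` prefix is illustrative):
 *
 * ```js
 * import { mkdtempSync } from 'fs';
 * import { join } from 'path';
 * import { tmpdir } from 'os';
 *
 * // Creates something like /tmp/foo-itXde2 and returns the path.
 * const directory = mkdtempSync(join(tmpdir(), 'foo-'));
 * console.log(directory);
 * ```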
* @since v5.10.0
*/
export function mkdtempSync(prefix: string, options?: EncodingOption): string;
/**
* Synchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function mkdtempSync(prefix: string, options: BufferEncodingOption): Buffer;
/**
* Synchronously creates a unique temporary directory.
* Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function mkdtempSync(prefix: string, options?: EncodingOption): string | Buffer;
/**
 * Reads the contents of a directory. The callback gets two arguments `(err, files)` where `files` is an array of the names of the files in the directory excluding `'.'` and `'..'`.
*
* See the POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more details.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the filenames passed to the callback. If the `encoding` is set to `'buffer'`,
* the filenames returned will be passed as `Buffer` objects.
*
* If `options.withFileTypes` is set to `true`, the `files` array will contain `fs.Dirent` objects.
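 *
 * A minimal sketch (reading the current working directory is illustrative):
 *
 * ```js
 * import { readdir } from 'fs';
 *
 * readdir('.', { withFileTypes: true }, (err, entries) => {
 *   if (err) throw err;
 *   for (const entry of entries) {
 *     // Each Dirent exposes type checks such as isFile() and isDirectory().
 *     console.log(`${entry.name}: ${entry.isDirectory() ? 'directory' : 'file'}`);
 *   }
 * });
 * ```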
* @since v0.1.8
*/
export function readdir(
path: PathLike,
options:
| {
encoding: BufferEncoding | null;
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
| BufferEncoding
| undefined
| null,
callback: (err: NodeJS.ErrnoException | null, files: string[]) => void
): void;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readdir(
path: PathLike,
options:
| {
encoding: 'buffer';
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
| 'buffer',
callback: (err: NodeJS.ErrnoException | null, files: Buffer[]) => void
): void;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readdir(
path: PathLike,
options:
| (ObjectEncodingOptions & {
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
})
| BufferEncoding
| undefined
| null,
callback: (err: NodeJS.ErrnoException | null, files: string[] | Buffer[]) => void
): void;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function readdir(path: PathLike, callback: (err: NodeJS.ErrnoException | null, files: string[]) => void): void;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options If called with `withFileTypes: true`, the result data will be an array of `Dirent`.
*/
export function readdir(
path: PathLike,
options: ObjectEncodingOptions & {
withFileTypes: true;
recursive?: boolean | undefined;
},
callback: (err: NodeJS.ErrnoException | null, files: Dirent[]) => void
): void;
export namespace readdir {
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(
path: PathLike,
options?:
| {
encoding: BufferEncoding | null;
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
| BufferEncoding
| null
): Promise<string[]>;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(
path: PathLike,
options:
| 'buffer'
| {
encoding: 'buffer';
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
): Promise<Buffer[]>;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
function __promisify__(
path: PathLike,
options?:
| (ObjectEncodingOptions & {
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
})
| BufferEncoding
| null
): Promise<string[] | Buffer[]>;
/**
* Asynchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options If called with `withFileTypes: true`, the result data will be an array of `Dirent`.
*/
function __promisify__(
path: PathLike,
options: ObjectEncodingOptions & {
withFileTypes: true;
recursive?: boolean | undefined;
}
): Promise<Dirent[]>;
}
/**
* Reads the contents of the directory.
*
* See the POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more details.
*
* The optional `options` argument can be a string specifying an encoding, or an
* object with an `encoding` property specifying the character encoding to use for
* the filenames returned. If the `encoding` is set to `'buffer'`,
* the filenames returned will be passed as `Buffer` objects.
*
* If `options.withFileTypes` is set to `true`, the result will contain `fs.Dirent` objects.
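 *
 * A minimal sketch (the paths are illustrative):
 *
 * ```js
 * import { readdirSync } from 'fs';
 *
 * const names = readdirSync('/tmp'); // string[]
 * const entries = readdirSync('/tmp', { withFileTypes: true }); // Dirent[]
 * ```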
* @since v0.1.21
*/
export function readdirSync(
path: PathLike,
options?:
| {
encoding: BufferEncoding | null;
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
| BufferEncoding
| null
): string[];
/**
* Synchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readdirSync(
path: PathLike,
options:
| {
encoding: 'buffer';
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
}
| 'buffer'
): Buffer[];
/**
* Synchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
*/
export function readdirSync(
path: PathLike,
options?:
| (ObjectEncodingOptions & {
withFileTypes?: false | undefined;
recursive?: boolean | undefined;
})
| BufferEncoding
| null
): string[] | Buffer[];
/**
* Synchronous readdir(3) - read a directory.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options If called with `withFileTypes: true`, the result data will be an array of `Dirent`.
*/
export function readdirSync(
path: PathLike,
options: ObjectEncodingOptions & {
withFileTypes: true;
recursive?: boolean | undefined;
}
): Dirent[];
/**
* Closes the file descriptor. No arguments other than a possible exception are
* given to the completion callback.
*
* Calling `fs.close()` on any file descriptor (`fd`) that is currently in use
* through any other `fs` operation may lead to undefined behavior.
*
* See the POSIX [`close(2)`](http://man7.org/linux/man-pages/man2/close.2.html) documentation for more detail.
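 *
 * A minimal sketch (the path is illustrative):
 *
 * ```js
 * import { open, close } from 'fs';
 *
 * open('/etc/passwd', 'r', (err, fd) => {
 *   if (err) throw err;
 *   // ... use the file descriptor ...
 *   close(fd, (err) => {
 *     if (err) throw err;
 *   });
 * });
 * ```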
* @since v0.0.2
*/
export function close(fd: number, callback?: NoParamCallback): void;
export namespace close {
/**
* Asynchronous close(2) - close a file descriptor.
* @param fd A file descriptor.
*/
function __promisify__(fd: number): Promise<void>;
}
/**
* Closes the file descriptor. Returns `undefined`.
*
* Calling `fs.closeSync()` on any file descriptor (`fd`) that is currently in use
* through any other `fs` operation may lead to undefined behavior.
*
* See the POSIX [`close(2)`](http://man7.org/linux/man-pages/man2/close.2.html) documentation for more detail.
* @since v0.1.21
*/
export function closeSync(fd: number): void;
/**
* Asynchronous file open. See the POSIX [`open(2)`](http://man7.org/linux/man-pages/man2/open.2.html) documentation for more details.
*
* `mode` sets the file mode (permission and sticky bits), but only if the file was
* created. On Windows, only the write permission can be manipulated; see {@link chmod}.
*
* The callback gets two arguments `(err, fd)`.
*
* Some characters (`< > : " / \ | ? *`) are reserved under Windows as documented
* by [Naming Files, Paths, and Namespaces](https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file). Under NTFS, if the filename contains
* a colon, Node.js will open a file system stream, as described by [this MSDN page](https://docs.microsoft.com/en-us/windows/desktop/FileIO/using-streams).
*
 * Functions based on `fs.open()` exhibit this behavior as well: `fs.writeFile()`, `fs.readFile()`, etc.
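 *
 * A minimal sketch (the filename and mode are illustrative):
 *
 * ```js
 * import { open, close } from 'fs';
 *
 * // 'a' opens for appending; 0o644 only applies if the file is created.
 * open('message.txt', 'a', 0o644, (err, fd) => {
 *   if (err) throw err;
 *   close(fd, (err) => {
 *     if (err) throw err;
 *   });
 * });
 * ```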
* @since v0.0.2
 * @param [flags='r'] See support of file system `flags`.
* @param [mode=0o666]
*/
export function open(path: PathLike, flags: OpenMode | undefined, mode: Mode | undefined | null, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
/**
* Asynchronous open(2) - open and possibly create a file. If the file is created, its mode will be `0o666`.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param [flags='r'] See support of file system `flags`.
*/
export function open(path: PathLike, flags: OpenMode | undefined, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
/**
* Asynchronous open(2) - open and possibly create a file. If the file is created, its mode will be `0o666`.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
*/
export function open(path: PathLike, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
export namespace open {
/**
* Asynchronous open(2) - open and possibly create a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param mode A file mode. If a string is passed, it is parsed as an octal integer. If not supplied, defaults to `0o666`.
*/
function __promisify__(path: PathLike, flags: OpenMode, mode?: Mode | null): Promise<number>;
}
/**
* Returns an integer representing the file descriptor.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link open}.
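 *
 * A minimal sketch (the filename and mode are illustrative):
 *
 * ```js
 * import { openSync, closeSync } from 'fs';
 *
 * const fd = openSync('message.txt', 'w', 0o644);
 * closeSync(fd);
 * ```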
* @since v0.1.21
* @param [flags='r']
* @param [mode=0o666]
*/
export function openSync(path: PathLike, flags: OpenMode, mode?: Mode | null): number;
/**
* Change the file system timestamps of the object referenced by `path`.
*
* The `atime` and `mtime` arguments follow these rules:
*
 * * Values can be either numbers representing Unix epoch time in seconds, `Date`s, or a numeric string like `'123456789.0'`.
 * * If the value can not be converted to a number, or is `NaN`, `Infinity`, or `-Infinity`, an `Error` will be thrown.
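 *
 * A minimal sketch (the path is illustrative):
 *
 * ```js
 * import { utimes } from 'fs';
 *
 * const now = new Date();
 * utimes('message.txt', now, now, (err) => {
 *   if (err) throw err;
 * });
 * ```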
* @since v0.4.2
*/
export function utimes(path: PathLike, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace utimes {
/**
* Asynchronously change file timestamps of the file referenced by the supplied path.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* @param atime The last access time. If a string is provided, it will be coerced to number.
* @param mtime The last modified time. If a string is provided, it will be coerced to number.
*/
function __promisify__(path: PathLike, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
* Returns `undefined`.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link utimes}.
* @since v0.4.2
*/
export function utimesSync(path: PathLike, atime: TimeLike, mtime: TimeLike): void;
/**
* Change the file system timestamps of the object referenced by the supplied file
* descriptor. See {@link utimes}.
* @since v0.4.2
*/
export function futimes(fd: number, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace futimes {
/**
* Asynchronously change file timestamps of the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param atime The last access time. If a string is provided, it will be coerced to number.
* @param mtime The last modified time. If a string is provided, it will be coerced to number.
*/
function __promisify__(fd: number, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
* Synchronous version of {@link futimes}. Returns `undefined`.
* @since v0.4.2
*/
export function futimesSync(fd: number, atime: TimeLike, mtime: TimeLike): void;
/**
* Request that all data for the open file descriptor is flushed to the storage
* device. The specific implementation is operating system and device specific.
* Refer to the POSIX [`fsync(2)`](http://man7.org/linux/man-pages/man2/fsync.2.html) documentation for more detail. No arguments other
* than a possible exception are given to the completion callback.
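 *
 * A minimal sketch (the path is illustrative):
 *
 * ```js
 * import { open, fsync, close } from 'fs';
 *
 * open('data.log', 'a', (err, fd) => {
 *   if (err) throw err;
 *   fsync(fd, (syncErr) => {
 *     // Close the descriptor regardless of the fsync outcome.
 *     close(fd, (closeErr) => {
 *       if (syncErr ?? closeErr) throw syncErr ?? closeErr;
 *     });
 *   });
 * });
 * ```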
* @since v0.1.96
*/
export function fsync(fd: number, callback: NoParamCallback): void;
export namespace fsync {
/**
* Asynchronous fsync(2) - synchronize a file's in-core state with the underlying storage device.
* @param fd A file descriptor.
*/
function __promisify__(fd: number): Promise<void>;
}
/**
* Request that all data for the open file descriptor is flushed to the storage
* device. The specific implementation is operating system and device specific.
* Refer to the POSIX [`fsync(2)`](http://man7.org/linux/man-pages/man2/fsync.2.html) documentation for more detail. Returns `undefined`.
* @since v0.1.96
*/
export function fsyncSync(fd: number): void;
/**
* Write `buffer` to the file specified by `fd`.
*
* `offset` determines the part of the buffer to be written, and `length` is
* an integer specifying the number of bytes to write.
*
* `position` refers to the offset from the beginning of the file where this data
* should be written. If `typeof position !== 'number'`, the data will be written
* at the current position. See [`pwrite(2)`](http://man7.org/linux/man-pages/man2/pwrite.2.html).
*
 * The callback will be given three arguments `(err, bytesWritten, buffer)` where `bytesWritten` specifies how many _bytes_ were written from `buffer`.
 *
 * If this method is invoked via its `util.promisify()`ed version, it returns
 * a promise for an `Object` with `bytesWritten` and `buffer` properties.
*
* It is unsafe to use `fs.write()` multiple times on the same file without waiting
* for the callback. For this scenario, {@link createWriteStream} is
* recommended.
*
* On Linux, positional writes don't work when the file is opened in append mode.
* The kernel ignores the position argument and always appends the data to
* the end of the file.
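 *
 * A minimal sketch (the filename is illustrative):
 *
 * ```js
 * import { open, write, close } from 'fs';
 * import { Buffer } from 'buffer';
 *
 * open('output.bin', 'w', (err, fd) => {
 *   if (err) throw err;
 *   const data = Buffer.from('hello');
 *   write(fd, data, 0, data.length, 0, (err, bytesWritten) => {
 *     if (err) throw err;
 *     console.log(`wrote ${bytesWritten} bytes`);
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   });
 * });
 * ```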
* @since v0.0.2
*/
export function write<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer: TBuffer,
offset: number | undefined | null,
length: number | undefined | null,
position: number | undefined | null,
callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
* Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
* @param length The number of bytes to write. If not supplied, defaults to `buffer.length - offset`.
*/
export function write<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer: TBuffer,
offset: number | undefined | null,
length: number | undefined | null,
callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
* Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
*/
export function write<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer: TBuffer,
offset: number | undefined | null,
callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
* Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
*/
export function write<TBuffer extends NodeJS.ArrayBufferView>(fd: number, buffer: TBuffer, callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void): void;
/**
* Asynchronously writes `string` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param string A string to write.
* @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
* @param encoding The expected string encoding.
*/
export function write(
fd: number,
string: string,
position: number | undefined | null,
encoding: BufferEncoding | undefined | null,
callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void
): void;
/**
* Asynchronously writes `string` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param string A string to write.
* @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
*/
export function write(fd: number, string: string, position: number | undefined | null, callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void): void;
/**
* Asynchronously writes `string` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param string A string to write.
*/
export function write(fd: number, string: string, callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void): void;
export namespace write {
/**
* Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
* @param length The number of bytes to write. If not supplied, defaults to `buffer.length - offset`.
* @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
*/
function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer?: TBuffer,
offset?: number,
length?: number,
position?: number | null
): Promise<{
bytesWritten: number;
buffer: TBuffer;
}>;
/**
* Asynchronously writes `string` to the file referenced by the supplied file descriptor.
* @param fd A file descriptor.
* @param string A string to write.
* @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
* @param encoding The expected string encoding.
*/
function __promisify__(
fd: number,
string: string,
position?: number | null,
encoding?: BufferEncoding | null
): Promise<{
bytesWritten: number;
buffer: string;
}>;
}
/**
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link write}.
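 *
 * A minimal sketch (the filename is illustrative):
 *
 * ```js
 * import { openSync, writeSync, closeSync } from 'fs';
 *
 * const fd = openSync('output.txt', 'w');
 * const written = writeSync(fd, 'Hello Node.js');
 * console.log(`wrote ${written} bytes`);
 * closeSync(fd);
 * ```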
* @since v0.1.21
* @return The number of bytes written.
*/
export function writeSync(fd: number, buffer: NodeJS.ArrayBufferView, offset?: number | null, length?: number | null, position?: number | null): number;
/**
* Synchronously writes `string` to the file referenced by the supplied file descriptor, returning the number of bytes written.
* @param fd A file descriptor.
* @param string A string to write.
* @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
* @param encoding The expected string encoding.
*/
export function writeSync(fd: number, string: string, position?: number | null, encoding?: BufferEncoding | null): number;
export type ReadPosition = number | bigint;
export interface ReadSyncOptions {
/**
* @default 0
*/
offset?: number | undefined;
/**
* @default `length of buffer`
*/
length?: number | undefined;
/**
* @default null
*/
position?: ReadPosition | null | undefined;
}
export interface ReadAsyncOptions<TBuffer extends NodeJS.ArrayBufferView> extends ReadSyncOptions {
buffer?: TBuffer;
}
/**
* Read data from the file specified by `fd`.
*
* The callback is given the three arguments, `(err, bytesRead, buffer)`.
*
* If the file is not modified concurrently, the end-of-file is reached when the
* number of bytes read is zero.
*
 * If this method is invoked via its `util.promisify()`ed version, it returns
* a promise for an `Object` with `bytesRead` and `buffer` properties.
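 *
 * A minimal sketch (the path is illustrative):
 *
 * ```js
 * import { open, read, close } from 'fs';
 * import { Buffer } from 'buffer';
 *
 * open('/etc/passwd', 'r', (err, fd) => {
 *   if (err) throw err;
 *   const buffer = Buffer.alloc(1024);
 *   read(fd, buffer, 0, buffer.length, 0, (err, bytesRead) => {
 *     if (err) throw err;
 *     console.log(buffer.toString('utf8', 0, bytesRead));
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   });
 * });
 * ```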
* @since v0.0.2
* @param buffer The buffer that the data will be written to.
* @param offset The position in `buffer` to write the data to.
* @param length The number of bytes to read.
 * @param position Specifies where to begin reading from in the file. If `position` is `null` or `-1`, data will be read from the current file position, and the file position will be updated. If
* `position` is an integer, the file position will be unchanged.
*/
export function read<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer: TBuffer,
offset: number,
length: number,
position: ReadPosition | null,
callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: TBuffer) => void
): void;
/**
 * Similar to the above `fs.read` function, this version takes an optional `options` object.
 * If not otherwise specified in an `options` object:
 * `buffer` defaults to `Buffer.alloc(16384)`,
 * `offset` defaults to `0`,
 * `length` defaults to `buffer.byteLength - offset` (as of Node.js 17.6.0), and
 * `position` defaults to `null`.
* @since v12.17.0, 13.11.0
*/
export function read<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
options: ReadAsyncOptions<TBuffer>,
callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: TBuffer) => void
): void;
export function read(fd: number, callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: NodeJS.ArrayBufferView) => void): void;
export namespace read {
/**
* @param fd A file descriptor.
* @param buffer The buffer that the data will be written to.
* @param offset The offset in the buffer at which to start writing.
* @param length The number of bytes to read.
* @param position The offset from the beginning of the file from which data should be read. If `null`, data will be read from the current position.
*/
function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
buffer: TBuffer,
offset: number,
length: number,
position: number | null
): Promise<{
bytesRead: number;
buffer: TBuffer;
}>;
function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
fd: number,
options: ReadAsyncOptions<TBuffer>
): Promise<{
bytesRead: number;
buffer: TBuffer;
}>;
function __promisify__(fd: number): Promise<{
bytesRead: number;
buffer: NodeJS.ArrayBufferView;
}>;
}
/**
* Returns the number of `bytesRead`.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link read}.
* @since v0.1.21
*/
export function readSync(fd: number, buffer: NodeJS.ArrayBufferView, offset: number, length: number, position: ReadPosition | null): number;
/**
 * Similar to the above `fs.readSync` function, this version takes an optional `options` object.
 * If no `options` object is specified, the defaults described above are used.
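 *
 * A minimal sketch (the path is illustrative):
 *
 * ```js
 * import { openSync, readSync, closeSync } from 'fs';
 * import { Buffer } from 'buffer';
 *
 * const fd = openSync('/etc/passwd', 'r');
 * const buffer = Buffer.alloc(256);
 * const bytesRead = readSync(fd, buffer, { position: 0 });
 * console.log(buffer.toString('utf8', 0, bytesRead));
 * closeSync(fd);
 * ```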
*/
export function readSync(fd: number, buffer: NodeJS.ArrayBufferView, opts?: ReadSyncOptions): number;
/**
* Asynchronously reads the entire contents of a file.
*
* ```js
* import { readFile } from 'fs';
*
* readFile('/etc/passwd', (err, data) => {
* if (err) throw err;
* console.log(data);
* });
* ```
*
* The callback is passed two arguments `(err, data)`, where `data` is the
* contents of the file.
*
* If no encoding is specified, then the raw buffer is returned.
*
* If `options` is a string, then it specifies the encoding:
*
* ```js
* import { readFile } from 'fs';
*
* readFile('/etc/passwd', 'utf8', callback);
* ```
*
* When the path is a directory, the behavior of `fs.readFile()` and {@link readFileSync} is platform-specific. On macOS, Linux, and Windows, an
* error will be returned. On FreeBSD, a representation of the directory's contents
* will be returned.
*
* ```js
* import { readFile } from 'fs';
*
* // macOS, Linux, and Windows
* readFile('<directory>', (err, data) => {
* // => [Error: EISDIR: illegal operation on a directory, read <directory>]
* });
*
* // FreeBSD
* readFile('<directory>', (err, data) => {
* // => null, <data>
* });
* ```
*
* It is possible to abort an ongoing request using an `AbortSignal`. If a
* request is aborted the callback is called with an `AbortError`:
*
* ```js
* import { readFile } from 'fs';
*
* const controller = new AbortController();
* const signal = controller.signal;
* readFile(fileInfo[0].name, { signal }, (err, buf) => {
* // ...
* });
* // When you want to abort the request
* controller.abort();
* ```
*
* The `fs.readFile()` function buffers the entire file. To minimize memory costs,
* when possible prefer streaming via `fs.createReadStream()`.
*
* Aborting an ongoing request does not abort individual operating
* system requests but rather the internal buffering `fs.readFile` performs.
* @since v0.1.29
* @param path filename or file descriptor
*/
export function readFile(
path: PathOrFileDescriptor,
options:
| ({
encoding?: null | undefined;
flag?: string | undefined;
} & Abortable)
| undefined
| null,
callback: (err: NodeJS.ErrnoException | null, data: Buffer) => void
): void;
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
export function readFile(
path: PathOrFileDescriptor,
options:
| ({
encoding: BufferEncoding;
flag?: string | undefined;
} & Abortable)
| BufferEncoding,
callback: (err: NodeJS.ErrnoException | null, data: string) => void
): void;
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
export function readFile(
path: PathOrFileDescriptor,
options:
| (ObjectEncodingOptions & {
flag?: string | undefined;
} & Abortable)
| BufferEncoding
| undefined
| null,
callback: (err: NodeJS.ErrnoException | null, data: string | Buffer) => void
): void;
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
*/
export function readFile(path: PathOrFileDescriptor, callback: (err: NodeJS.ErrnoException | null, data: Buffer) => void): void;
export namespace readFile {
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options An object that may contain an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
function __promisify__(
path: PathOrFileDescriptor,
options?: {
encoding?: null | undefined;
flag?: string | undefined;
} | null
): Promise<Buffer>;
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
function __promisify__(
path: PathOrFileDescriptor,
options:
| {
encoding: BufferEncoding;
flag?: string | undefined;
}
| BufferEncoding
): Promise<string>;
/**
* Asynchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
function __promisify__(
path: PathOrFileDescriptor,
options?:
| (ObjectEncodingOptions & {
flag?: string | undefined;
})
| BufferEncoding
| null
): Promise<string | Buffer>;
}
/**
* Returns the contents of the `path`.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link readFile}.
*
* If the `encoding` option is specified then this function returns a
* string. Otherwise it returns a buffer.
*
 * Similar to {@link readFile}, when the path is a directory, the behavior of `fs.readFileSync()` is platform-specific.
*
* ```js
* import { readFileSync } from 'fs';
*
* // macOS, Linux, and Windows
* readFileSync('<directory>');
* // => [Error: EISDIR: illegal operation on a directory, read <directory>]
*
* // FreeBSD
* readFileSync('<directory>'); // => <data>
* ```
* @since v0.1.8
* @param path filename or file descriptor
*/
export function readFileSync(
path: PathOrFileDescriptor,
options?: {
encoding?: null | undefined;
flag?: string | undefined;
} | null
): Buffer;
/**
* Synchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
export function readFileSync(
path: PathOrFileDescriptor,
options:
| {
encoding: BufferEncoding;
flag?: string | undefined;
}
| BufferEncoding
): string;
/**
* Synchronously reads the entire contents of a file.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
* If a flag is not provided, it defaults to `'r'`.
*/
export function readFileSync(
path: PathOrFileDescriptor,
options?:
| (ObjectEncodingOptions & {
flag?: string | undefined;
})
| BufferEncoding
| null
): string | Buffer;
export type WriteFileOptions =
| (ObjectEncodingOptions &
Abortable & {
mode?: Mode | undefined;
flag?: string | undefined;
})
| BufferEncoding
| null;
/**
* When `file` is a filename, asynchronously writes data to the file, replacing the
* file if it already exists. `data` can be a string or a buffer.
*
 * When `file` is a file descriptor, the behavior is similar to calling `fs.write()` directly (which is recommended). See the notes below on using
* a file descriptor.
*
* The `encoding` option is ignored if `data` is a buffer.
*
* The `mode` option only affects the newly created file. See {@link open} for more details.
*
* ```js
* import { writeFile } from 'fs';
* import { Buffer } from 'buffer';
*
* const data = new Uint8Array(Buffer.from('Hello Node.js'));
* writeFile('message.txt', data, (err) => {
* if (err) throw err;
* console.log('The file has been saved!');
* });
* ```
*
* If `options` is a string, then it specifies the encoding:
*
* ```js
* import { writeFile } from 'fs';
*
* writeFile('message.txt', 'Hello Node.js', 'utf8', callback);
* ```
*
* It is unsafe to use `fs.writeFile()` multiple times on the same file without
* waiting for the callback. For this scenario, {@link createWriteStream} is
* recommended.
*
 * Similarly to `fs.readFile`, `fs.writeFile` is a convenience method that
 * performs multiple `write` calls internally to write the buffer passed to it.
 * For performance-sensitive code consider using {@link createWriteStream}.
*
* It is possible to use an `AbortSignal` to cancel an `fs.writeFile()`.
* Cancelation is "best effort", and some amount of data is likely still
* to be written.
*
* ```js
* import { writeFile } from 'fs';
* import { Buffer } from 'buffer';
*
* const controller = new AbortController();
* const { signal } = controller;
* const data = new Uint8Array(Buffer.from('Hello Node.js'));
* writeFile('message.txt', data, { signal }, (err) => {
* // When a request is aborted - the callback is called with an AbortError
* });
* // When the request should be aborted
* controller.abort();
* ```
*
* Aborting an ongoing request does not abort individual operating
* system requests but rather the internal buffering `fs.writeFile` performs.
* @since v0.1.29
* @param file filename or file descriptor
*/
export function writeFile(file: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options: WriteFileOptions, callback: NoParamCallback): void;
/**
* Asynchronously writes data to a file, replacing the file if it already exists.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
*/
export function writeFile(path: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, callback: NoParamCallback): void;
export namespace writeFile {
/**
* Asynchronously writes data to a file, replacing the file if it already exists.
* @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
* @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
* If `encoding` is not supplied, the default of `'utf8'` is used.
* If `mode` is not supplied, the default of `0o666` is used.
* If `mode` is a string, it is parsed as an octal integer.
* If `flag` is not supplied, the default of `'w'` is used.
*/
function __promisify__(path: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options?: WriteFileOptions): Promise<void>;
}
/**
* Returns `undefined`.
*
* The `mode` option only affects the newly created file. See {@link open} for more details.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link writeFile}.
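 *
 * A minimal sketch (the filename is illustrative):
 *
 * ```js
 * import { writeFileSync } from 'fs';
 *
 * try {
 *   writeFileSync('message.txt', 'Hello Node.js', 'utf8');
 * } catch (err) {
 *   // Handle the error
 * }
 * ```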
* @since v0.1.29
* @param file filename or file descriptor
*/
export function writeFileSync(file: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options?: WriteFileOptions): void;
/**
* Asynchronously append data to a file, creating the file if it does not yet
* exist. `data` can be a string or a `Buffer`.
*
* The `mode` option only affects the newly created file. See {@link open} for more details.
*
* ```js
* import { appendFile } from 'fs';
*
* appendFile('message.txt', 'data to append', (err) => {
* if (err) throw err;
* console.log('The "data to append" was appended to file!');
* });
* ```
*
* If `options` is a string, then it specifies the encoding:
*
* ```js
* import { appendFile } from 'fs';
*
* appendFile('message.txt', 'data to append', 'utf8', callback);
* ```
*
* The `path` may be specified as a numeric file descriptor that has been opened
* for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
* not be closed automatically.
*
* ```js
* import { open, close, appendFile } from 'fs';
*
* function closeFd(fd) {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
*
* open('message.txt', 'a', (err, fd) => {
* if (err) throw err;
*
* try {
* appendFile(fd, 'data to append', 'utf8', (err) => {
* closeFd(fd);
* if (err) throw err;
* });
* } catch (err) {
* closeFd(fd);
* throw err;
* }
* });
* ```
* @since v0.6.7
* @param path filename or file descriptor
*/
export function appendFile(path: PathOrFileDescriptor, data: string | Uint8Array, options: WriteFileOptions, callback: NoParamCallback): void;
/**
* Asynchronously append data to a file, creating the file if it does not exist.
* @param file A path to a file. If a URL is provided, it must use the `file:` protocol.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
*/
export function appendFile(file: PathOrFileDescriptor, data: string | Uint8Array, callback: NoParamCallback): void;
export namespace appendFile {
/**
* Asynchronously append data to a file, creating the file if it does not exist.
* @param file A path to a file. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
* If a file descriptor is provided, the underlying file will _not_ be closed automatically.
* @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
* @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
* If `encoding` is not supplied, the default of `'utf8'` is used.
* If `mode` is not supplied, the default of `0o666` is used.
* If `mode` is a string, it is parsed as an octal integer.
* If `flag` is not supplied, the default of `'a'` is used.
*/
function __promisify__(file: PathOrFileDescriptor, data: string | Uint8Array, options?: WriteFileOptions): Promise<void>;
}
/**
* Synchronously append data to a file, creating the file if it does not yet
* exist. `data` can be a string or a `Buffer`.
*
* The `mode` option only affects the newly created file. See {@link open} for more details.
*
* ```js
* import { appendFileSync } from 'fs';
*
* try {
* appendFileSync('message.txt', 'data to append');
* console.log('The "data to append" was appended to file!');
* } catch (err) {
* // Handle the error
* }
* ```
*
* If `options` is a string, then it specifies the encoding:
*
* ```js
* import { appendFileSync } from 'fs';
*
* appendFileSync('message.txt', 'data to append', 'utf8');
* ```
*
* The `path` may be specified as a numeric file descriptor that has been opened
* for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
* not be closed automatically.
*
* ```js
* import { openSync, closeSync, appendFileSync } from 'fs';
*
* let fd;
*
* try {
* fd = openSync('message.txt', 'a');
* appendFileSync(fd, 'data to append', 'utf8');
* } catch (err) {
* // Handle the error
* } finally {
* if (fd !== undefined)
* closeSync(fd);
* }
* ```
* @since v0.6.7
* @param path filename or file descriptor
*/
export function appendFileSync(path: PathOrFileDescriptor, data: string | Uint8Array, options?: WriteFileOptions): void;
/**
 * Options for {@link watchFile}.
 */
export interface WatchFileOptions {
bigint?: boolean | undefined;
persistent?: boolean | undefined;
interval?: number | undefined;
}
/**
* Watch for changes on `filename`. The callback `listener` will be called each
* time the file is accessed.
*
 * The `options` argument may be omitted. If provided, it should be an object. The `options` object may contain a boolean named `persistent` that indicates
* whether the process should continue to run as long as files are being watched.
* The `options` object may specify an `interval` property indicating how often the
* target should be polled in milliseconds.
*
 * The `listener` gets two arguments: the current stat object and the previous
* stat object:
*
* ```js
* import { watchFile } from 'fs';
*
* watchFile('message.text', (curr, prev) => {
* console.log(`the current mtime is: ${curr.mtime}`);
* console.log(`the previous mtime was: ${prev.mtime}`);
* });
* ```
*
 * These stat objects are instances of `fs.Stats`. If the `bigint` option is `true`,
* the numeric values in these objects are specified as `BigInt`s.
*
* To be notified when the file was modified, not just accessed, it is necessary
* to compare `curr.mtimeMs` and `prev.mtimeMs`.
*
* When an `fs.watchFile` operation results in an `ENOENT` error, it
* will invoke the listener once, with all the fields zeroed (or, for dates, the
* Unix Epoch). If the file is created later on, the listener will be called
* again, with the latest stat objects. This is a change in functionality since
* v0.10.
*
 * Using {@link watch} is more efficient than `fs.watchFile` and `fs.unwatchFile`. `fs.watch` should be used instead of `fs.watchFile` and `fs.unwatchFile` when possible.
*
* When a file being watched by `fs.watchFile()` disappears and reappears,
* then the contents of `previous` in the second callback event (the file's
* reappearance) will be the same as the contents of `previous` in the first
* callback event (its disappearance).
*
* This happens when:
*
* * the file is deleted, followed by a restore
* * the file is renamed and then renamed a second time back to its original name
* @since v0.1.31
*/
export function watchFile(
filename: PathLike,
options:
| (WatchFileOptions & {
bigint?: false | undefined;
})
| undefined,
listener: StatsListener
): StatWatcher;
export function watchFile(
filename: PathLike,
options:
| (WatchFileOptions & {
bigint: true;
})
| undefined,
listener: BigIntStatsListener
): StatWatcher;
/**
* Watch for changes on `filename`. The callback `listener` will be called each time the file is accessed.
* @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
* @param listener The callback listener will be called each time the file is accessed.
*/
export function watchFile(filename: PathLike, listener: StatsListener): StatWatcher;
/**
* Stop watching for changes on `filename`. If `listener` is specified, only that
* particular listener is removed. Otherwise, _all_ listeners are removed,
* effectively stopping watching of `filename`.
*
* Calling `fs.unwatchFile()` with a filename that is not being watched is a
* no-op, not an error.
*
 * Using {@link watch} is more efficient than `fs.watchFile()` and `fs.unwatchFile()`. `fs.watch()` should be used instead of `fs.watchFile()` and `fs.unwatchFile()` when possible.
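 *
 * A minimal sketch (the filename is illustrative):
 *
 * ```js
 * import { watchFile, unwatchFile } from 'fs';
 *
 * const listener = (curr, prev) => {
 *   console.log(`the current mtime is: ${curr.mtime}`);
 * };
 * watchFile('message.txt', listener);
 * // Later, detach only this listener:
 * unwatchFile('message.txt', listener);
 * ```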
* @since v0.1.31
* @param listener Optional, a listener previously attached using `fs.watchFile()`
*/
export function unwatchFile(filename: PathLike, listener?: StatsListener): void;
export function unwatchFile(filename: PathLike, listener?: BigIntStatsListener): void;
export interface WatchOptions extends Abortable {
encoding?: BufferEncoding | 'buffer' | undefined;
persistent?: boolean | undefined;
recursive?: boolean | undefined;
}
export type WatchEventType = 'rename' | 'change';
export type WatchListener<T> = (event: WatchEventType, filename: T | null) => void;
export type StatsListener = (curr: Stats, prev: Stats) => void;
export type BigIntStatsListener = (curr: BigIntStats, prev: BigIntStats) => void;
/**
* Watch for changes on `filename`, where `filename` is either a file or a
* directory.
*
* The second argument is optional. If `options` is provided as a string, it
* specifies the `encoding`. Otherwise `options` should be passed as an object.
*
 * The listener callback gets two arguments `(eventType, filename)`. `eventType` is either `'rename'` or `'change'`, and `filename` is the name of the file
* which triggered the event.
*
* On most platforms, `'rename'` is emitted whenever a filename appears or
* disappears in the directory.
*
 * The listener callback is attached to the `'change'` event fired by `fs.FSWatcher`, but it is not the same thing as the `'change'` value of `eventType`.
*
* If a `signal` is passed, aborting the corresponding AbortController will close
* the returned `fs.FSWatcher`.
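 *
 * A minimal sketch (watching the current directory is illustrative):
 *
 * ```js
 * import { watch } from 'fs';
 *
 * const watcher = watch('.', (eventType, filename) => {
 *   console.log(`${eventType}: ${filename}`);
 * });
 * // Call watcher.close() to stop watching.
 * ```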
* @since v0.5.10
 * @param listener The callback invoked with `(eventType, filename)` each time a change is detected.
*/
export function watch(
filename: PathLike,
options:
| (WatchOptions & {
encoding: 'buffer';
})
| 'buffer',
listener?: WatchListener<Buffer>
): FSWatcher;
/**
* Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
* @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
* @param options Either the encoding for the filename provided to the listener, or an object optionally specifying encoding, persistent, and recursive options.
* If `encoding` is not supplied, the default of `'utf8'` is used.
* If `persistent` is not supplied, the default of `true` is used.
* If `recursive` is not supplied, the default of `false` is used.
*/
export function watch(filename: PathLike, options?: WatchOptions | BufferEncoding | null, listener?: WatchListener<string>): FSWatcher;
/**
* Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
* @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
* @param options Either the encoding for the filename provided to the listener, or an object optionally specifying encoding, persistent, and recursive options.
* If `encoding` is not supplied, the default of `'utf8'` is used.
* If `persistent` is not supplied, the default of `true` is used.
* If `recursive` is not supplied, the default of `false` is used.
*/
export function watch(filename: PathLike, options: WatchOptions | string, listener?: WatchListener<string | Buffer>): FSWatcher;
/**
* Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
* @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
*/
export function watch(filename: PathLike, listener?: WatchListener<string>): FSWatcher;
/**
* Test whether or not the given path exists by checking with the file system.
* Then call the `callback` argument with either true or false:
*
* ```js
* import { exists } from 'fs';
*
* exists('/etc/passwd', (e) => {
* console.log(e ? 'it exists' : 'no passwd!');
* });
* ```
*
* **The parameters for this callback are not consistent with other Node.js**
 * **callbacks.** Normally, the first parameter to a Node.js callback is an `err` parameter, optionally followed by other parameters. The `fs.exists()` callback
* has only one boolean parameter. This is one reason `fs.access()` is recommended
* instead of `fs.exists()`.
*
 * Using `fs.exists()` to check for the existence of a file before calling `fs.open()`, `fs.readFile()` or `fs.writeFile()` is not recommended. Doing
* so introduces a race condition, since other processes may change the file's
* state between the two calls. Instead, user code should open/read/write the
* file directly and handle the error raised if the file does not exist.
*
* **write (NOT RECOMMENDED)**
*
* ```js
* import { exists, open, close } from 'fs';
*
* exists('myfile', (e) => {
* if (e) {
* console.error('myfile already exists');
* } else {
* open('myfile', 'wx', (err, fd) => {
* if (err) throw err;
*
* try {
* writeMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* }
* });
* ```
*
* **write (RECOMMENDED)**
*
* ```js
* import { open, close } from 'fs';
* open('myfile', 'wx', (err, fd) => {
* if (err) {
* if (err.code === 'EEXIST') {
* console.error('myfile already exists');
* return;
* }
*
* throw err;
* }
*
* try {
* writeMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* ```
*
* **read (NOT RECOMMENDED)**
*
* ```js
* import { open, close, exists } from 'fs';
*
* exists('myfile', (e) => {
* if (e) {
* open('myfile', 'r', (err, fd) => {
* if (err) throw err;
*
* try {
* readMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* } else {
* console.error('myfile does not exist');
* }
* });
* ```
*
* **read (RECOMMENDED)**
*
* ```js
* import { open, close } from 'fs';
*
* open('myfile', 'r', (err, fd) => {
* if (err) {
* if (err.code === 'ENOENT') {
* console.error('myfile does not exist');
* return;
* }
*
* throw err;
* }
*
* try {
* readMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* ```
*
* The "not recommended" examples above check for existence and then use the
* file; the "recommended" examples are better because they use the file directly
* and handle the error, if any.
*
* In general, check for the existence of a file only if the file won’t be
* used directly, for example when its existence is a signal from another
* process.
* @since v0.0.2
* @deprecated Since v1.0.0 - Use {@link stat} or {@link access} instead.
*/
export function exists(path: PathLike, callback: (exists: boolean) => void): void;
/** @deprecated */
export namespace exists {
/**
* @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
*/
function __promisify__(path: PathLike): Promise<boolean>;
}
/**
* Returns `true` if the path exists, `false` otherwise.
*
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link exists}.
*
 * `fs.exists()` is deprecated, but `fs.existsSync()` is not. The `callback` parameter to `fs.exists()` accepts parameters that are inconsistent with other
* Node.js callbacks. `fs.existsSync()` does not use a callback.
*
* ```js
* import { existsSync } from 'fs';
*
* if (existsSync('/etc/passwd'))
* console.log('The path exists.');
* ```
* @since v0.1.21
*/
export function existsSync(path: PathLike): boolean;
export namespace constants {
// File Access Constants
/** Constant for fs.access(). File is visible to the calling process. */
const F_OK: number;
/** Constant for fs.access(). File can be read by the calling process. */
const R_OK: number;
/** Constant for fs.access(). File can be written by the calling process. */
const W_OK: number;
/** Constant for fs.access(). File can be executed by the calling process. */
const X_OK: number;
// File Copy Constants
/** Constant for fs.copyFile. Flag indicating the destination file should not be overwritten if it already exists. */
const COPYFILE_EXCL: number;
/**
* Constant for fs.copyFile. copy operation will attempt to create a copy-on-write reflink.
* If the underlying platform does not support copy-on-write, then a fallback copy mechanism is used.
*/
const COPYFILE_FICLONE: number;
/**
* Constant for fs.copyFile. Copy operation will attempt to create a copy-on-write reflink.
* If the underlying platform does not support copy-on-write, then the operation will fail with an error.
*/
const COPYFILE_FICLONE_FORCE: number;
// File Open Constants
/** Constant for fs.open(). Flag indicating to open a file for read-only access. */
const O_RDONLY: number;
/** Constant for fs.open(). Flag indicating to open a file for write-only access. */
const O_WRONLY: number;
/** Constant for fs.open(). Flag indicating to open a file for read-write access. */
const O_RDWR: number;
/** Constant for fs.open(). Flag indicating to create the file if it does not already exist. */
const O_CREAT: number;
/** Constant for fs.open(). Flag indicating that opening a file should fail if the O_CREAT flag is set and the file already exists. */
const O_EXCL: number;
/**
* Constant for fs.open(). Flag indicating that if path identifies a terminal device,
* opening the path shall not cause that terminal to become the controlling terminal for the process
* (if the process does not already have one).
*/
const O_NOCTTY: number;
/** Constant for fs.open(). Flag indicating that if the file exists and is a regular file, and the file is opened successfully for write access, its length shall be truncated to zero. */
const O_TRUNC: number;
/** Constant for fs.open(). Flag indicating that data will be appended to the end of the file. */
const O_APPEND: number;
/** Constant for fs.open(). Flag indicating that the open should fail if the path is not a directory. */
const O_DIRECTORY: number;
/**
* constant for fs.open().
* Flag indicating reading accesses to the file system will no longer result in
* an update to the atime information associated with the file.
* This flag is available on Linux operating systems only.
*/
const O_NOATIME: number;
/** Constant for fs.open(). Flag indicating that the open should fail if the path is a symbolic link. */
const O_NOFOLLOW: number;
/** Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O. */
const O_SYNC: number;
/** Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O with write operations waiting for data integrity. */
const O_DSYNC: number;
/** Constant for fs.open(). Flag indicating to open the symbolic link itself rather than the resource it is pointing to. */
const O_SYMLINK: number;
/** Constant for fs.open(). When set, an attempt will be made to minimize caching effects of file I/O. */
const O_DIRECT: number;
/** Constant for fs.open(). Flag indicating to open the file in nonblocking mode when possible. */
const O_NONBLOCK: number;
// File Type Constants
/** Constant for fs.Stats mode property for determining a file's type. Bit mask used to extract the file type code. */
const S_IFMT: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a regular file. */
const S_IFREG: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a directory. */
const S_IFDIR: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a character-oriented device file. */
const S_IFCHR: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a block-oriented device file. */
const S_IFBLK: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a FIFO/pipe. */
const S_IFIFO: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a symbolic link. */
const S_IFLNK: number;
/** Constant for fs.Stats mode property for determining a file's type. File type constant for a socket. */
const S_IFSOCK: number;
// File Mode Constants
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by owner. */
const S_IRWXU: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by owner. */
const S_IRUSR: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by owner. */
const S_IWUSR: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by owner. */
const S_IXUSR: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by group. */
const S_IRWXG: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by group. */
const S_IRGRP: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by group. */
const S_IWGRP: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by group. */
const S_IXGRP: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by others. */
const S_IRWXO: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by others. */
const S_IROTH: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by others. */
const S_IWOTH: number;
/** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by others. */
const S_IXOTH: number;
/**
* When set, a memory file mapping is used to access the file. This flag
* is available on Windows operating systems only. On other operating systems,
* this flag is ignored.
*/
const UV_FS_O_FILEMAP: number;
}
/**
* Tests a user's permissions for the file or directory specified by `path`.
* The `mode` argument is an optional integer that specifies the accessibility
     * checks to be performed. `mode` should be either the value `fs.constants.F_OK` or a mask consisting of the bitwise OR of any of `fs.constants.R_OK`, `fs.constants.W_OK`, and `fs.constants.X_OK`
     * (e.g. `fs.constants.W_OK | fs.constants.R_OK`). Check `File access constants` for
* possible values of `mode`.
*
* The final argument, `callback`, is a callback function that is invoked with
* a possible error argument. If any of the accessibility checks fail, the error
     * argument will be an `Error` object. The following examples check if `package.json` exists, and if it is readable or writable.
*
* ```js
* import { access, constants } from 'fs';
*
* const file = 'package.json';
*
* // Check if the file exists in the current directory.
* access(file, constants.F_OK, (err) => {
* console.log(`${file} ${err ? 'does not exist' : 'exists'}`);
* });
*
* // Check if the file is readable.
* access(file, constants.R_OK, (err) => {
* console.log(`${file} ${err ? 'is not readable' : 'is readable'}`);
* });
*
* // Check if the file is writable.
* access(file, constants.W_OK, (err) => {
* console.log(`${file} ${err ? 'is not writable' : 'is writable'}`);
* });
*
* // Check if the file is readable and writable.
* access(file, constants.R_OK | constants.W_OK, (err) => {
* console.log(`${file} ${err ? 'is not' : 'is'} readable and writable`);
* });
* ```
*
     * Do not use `fs.access()` to check for the accessibility of a file before calling `fs.open()`, `fs.readFile()` or `fs.writeFile()`. Doing
* so introduces a race condition, since other processes may change the file's
* state between the two calls. Instead, user code should open/read/write the
* file directly and handle the error raised if the file is not accessible.
*
* **write (NOT RECOMMENDED)**
*
* ```js
* import { access, open, close } from 'fs';
*
* access('myfile', (err) => {
* if (!err) {
* console.error('myfile already exists');
* return;
* }
*
* open('myfile', 'wx', (err, fd) => {
* if (err) throw err;
*
* try {
* writeMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* });
* ```
*
* **write (RECOMMENDED)**
*
* ```js
* import { open, close } from 'fs';
*
* open('myfile', 'wx', (err, fd) => {
* if (err) {
* if (err.code === 'EEXIST') {
* console.error('myfile already exists');
* return;
* }
*
* throw err;
* }
*
* try {
* writeMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* ```
*
* **read (NOT RECOMMENDED)**
*
* ```js
* import { access, open, close } from 'fs';
* access('myfile', (err) => {
* if (err) {
* if (err.code === 'ENOENT') {
* console.error('myfile does not exist');
* return;
* }
*
* throw err;
* }
*
* open('myfile', 'r', (err, fd) => {
* if (err) throw err;
*
* try {
* readMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* });
* ```
*
* **read (RECOMMENDED)**
*
* ```js
* import { open, close } from 'fs';
*
* open('myfile', 'r', (err, fd) => {
* if (err) {
* if (err.code === 'ENOENT') {
* console.error('myfile does not exist');
* return;
* }
*
* throw err;
* }
*
* try {
* readMyData(fd);
* } finally {
* close(fd, (err) => {
* if (err) throw err;
* });
* }
* });
* ```
*
* The "not recommended" examples above check for accessibility and then use the
* file; the "recommended" examples are better because they use the file directly
* and handle the error, if any.
*
* In general, check for the accessibility of a file only if the file will not be
* used directly, for example when its accessibility is a signal from another
* process.
*
     * On Windows, access-control lists (ACLs) on a directory may limit access to
* a file or directory. The `fs.access()` function, however, does not check the
* ACL and therefore may report that a path is accessible even if the ACL restricts
* the user from reading or writing to it.
* @since v0.11.15
* @param [mode=fs.constants.F_OK]
*/
export function access(path: PathLike, mode: number | undefined, callback: NoParamCallback): void;
/**
* Asynchronously tests a user's permissions for the file specified by path.
* @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
*/
export function access(path: PathLike, callback: NoParamCallback): void;
export namespace access {
/**
* Asynchronously tests a user's permissions for the file specified by path.
* @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
* URL support is _experimental_.
*/
function __promisify__(path: PathLike, mode?: number): Promise<void>;
}
/**
* Synchronously tests a user's permissions for the file or directory specified
* by `path`. The `mode` argument is an optional integer that specifies the
     * accessibility checks to be performed. `mode` should be either the value `fs.constants.F_OK` or a mask consisting of the bitwise OR of any of `fs.constants.R_OK`, `fs.constants.W_OK`, and
     * `fs.constants.X_OK` (e.g. `fs.constants.W_OK | fs.constants.R_OK`). Check `File access constants` for
* possible values of `mode`.
*
* If any of the accessibility checks fail, an `Error` will be thrown. Otherwise,
* the method will return `undefined`.
*
* ```js
* import { accessSync, constants } from 'fs';
*
* try {
     * accessSync('/etc/passwd', constants.R_OK | constants.W_OK);
* console.log('can read/write');
* } catch (err) {
* console.error('no access!');
* }
* ```
* @since v0.11.15
* @param [mode=fs.constants.F_OK]
*/
export function accessSync(path: PathLike, mode?: number): void;
interface StreamOptions {
flags?: string | undefined;
encoding?: BufferEncoding | undefined;
fd?: number | promises.FileHandle | undefined;
mode?: number | undefined;
autoClose?: boolean | undefined;
/**
* @default false
*/
emitClose?: boolean | undefined;
start?: number | undefined;
highWaterMark?: number | undefined;
}
interface ReadStreamOptions extends StreamOptions {
end?: number | undefined;
}
/**
     * Unlike the 16 KiB default `highWaterMark` for a `stream.Readable`, the stream
     * returned by this method has a default `highWaterMark` of 64 KiB.
*
* `options` can include `start` and `end` values to read a range of bytes from
* the file instead of the entire file. Both `start` and `end` are inclusive and
* start counting at 0, allowed values are in the
* \[0, [`Number.MAX_SAFE_INTEGER`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER)\] range. If `fd` is specified and `start` is
* omitted or `undefined`, `fs.createReadStream()` reads sequentially from the
* current file position. The `encoding` can be any one of those accepted by `Buffer`.
*
* If `fd` is specified, `ReadStream` will ignore the `path` argument and will use
* the specified file descriptor. This means that no `'open'` event will be
* emitted. `fd` should be blocking; non-blocking `fd`s should be passed to `net.Socket`.
*
* If `fd` points to a character device that only supports blocking reads
* (such as keyboard or sound card), read operations do not finish until data is
* available. This can prevent the process from exiting and the stream from
* closing naturally.
*
* By default, the stream will emit a `'close'` event after it has been
* destroyed. Set the `emitClose` option to `false` to change this behavior.
*
     * By providing the `fs` option, it is possible to override the corresponding `fs` implementations for `open`, `read`, and `close`. When providing the `fs` option,
     * an override for `read` is required. If no `fd` is provided, an override for `open` is also required. If `autoClose` is `true`, an override for `close` is
     * also required.
*
* ```js
* import { createReadStream } from 'fs';
*
* // Create a stream from some character device.
* const stream = createReadStream('/dev/input/event0');
* setTimeout(() => {
* stream.close(); // This may not close the stream.
* // Artificially marking end-of-stream, as if the underlying resource had
* // indicated end-of-file by itself, allows the stream to close.
* // This does not cancel pending read operations, and if there is such an
* // operation, the process may still not be able to exit successfully
* // until it finishes.
* stream.push(null);
* stream.read(0);
* }, 100);
* ```
*
* If `autoClose` is false, then the file descriptor won't be closed, even if
* there's an error. It is the application's responsibility to close it and make
* sure there's no file descriptor leak. If `autoClose` is set to true (default
* behavior), on `'error'` or `'end'` the file descriptor will be closed
* automatically.
*
* `mode` sets the file mode (permission and sticky bits), but only if the
* file was created.
*
* An example to read the last 10 bytes of a file which is 100 bytes long:
*
* ```js
* import { createReadStream } from 'fs';
*
* createReadStream('sample.txt', { start: 90, end: 99 });
* ```
*
* If `options` is a string, then it specifies the encoding.
* @since v0.1.31
*/
export function createReadStream(path: PathLike, options?: BufferEncoding | ReadStreamOptions): ReadStream;
/**
* `options` may also include a `start` option to allow writing data at some
* position past the beginning of the file, allowed values are in the
* \[0, [`Number.MAX_SAFE_INTEGER`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER)\] range. Modifying a file rather than
* replacing it may require the `flags` option to be set to `r+` rather than the
* default `w`. The `encoding` can be any one of those accepted by `Buffer`.
*
     * If `autoClose` is set to true (default behavior), on `'error'` or `'finish'` the file descriptor will be closed automatically. If `autoClose` is false,
* then the file descriptor won't be closed, even if there's an error.
* It is the application's responsibility to close it and make sure there's no
* file descriptor leak.
*
* By default, the stream will emit a `'close'` event after it has been
* destroyed. Set the `emitClose` option to `false` to change this behavior.
*
     * By providing the `fs` option it is possible to override the corresponding `fs` implementations for `open`, `write`, `writev` and `close`. Overriding `write()` without `writev()` can reduce
     * performance as some optimizations (`_writev()`)
     * will be disabled. When providing the `fs` option, overrides for at least one of `write` and `writev` are required. If no `fd` option is supplied, an override
     * for `open` is also required. If `autoClose` is `true`, an override for `close` is also required.
*
     * Like `fs.ReadStream`, if `fd` is specified, `fs.WriteStream` will ignore the `path` argument and will use the specified file descriptor. This means that no `'open'` event will be
* emitted. `fd` should be blocking; non-blocking `fd`s
* should be passed to `net.Socket`.
*
* If `options` is a string, then it specifies the encoding.
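     *
     * A minimal usage sketch (the path and contents are illustrative):
     *
     * ```js
     * import { createWriteStream } from 'fs';
     *
     * // Append to a log file, creating it if it does not exist.
     * const stream = createWriteStream('./app.log', { flags: 'a' });
     * stream.write('first line\n');
     * stream.end('last line\n');
     * ```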
* @since v0.1.31
*/
export function createWriteStream(path: PathLike, options?: BufferEncoding | StreamOptions): WriteStream;
/**
* Forces all currently queued I/O operations associated with the file to the
* operating system's synchronized I/O completion state. Refer to the POSIX [`fdatasync(2)`](http://man7.org/linux/man-pages/man2/fdatasync.2.html) documentation for details. No arguments other
* than a possible
* exception are given to the completion callback.
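     *
     * A minimal usage sketch (the path is illustrative):
     *
     * ```js
     * import { open, write, fdatasync, close } from 'fs';
     *
     * open('/tmp/data.txt', 'w', (err, fd) => {
     *   if (err) throw err;
     *   write(fd, 'important data', (err) => {
     *     if (err) throw err;
     *     // Flush the file data (but not necessarily its metadata) to disk.
     *     fdatasync(fd, (err) => {
     *       if (err) throw err;
     *       close(fd, (err) => {
     *         if (err) throw err;
     *       });
     *     });
     *   });
     * });
     * ```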
* @since v0.1.96
*/
export function fdatasync(fd: number, callback: NoParamCallback): void;
export namespace fdatasync {
/**
* Asynchronous fdatasync(2) - synchronize a file's in-core state with storage device.
* @param fd A file descriptor.
*/
function __promisify__(fd: number): Promise<void>;
}
/**
* Forces all currently queued I/O operations associated with the file to the
* operating system's synchronized I/O completion state. Refer to the POSIX [`fdatasync(2)`](http://man7.org/linux/man-pages/man2/fdatasync.2.html) documentation for details. Returns `undefined`.
* @since v0.1.96
*/
export function fdatasyncSync(fd: number): void;
/**
* Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
* already exists. No arguments other than a possible exception are given to the
* callback function. Node.js makes no guarantees about the atomicity of the copy
* operation. If an error occurs after the destination file has been opened for
* writing, Node.js will attempt to remove the destination.
*
* `mode` is an optional integer that specifies the behavior
* of the copy operation. It is possible to create a mask consisting of the bitwise
* OR of two or more values (e.g.`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
*
* * `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
* exists.
* * `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
* copy-on-write reflink. If the platform does not support copy-on-write, then a
* fallback copy mechanism is used.
* * `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
* create a copy-on-write reflink. If the platform does not support
* copy-on-write, then the operation will fail.
*
* ```js
* import { copyFile, constants } from 'fs';
*
* function callback(err) {
* if (err) throw err;
* console.log('source.txt was copied to destination.txt');
* }
*
* // destination.txt will be created or overwritten by default.
* copyFile('source.txt', 'destination.txt', callback);
*
* // By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
* copyFile('source.txt', 'destination.txt', constants.COPYFILE_EXCL, callback);
* ```
* @since v8.5.0
* @param src source filename to copy
* @param dest destination filename of the copy operation
* @param [mode=0] modifiers for copy operation.
*/
export function copyFile(src: PathLike, dest: PathLike, callback: NoParamCallback): void;
export function copyFile(src: PathLike, dest: PathLike, mode: number, callback: NoParamCallback): void;
export namespace copyFile {
function __promisify__(src: PathLike, dst: PathLike, mode?: number): Promise<void>;
}
/**
* Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
* already exists. Returns `undefined`. Node.js makes no guarantees about the
* atomicity of the copy operation. If an error occurs after the destination file
* has been opened for writing, Node.js will attempt to remove the destination.
*
* `mode` is an optional integer that specifies the behavior
* of the copy operation. It is possible to create a mask consisting of the bitwise
* OR of two or more values (e.g.`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
*
* * `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
* exists.
* * `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
* copy-on-write reflink. If the platform does not support copy-on-write, then a
* fallback copy mechanism is used.
* * `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
* create a copy-on-write reflink. If the platform does not support
* copy-on-write, then the operation will fail.
*
* ```js
* import { copyFileSync, constants } from 'fs';
*
* // destination.txt will be created or overwritten by default.
* copyFileSync('source.txt', 'destination.txt');
* console.log('source.txt was copied to destination.txt');
*
* // By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
* copyFileSync('source.txt', 'destination.txt', constants.COPYFILE_EXCL);
* ```
* @since v8.5.0
* @param src source filename to copy
* @param dest destination filename of the copy operation
* @param [mode=0] modifiers for copy operation.
*/
export function copyFileSync(src: PathLike, dest: PathLike, mode?: number): void;
/**
     * Write an array of `ArrayBufferView`s to the file specified by `fd` using `writev()`.
*
* `position` is the offset from the beginning of the file where this data
* should be written. If `typeof position !== 'number'`, the data will be written
* at the current position.
*
     * The callback will be given three arguments: `err`, `bytesWritten`, and `buffers`. `bytesWritten` is how many bytes were written from `buffers`.
*
     * If this method is `util.promisify()`ed, it returns a promise for an `Object` with `bytesWritten` and `buffers` properties.
*
* It is unsafe to use `fs.writev()` multiple times on the same file without
* waiting for the callback. For this scenario, use {@link createWriteStream}.
*
* On Linux, positional writes don't work when the file is opened in append mode.
* The kernel ignores the position argument and always appends the data to
* the end of the file.
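     *
     * A minimal usage sketch (the path is illustrative):
     *
     * ```js
     * import { open, writev, close } from 'fs';
     *
     * open('/tmp/out.bin', 'w', (err, fd) => {
     *   if (err) throw err;
     *   // Gather-write two buffers in a single system call.
     *   const buffers = [Buffer.from('abc'), Buffer.from('def')];
     *   writev(fd, buffers, (err, bytesWritten) => {
     *     if (err) throw err;
     *     console.log(`wrote ${bytesWritten} bytes`);
     *     close(fd, (err) => {
     *       if (err) throw err;
     *     });
     *   });
     * });
     * ```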
* @since v12.9.0
*/
export function writev(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, cb: (err: NodeJS.ErrnoException | null, bytesWritten: number, buffers: NodeJS.ArrayBufferView[]) => void): void;
export function writev(
fd: number,
buffers: ReadonlyArray<NodeJS.ArrayBufferView>,
position: number,
cb: (err: NodeJS.ErrnoException | null, bytesWritten: number, buffers: NodeJS.ArrayBufferView[]) => void
): void;
export interface WriteVResult {
bytesWritten: number;
buffers: NodeJS.ArrayBufferView[];
}
export namespace writev {
function __promisify__(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): Promise<WriteVResult>;
}
/**
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link writev}.
* @since v12.9.0
* @return The number of bytes written.
*/
export function writevSync(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): number;
/**
* Read from a file specified by `fd` and write to an array of `ArrayBufferView`s
* using `readv()`.
*
* `position` is the offset from the beginning of the file from where data
* should be read. If `typeof position !== 'number'`, the data will be read
* from the current position.
*
     * The callback will be given three arguments: `err`, `bytesRead`, and `buffers`. `bytesRead` is how many bytes were read from the file.
*
     * If this method is invoked as its `util.promisify()`ed version, it returns
* a promise for an `Object` with `bytesRead` and `buffers` properties.
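     *
     * A minimal usage sketch (the path is illustrative):
     *
     * ```js
     * import { open, readv, close } from 'fs';
     *
     * open('/tmp/in.bin', 'r', (err, fd) => {
     *   if (err) throw err;
     *   // Scatter-read into two buffers in a single system call.
     *   const buffers = [Buffer.alloc(4), Buffer.alloc(4)];
     *   readv(fd, buffers, (err, bytesRead) => {
     *     if (err) throw err;
     *     console.log(`read ${bytesRead} bytes`);
     *     close(fd, (err) => {
     *       if (err) throw err;
     *     });
     *   });
     * });
     * ```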
* @since v13.13.0, v12.17.0
*/
export function readv(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, cb: (err: NodeJS.ErrnoException | null, bytesRead: number, buffers: NodeJS.ArrayBufferView[]) => void): void;
export function readv(
fd: number,
buffers: ReadonlyArray<NodeJS.ArrayBufferView>,
position: number,
cb: (err: NodeJS.ErrnoException | null, bytesRead: number, buffers: NodeJS.ArrayBufferView[]) => void
): void;
export interface ReadVResult {
bytesRead: number;
buffers: NodeJS.ArrayBufferView[];
}
export namespace readv {
function __promisify__(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): Promise<ReadVResult>;
}
/**
* For detailed information, see the documentation of the asynchronous version of
* this API: {@link readv}.
* @since v13.13.0, v12.17.0
* @return The number of bytes read.
*/
export function readvSync(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): number;
export interface OpenDirOptions {
encoding?: BufferEncoding | undefined;
/**
* Number of directory entries that are buffered
* internally when reading from the directory. Higher values lead to better
* performance but higher memory usage.
* @default 32
*/
bufferSize?: number | undefined;
}
/**
* Synchronously open a directory. See [`opendir(3)`](http://man7.org/linux/man-pages/man3/opendir.3.html).
*
* Creates an `fs.Dir`, which contains all further functions for reading from
* and cleaning up the directory.
*
* The `encoding` option sets the encoding for the `path` while opening the
* directory and subsequent read operations.
* @since v12.12.0
*/
export function opendirSync(path: PathLike, options?: OpenDirOptions): Dir;
/**
* Asynchronously open a directory. See the POSIX [`opendir(3)`](http://man7.org/linux/man-pages/man3/opendir.3.html) documentation for
* more details.
*
* Creates an `fs.Dir`, which contains all further functions for reading from
* and cleaning up the directory.
*
* The `encoding` option sets the encoding for the `path` while opening the
* directory and subsequent read operations.
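     *
     * A minimal usage sketch (the directory path is illustrative):
     *
     * ```js
     * import { opendir } from 'fs';
     *
     * opendir('./my-dir', async (err, dir) => {
     *   if (err) throw err;
     *   // fs.Dir is async-iterable; iterating consumes and closes it.
     *   for await (const dirent of dir) {
     *     console.log(dirent.name);
     *   }
     * });
     * ```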
* @since v12.12.0
*/
export function opendir(path: PathLike, cb: (err: NodeJS.ErrnoException | null, dir: Dir) => void): void;
export function opendir(path: PathLike, options: OpenDirOptions, cb: (err: NodeJS.ErrnoException | null, dir: Dir) => void): void;
export namespace opendir {
function __promisify__(path: PathLike, options?: OpenDirOptions): Promise<Dir>;
}
export interface BigIntStats extends StatsBase<bigint> {
atimeNs: bigint;
mtimeNs: bigint;
ctimeNs: bigint;
birthtimeNs: bigint;
}
export interface BigIntOptions {
bigint: true;
}
export interface StatOptions {
bigint?: boolean | undefined;
}
export interface StatSyncOptions extends StatOptions {
throwIfNoEntry?: boolean | undefined;
}
interface CopyOptionsBase {
/**
* Dereference symlinks
* @default false
*/
dereference?: boolean;
/**
     * When `force` is `false` and the destination
     * exists, throw an error.
* @default false
*/
errorOnExist?: boolean;
/**
     * Overwrite existing file or directory. _The copy
     * operation will ignore errors if you set this to false and the destination
     * exists. Use the `errorOnExist` option to change this behavior._
* @default true
*/
force?: boolean;
/**
* Modifiers for copy operation. See `mode` flag of {@link copyFileSync()}
*/
mode?: number;
/**
* When `true` timestamps from `src` will
* be preserved.
* @default false
*/
preserveTimestamps?: boolean;
/**
* Copy directories recursively.
* @default false
*/
recursive?: boolean;
/**
     * When `true`, path resolution for symlinks will be skipped.
* @default false
*/
verbatimSymlinks?: boolean;
}
export interface CopyOptions extends CopyOptionsBase {
/**
* Function to filter copied files/directories. Return
* `true` to copy the item, `false` to ignore it.
*/
filter?(source: string, destination: string): boolean | Promise<boolean>;
}
export interface CopySyncOptions extends CopyOptionsBase {
/**
* Function to filter copied files/directories. Return
* `true` to copy the item, `false` to ignore it.
*/
filter?(source: string, destination: string): boolean;
}
/**
* Asynchronously copies the entire directory structure from `src` to `dest`,
* including subdirectories and files.
*
* When copying a directory to another directory, globs are not supported and
* behavior is similar to `cp dir1/ dir2/`.
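     *
     * A minimal usage sketch (paths are illustrative):
     *
     * ```js
     * import { cp } from 'fs';
     *
     * // Recursively copy a directory tree.
     * cp('./source-dir', './dest-dir', { recursive: true }, (err) => {
     *   if (err) throw err;
     *   console.log('copied');
     * });
     * ```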
* @since v16.7.0
* @experimental
     * @param source source path to copy.
     * @param destination destination path to copy to.
*/
export function cp(source: string | URL, destination: string | URL, callback: (err: NodeJS.ErrnoException | null) => void): void;
export function cp(source: string | URL, destination: string | URL, opts: CopyOptions, callback: (err: NodeJS.ErrnoException | null) => void): void;
/**
* Synchronously copies the entire directory structure from `src` to `dest`,
* including subdirectories and files.
*
* When copying a directory to another directory, globs are not supported and
* behavior is similar to `cp dir1/ dir2/`.
* @since v16.7.0
* @experimental
     * @param source source path to copy.
     * @param destination destination path to copy to.
*/
export function cpSync(source: string | URL, destination: string | URL, opts?: CopySyncOptions): void;
}
declare module 'node:fs' {
export * from 'fs';
} | PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.7.7.rst | ===========================
Djblets 0.7.7 Release Notes
===========================
**Release date**: December 16, 2012
djblets.datagrid
================
* Fixed a possible XSS exploit in datagrids.
Patch by Alexander Artemenko.
* Failures during datagrid rendering now result in a traceback.
djblets.extensions
==================
* Database evolutions are no longer applied using
  ``evolve --hint --execute``. This fixes unintentional database
  changes elsewhere.
JavaScript
==========
* The second display of an ``inlineEditor`` no longer breaks the size
of the editor.
Contributors
============
* Alexander Artemenko
* Christian Hammond
* David Trowbridge
| PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/plugins/media/editor_plugin.js | (function(){var d=tinymce.explode("id,name,width,height,style,align,class,hspace,vspace,bgcolor,type"),h=tinymce.makeMap(d.join(",")),b=tinymce.html.Node,f,a,g=tinymce.util.JSON,e;f=[["Flash","d27cdb6e-ae6d-11cf-96b8-444553540000","application/x-shockwave-flash","http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"],["ShockWave","166b1bca-3f9c-11cf-8075-444553540000","application/x-director","http://download.macromedia.com/pub/shockwave/cabs/director/sw.cab#version=8,5,1,0"],["WindowsMedia","6bf52a52-394a-11d3-b153-00c04f79faa6,22d6f312-b0f6-11d0-94ab-0080c74c7e95,05589fa1-c356-11ce-bf01-00aa0055595a","application/x-mplayer2","http://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"],["QuickTime","02bf25d5-8c17-4b23-bc80-d3488abddc6b","video/quicktime","http://www.apple.com/qtactivex/qtplugin.cab#version=6,0,2,0"],["RealMedia","cfcdaa03-8be4-11cf-b84b-0020afbbccfa","audio/x-pn-realaudio-plugin","http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"],["Java","8ad9c840-044e-11d1-b3e9-00805f499d93","application/x-java-applet","http://java.sun.com/products/plugin/autodl/jinstall-1_5_0-windows-i586.cab#Version=1,5,0,0"],["Silverlight","dfeaf541-f3e1-4c24-acac-99c30715084a","application/x-silverlight-2"],["Iframe"],["Video"],["EmbeddedAudio"],["Audio"]];function c(m){var l,j,k;if(m&&!m.splice){j=[];for(k=0;true;k++){if(m[k]){j[k]=m[k]}else{break}}return j}return m}tinymce.create("tinymce.plugins.MediaPlugin",{init:function(n,j){var r=this,l={},m,p,q,k;function o(i){return i&&i.nodeName==="IMG"&&n.dom.hasClass(i,"mceItemMedia")}r.editor=n;r.url=j;a="";for(m=0;m<f.length;m++){k=f[m][0];q={name:k,clsids:tinymce.explode(f[m][1]||""),mimes:tinymce.explode(f[m][2]||""),codebase:f[m][3]};for(p=0;p<q.clsids.length;p++){l["clsid:"+q.clsids[p]]=q}for(p=0;p<q.mimes.length;p++){l[q.mimes[p]]=q}l["mceItem"+k]=q;l[k.toLowerCase()]=q;a+=(a?"|":"")+k}tinymce.each(n.getParam("media_types","video=mp4,m4v,ogv,webm;silverlight=xap;flash=swf,flv;shockwave=dcr;quicktime=mov,qt,mpg,mpeg;shockwave=dcr;windowsmedia=avi,wmv,wm,asf,asx,wmx,wvx;realmedia=rm,ra,ram;java=jar;audio=mp3,ogg").split(";"),function(v){var s,u,t;v=v.split(/=/);u=tinymce.explode(v[1].toLowerCase());for(s=0;s<u.length;s++){t=l[v[0].toLowerCase()];if(t){l[u[s]]=t}}});a=new RegExp("write("+a+")\\(([^)]+)\\)");r.lookup=l;n.onPreInit.add(function(){n.schema.addValidElements("object[id|style|width|height|classid|codebase|*],param[name|value],embed[id|style|width|height|type|src|*],video[*],audio[*],source[*]");n.parser.addNodeFilter("object,embed,video,audio,script,iframe",function(s){var t=s.length;while(t--){r.objectToImg(s[t])}});n.serializer.addNodeFilter("img",function(s,u,t){var v=s.length,w;while(v--){w=s[v];if((w.attr("class")||"").indexOf("mceItemMedia")!==-1){r.imgToObject(w,t)}}})});n.onInit.add(function(){if(n.theme&&n.theme.onResolveName){n.theme.onResolveName.add(function(i,s){if(s.name==="img"&&n.dom.hasClass(s.node,"mceItemMedia")){s.name="media"}})}if(n&&n.plugins.contextmenu){n.plugins.contextmenu.onContextMenu.add(function(s,t,i){if(i.nodeName==="IMG"&&i.className.indexOf("mceItemMedia")!==-1){t.add({title:"media.edit",icon:"media",cmd:"mceMedia"})}})}});n.addCommand("mceMedia",function(){var s,i;i=n.selection.getNode();if(o(i)){s=n.dom.getAttrib(i,"data-mce-json");if(s){s=g.parse(s);tinymce.each(d,function(t){var 
u=n.dom.getAttrib(i,t);if(u){s[t]=u}});s.type=r.getType(i.className).name.toLowerCase()}}if(!s){s={type:"flash",video:{sources:[]},params:{}}}n.windowManager.open({file:j+"/media.htm",width:430+parseInt(n.getLang("media.delta_width",0)),height:500+parseInt(n.getLang("media.delta_height",0)),inline:1},{plugin_url:j,data:s})});n.addButton("media",{title:"media.desc",cmd:"mceMedia"});n.onNodeChange.add(function(s,i,t){i.setActive("media",o(t))})},convertUrl:function(k,n){var j=this,m=j.editor,l=m.settings,o=l.url_converter,i=l.url_converter_scope||j;if(!k){return k}if(n){return m.documentBaseURI.toAbsolute(k)}return o.call(i,k,"src","object")},getInfo:function(){return{longname:"Media",author:"Moxiecode Systems AB",authorurl:"http://tinymce.moxiecode.com",infourl:"http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/media",version:tinymce.majorVersion+"."+tinymce.minorVersion}},dataToImg:function(m,k){var r=this,o=r.editor,p=o.documentBaseURI,j,q,n,l;m.params.src=r.convertUrl(m.params.src,k);q=m.video.attrs;if(q){q.src=r.convertUrl(q.src,k)}if(q){q.poster=r.convertUrl(q.poster,k)}j=c(m.video.sources);if(j){for(l=0;l<j.length;l++){j[l].src=r.convertUrl(j[l].src,k)}}n=r.editor.dom.create("img",{id:m.id,style:m.style,align:m.align,hspace:m.hspace,vspace:m.vspace,src:r.editor.theme.url+"/img/trans.gif","class":"mceItemMedia mceItem"+r.getType(m.type).name,"data-mce-json":g.serialize(m,"'")});n.width=m.width||(m.type=="audio"?"300":"320");n.height=m.height||(m.type=="audio"?"32":"240");return n},dataToHtml:function(i,j){return this.editor.serializer.serialize(this.dataToImg(i,j),{forced_root_block:"",force_absolute:j})},htmlToData:function(k){var j,i,l;l={type:"flash",video:{sources:[]},params:{}};j=this.editor.parser.parse(k);i=j.getAll("img")[0];if(i){l=g.parse(i.attr("data-mce-json"));l.type=this.getType(i.attr("class")).name.toLowerCase();tinymce.each(d,function(m){var n=i.attr(m);if(n){l[m]=n}})}return l},getType:function(m){var k,j,l;j=tinymce.explode(m," ");for(k=0;k<j.length;k++){l=this.lookup[j[k]];if(l){return l}}},imgToObject:function(z,o){var u=this,p=u.editor,C,H,j,t,I,y,G,w,k,E,s,q,A,D,m,x,l,B,F;function r(i,n){var M,L,N,K,J;J=p.getParam("flash_video_player_url",u.convertUrl(u.url+"/moxieplayer.swf"));if(J){M=p.documentBaseURI;G.params.src=J;if(p.getParam("flash_video_player_absvideourl",true)){i=M.toAbsolute(i||"",true);n=M.toAbsolute(n||"",true)}N="";L=p.getParam("flash_video_player_flashvars",{url:"$url",poster:"$poster"});tinymce.each(L,function(P,O){P=P.replace(/\$url/,i||"");P=P.replace(/\$poster/,n||"");if(P.length>0){N+=(N?"&":"")+O+"="+escape(P)}});if(N.length){G.params.flashvars=N}K=p.getParam("flash_video_player_params",{allowfullscreen:true,allowscriptaccess:true});tinymce.each(K,function(P,O){G.params[O]=""+P})}}G=z.attr("data-mce-json");if(!G){return}G=g.parse(G);q=this.getType(z.attr("class"));B=z.attr("data-mce-style");if(!B){B=z.attr("style");if(B){B=p.dom.serializeStyle(p.dom.parseStyle(B,"img"))}}if(q.name==="Iframe"){x=new b("iframe",1);tinymce.each(d,function(i){var n=z.attr(i);if(i=="class"&&n){n=n.replace(/mceItem.+ ?/g,"")}if(n&&n.length>0){x.attr(i,n)}});for(I in G.params){x.attr(I,G.params[I])}x.attr({style:B,src:G.params.src});z.replace(x);return}if(this.editor.settings.media_use_script){x=new b("script",1).attr("type","text/javascript");y=new b("#text",3);y.value="write"+q.name+"("+g.serialize(tinymce.extend(G.params,{width:z.attr("width"),height:z.attr("height")}))+");";x.append(y);z.replace(x);return}if(q.name==="Video"&&G.video.sources[0]){C=new 
b("video",1).attr(tinymce.extend({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B},G.video.attrs));if(G.video.attrs){l=G.video.attrs.poster}k=G.video.sources=c(G.video.sources);for(A=0;A<k.length;A++){if(/\.mp4$/.test(k[A].src)){m=k[A].src}}if(!k[0].type){C.attr("src",k[0].src);k.splice(0,1)}for(A=0;A<k.length;A++){w=new b("source",1).attr(k[A]);w.shortEnded=true;C.append(w)}if(m){r(m,l);q=u.getType("flash")}else{G.params.src=""}}if(q.name==="Audio"&&G.video.sources[0]){F=new b("audio",1).attr(tinymce.extend({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B},G.video.attrs));if(G.video.attrs){l=G.video.attrs.poster}k=G.video.sources=c(G.video.sources);if(!k[0].type){F.attr("src",k[0].src);k.splice(0,1)}for(A=0;A<k.length;A++){w=new b("source",1).attr(k[A]);w.shortEnded=true;F.append(w)}G.params.src=""}if(q.name==="EmbeddedAudio"){j=new b("embed",1);j.shortEnded=true;j.attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B,type:z.attr("type")});for(I in G.params){j.attr(I,G.params[I])}tinymce.each(d,function(i){if(G[i]&&i!="type"){j.attr(i,G[i])}});G.params.src=""}if(G.params.src){if(/\.flv$/i.test(G.params.src)){r(G.params.src,"")}if(o&&o.force_absolute){G.params.src=p.documentBaseURI.toAbsolute(G.params.src)}H=new b("object",1).attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B});tinymce.each(d,function(i){var n=G[i];if(i=="class"&&n){n=n.replace(/mceItem.+ ?/g,"")}if(n&&i!="type"){H.attr(i,n)}});for(I in G.params){s=new b("param",1);s.shortEnded=true;y=G.params[I];if(I==="src"&&q.name==="WindowsMedia"){I="url"}s.attr({name:I,value:y});H.append(s)}if(this.editor.getParam("media_strict",true)){H.attr({data:G.params.src,type:q.mimes[0]})}else{H.attr({classid:"clsid:"+q.clsids[0],codebase:q.codebase});j=new b("embed",1);j.shortEnded=true;j.attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B,type:q.mimes[0]});for(I in G.params){j.attr(I,G.params[I])}tinymce.each(d,function(i){if(G[i]&&i!="type"){j.attr(i,G[i])}});H.append(j)}if(G.object_html){y=new b("#text",3);y.raw=true;y.value=G.object_html;H.append(y)}if(C){C.append(H)}}if(C){if(G.video_html){y=new b("#text",3);y.raw=true;y.value=G.video_html;C.append(y)}}if(F){if(G.video_html){y=new b("#text",3);y.raw=true;y.value=G.video_html;F.append(y)}}var v=C||F||H||j;if(v){z.replace(v)}else{z.remove()}},objectToImg:function(C){var L,k,F,s,M,N,y,A,x,G,E,t,q,I,B,l,K,o,H=this.lookup,m,z,v=this.editor.settings.url_converter,n=this.editor.settings.url_converter_scope,w,r,D,j;function u(i){return new tinymce.html.Serializer({inner:true,validate:false}).serialize(i)}function J(O,i){return H[(O.attr(i)||"").toLowerCase()]}function p(O){var i=O.replace(/^.*\.([^.]+)$/,"$1");return H[i.toLowerCase()||""]}if(!C.parent){return}if(C.name==="script"){if(C.firstChild){m=a.exec(C.firstChild.value)}if(!m){return}o=m[1];K={video:{},params:g.parse(m[2])};A=K.params.width;x=K.params.height}K=K||{video:{},params:{}};M=new b("img",1);M.attr({src:this.editor.theme.url+"/img/trans.gif"});N=C.name;if(N==="video"||N=="audio"){F=C;L=C.getAll("object")[0];k=C.getAll("embed")[0];A=F.attr("width");x=F.attr("height");y=F.attr("id");K.video={attrs:{},sources:[]};z=K.video.attrs;for(N in 
F.attributes.map){z[N]=F.attributes.map[N]}B=C.attr("src");if(B){K.video.sources.push({src:v.call(n,B,"src",C.name)})}l=F.getAll("source");for(E=0;E<l.length;E++){B=l[E].remove();K.video.sources.push({src:v.call(n,B.attr("src"),"src","source"),type:B.attr("type"),media:B.attr("media")})}if(z.poster){z.poster=v.call(n,z.poster,"poster",C.name)}}if(C.name==="object"){L=C;k=C.getAll("embed")[0]}if(C.name==="embed"){k=C}if(C.name==="iframe"){s=C;o="Iframe"}if(L){A=A||L.attr("width");x=x||L.attr("height");G=G||L.attr("style");y=y||L.attr("id");w=w||L.attr("hspace");r=r||L.attr("vspace");D=D||L.attr("align");j=j||L.attr("bgcolor");K.name=L.attr("name");I=L.getAll("param");for(E=0;E<I.length;E++){q=I[E];N=q.remove().attr("name");if(!h[N]){K.params[N]=q.attr("value")}}K.params.src=K.params.src||L.attr("data")}if(k){A=A||k.attr("width");x=x||k.attr("height");G=G||k.attr("style");y=y||k.attr("id");w=w||k.attr("hspace");r=r||k.attr("vspace");D=D||k.attr("align");j=j||k.attr("bgcolor");for(N in k.attributes.map){if(!h[N]&&!K.params[N]){K.params[N]=k.attributes.map[N]}}}if(s){A=s.attr("width");x=s.attr("height");G=G||s.attr("style");y=s.attr("id");w=s.attr("hspace");r=s.attr("vspace");D=s.attr("align");j=s.attr("bgcolor");tinymce.each(d,function(i){M.attr(i,s.attr(i))});for(N in s.attributes.map){if(!h[N]&&!K.params[N]){K.params[N]=s.attributes.map[N]}}}if(K.params.movie){K.params.src=K.params.src||K.params.movie;delete K.params.movie}if(K.params.src){K.params.src=v.call(n,K.params.src,"src","object")}if(F){if(C.name==="video"){o=H.video.name}else{if(C.name==="audio"){o=H.audio.name}}}if(L&&!o){o=(J(L,"clsid")||J(L,"classid")||J(L,"type")||{}).name}if(k&&!o){o=(J(k,"type")||p(K.params.src)||{}).name}if(k&&o=="EmbeddedAudio"){K.params.type=k.attr("type")}C.replace(M);if(k){k.remove()}if(L){t=u(L.remove());if(t){K.object_html=t}}if(F){t=u(F.remove());if(t){K.video_html=t}}K.hspace=w;K.vspace=r;K.align=D;K.bgcolor=j;M.attr({id:y,"class":"mceItemMedia mceItem"+(o||"Flash"),style:G,width:A||(C.name=="audio"?"300":"320"),height:x||(C.name=="audio"?"32":"240"),hspace:w,vspace:r,align:D,bgcolor:j,"data-mce-json":g.serialize(K,"'")})}});tinymce.PluginManager.add("media",tinymce.plugins.MediaPlugin)})(); | PypiClean |
/MTfit-1.0.6a5.tar.gz/MTfit-1.0.6a5/docs/source/mtplot.rst | *********************************
Plotting the Moment Tensor
*********************************
:mod:`MTfit` has a plotting submodule that can be used to represent the source. There are several different plot types, shown below, and MTplot can be used both from the command line and from within the Python interpreter.
This section describes how to use the plotting tools and shows the different plot types:
* :ref:`Beachball<beachball>`
* :ref:`Fault plane<faultplane>`
* :ref:`Riedesel-Jordan<rjplot>`
* :ref:`Radiation<radiation>`
* :ref:`Lune<lune>`
* :ref:`Hudson<hudson>`
These are plotted using `matplotlib <http://matplotlib.org/>`_, using a class based system. The main plotting class is the :class:`MTplot` class, which stores the figure and handles the plotting, and each axes plotted is shown using a plot class from :mod:`MTfit.plot.plot_classes`. The plotting methods are designed to enable easy plotting without much user input, but also allow more complex plots to be made.
The :ref:`examples section <mtplot-examples>` shows two examples of using the plotting submodule.
The source code is shown in :doc:`source-plot_classes`.
.. warning::
    :mod:`matplotlib` does not render 3D plots well: each object is converted to a 2D object and plotted with a constant zorder for the whole plot. Consequently, the bi-axes plot (:ref:`Chapman and Leaney, 2011 <Chapman-2011>`) is not included as an option, and other 3D plots may not always work correctly.
.. only:: not latex
For the full plot_classes documentation, see :doc:`plot_classes`.
Using MTplot from the command line
======================================
:mod:`MTplot` can be run from the command line. A script should have been installed onto the path during installation and should be callable as::
$ MTplot
However it may be necessary to install the script manually. This is platform dependent.
Script Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Linux
-------------------------------
Add this python script to a directory on the $PATH environmental variable::
#!/usr/bin/env python
import MTfit
MTfit.plot.__run__()
and make sure it is executable.
Windows
--------------------------------
Add the Linux script (above) to the path, or, if using PowerShell, edit the PowerShell profile (usually found in *Documents/WindowsPowerShell/*; if not present, use ``$PROFILE|Format-List -Force`` to locate it, and it may be necessary to create the profile) and add::
function MTplot{
$script={
python -c "import MTfit;MTfit.plot.__run__()" $args
}
Invoke-Command -ScriptBlock $script -ArgumentList $args
}
Windows PowerShell can have problems parsing command-line arguments; if necessary, these should be enclosed in quotation marks, e.g. "-d=datafile.inv".
Command Line Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are several command line options available, these can be found by calling::
$ MTplot -h
The command line defaults can be set using the same defaults file as for MTfit (see :doc:`run`).
Using MTplot from the Python interpreter
==========================================
Although MTplot can be run from the command line, it is much more powerful to run it from within the python interpreter.
To run MTplot from the python interpreter, create the :class:`~MTfit.plot.plot_classes.MTplot` object::
>>> from MTfit.plot import MTplot
>>> MTplot(MTs,plot_type='beachball',stations={},plot=True,*args,**kwargs)
See :ref:`Making the MTplot class <mtplotclass>` for more information on the :class:`~MTfit.plot.plot_classes.MTplot` object.
.. _input-data-label:
Input Data
==================================
:func:`MTfit.plot.__core__.read` can read the three default output formats (MATLAB, hyp and pickle) for MTfit results.
Additional parsers can be installed using the `MTfit.plot_read` entry point described in :doc:`extensions`.
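
For example, assuming a MATLAB results file ``MTfitOutput.mat`` produced by MTfit (the filename is illustrative), the results can be read and plotted with::

    >>> MTs,stations=MTfit.plot.read('MTfitOutput.mat')
    >>> MTfit.plot.MTplot(MTs,'faultplane',stations=stations)
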
.. _mtplotclass:
MTplot Class
========================
The MTplot class is used to handle plotting the moment tensors. The moment tensors are stored in an :ref:`MTData<mtdataclass>` object.
.. autoclass:: MTfit.plot.plot_classes.MTplot
:members:
-------------------
.. _mtdataclass:
MTData Class
========================
The MTData class is used for storing and converting the moment tensors for plotting.
.. autoclass:: MTfit.plot.plot_classes.MTData
:members:
.. _beachball:
Beachball plot
==========================
The simplest plot is a beachball plot using the :class:`MTfit.plot.plot_classes._AmplitudePlot` class.
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'beachball',
fault_plane=True)
This plots the equal area projection of the source (a double-couple).
Stations can be included as a dictionary, with the azimuths and takeoff angles in degrees, such as::
>>> stations={'names':['S01','S02','S03','S04'],
'azimuth':np.array([120.,5.,250.,75.]),
'takeoff_angle':np.array([30.,60.,45.,10.]),
'polarity':np.array([1,0,1,-1])}
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'beachball',
stations=stations,fault_plane=True)
If polarity probabilities have been used in the inversion, the probabilities can be plotted at the receivers by setting the station ``polarity`` array to the larger of the two polarity probabilities, with negative values corresponding to polarities in the negative direction, e.g.::
>>> stations={'names':['S01','S02','S03','S04'],
'azimuth':np.array([120.,5.,250.,75.]),
'takeoff_angle':np.array([30.,60.,45.,10.]),
'polarity':np.array([0.8,0.5,0.7,-0.9])}
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'beachball',
stations=stations,fault_plane=True)
To tweak the plot further, the plot class can be used directly::
>>> import MTfit
>>> import numpy as np
>>> X=MTfit.plot.plot_classes._AmplitudePlot(False,False,
np.array([[1],[0],[-1],[0],[0],[0]]),'beachball',
stations=stations,fault_plane=True)
>>> X.plot()
The first two arguments correspond to the subplot_spec and matplotlib figure to be used - if these are False, then a new figure is created.
It uses the :class:`MTfit.plot.plot_classes._AmplitudePlot` class:
.. autoclass:: MTfit.plot.plot_classes._AmplitudePlot
:members: plot
.. _faultplane:
Fault Plane plot
==========================
A similar plot to the amplitude beachball plot is the fault plane plot, made using the :class:`MTfit.plot.plot_classes._FaultPlanePlot` class.
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'faultplane',
fault_plane=True)
This plots the equal area projection of the source (a double-couple).
Stations can be included as a dictionary, like with the beachball plot.
The fault plane plot also can plot the solutions for multiple moment tensors, so the input array can be longer::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[ 1,0.9, 1.1,0.4],
[ 0,0.1,-0.1,0.6],
[-1, -1, -1, -1],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0]]),
'faultplane',fault_plane=True)
There are additional initialisation arguments, such as the ``show_max_likelihood`` and ``show_mean`` boolean flags, which show the maximum likelihood fault planes in the colour given by the default color argument, and the mean orientation in green.
Additionally, if the probability argument is set, the fault planes are coloured by the probability, with more likely planes darker.
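
For example, a sketch (with hypothetical probability values) colouring the fault planes from the multiple moment tensor example above::

    >>> probabilities=np.array([0.2,1.0,0.7,0.4])
    >>> MTfit.plot.MTplot(np.array([[ 1,0.9, 1.1,0.4],
                                    [ 0,0.1,-0.1,0.6],
                                    [-1, -1,  -1, -1],
                                    [ 0,  0,   0,  0],
                                    [ 0,  0,   0,  0],
                                    [ 0,  0,   0,  0]]),
                          'faultplane',probability=probabilities,
                          fault_plane=True)
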
It uses the :class:`MTfit.plot.plot_classes._FaultPlanePlot` class:
.. autoclass:: MTfit.plot.plot_classes._FaultPlanePlot
:members: plot
.. _rjplot:
Riedesel-Jordan plot
==========================
The Riedesel-Jordan plot is more complicated, and is described in :ref:`Riedesel and Jordan (1989)<Riedesel-1989>`. It plots the source type on the focal sphere, in a region described by the source eigenvectors.
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'riedeseljordan')
This plots the equal area projection of the source (a double-couple).
Stations cannot be shown on this plot.
The Riedesel-Jordan plot cannot plot the solutions for multiple moment tensors, so the input must be a single moment tensor.
It uses the :class:`MTfit.plot.plot_classes._RiedeselJordanPlot` class:
.. autoclass:: MTfit.plot.plot_classes._RiedeselJordanPlot
:members: plot
.. _radiation:
Radiation plot
==========================
The radiation plot shows the same pattern as the beachball plot, except the shape is scaled by the amplitude on the focal sphere.
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'radiation')
This plots the equal area projection of the source (a double-couple).
Stations cannot be shown on this plot.
The radiation plot cannot plot the solutions for multiple moment tensors, so the input must be a single moment tensor.
It uses the :class:`MTfit.plot.plot_classes._RadiationPlot` class:
.. autoclass:: MTfit.plot.plot_classes._RadiationPlot
:members: plot
.. _hudson:
Hudson plot
==========================
The Hudson plot is a source type plot, described in :ref:`Hudson et al. (1989)<Hudson-1989>`. It plots the source type in a quadrilateral, depending on the chosen projection. There are two projections, the tau-k plot and the u-v plot, with the latter being more common (and the default).
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'hudson')
This plots the u-v plot of the source (a double-couple).
Stations cannot be shown on this plot.
The Hudson plot can plot the solutions for multiple moment tensors, so the input array can be longer. Additionally, it can also plot a histogram of the PDF, if the probability argument is set.
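
For example, a sketch (with randomly generated sources and hypothetical probability values) plotting a histogram of the PDF on the u-v plot::

    >>> MTs=np.random.randn(6,1000)
    >>> probability=np.random.rand(1000)
    >>> MTfit.plot.MTplot(MTs,'hudson',probability=probability)
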
It uses the :class:`MTfit.plot.plot_classes._HudsonPlot` class:
.. autoclass:: MTfit.plot.plot_classes._HudsonPlot
:members: plot
.. _lune:
Lune plot
==========================
The Lune plot is a source type plot, described in :ref:`Tape and Tape (2012)<Tape-2012>`. It plots the source type in the fundamental eigenvalue lune, which can be projected into 2 dimensions.
Using the MTplot function, it can be made with the following commands::
>>> import MTfit
>>> import numpy as np
>>> MTfit.plot.MTplot(np.array([[1],[0],[-1],[0],[0],[0]]),'lune')
Stations cannot be shown on this plot.
The Lune plot can plot the solutions for multiple moment tensors, so the input array can be longer. Additionally, it can also plot a histogram of the PDF, if the probability argument is set.
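
For example, a sketch (with randomly generated sources and hypothetical probability values) plotting a histogram of the PDF on the lune::

    >>> MTs=np.random.randn(6,1000)
    >>> probability=np.random.rand(1000)
    >>> MTfit.plot.MTplot(MTs,'lune',probability=probability)
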
It uses the :class:`MTfit.plot.plot_classes._LunePlot` class:
.. autoclass:: MTfit.plot.plot_classes._LunePlot
:members: plot
.. _mtplot-examples:
Examples
===============================
This section shows a pair of simple examples and their results.
The first example is to plot the data from :ref:`Krafla P Polarity example<real-p-polarity>`::
import MTfit
import numpy as np
#Load Data
st_dist=MTfit.plot.read('krafla_event_ppolarityDCStationDistribution.mat',
station_distribution=True)
DCs,DCstations=MTfit.plot.read('krafla_event_ppolarityDC.mat')
MTs,MTstations=MTfit.plot.read('krafla_event_ppolarityMT.mat')
#Plot
plot=MTfit.plot.MTplot([np.array([1,0,-1,0,0,0]),DCs,MTs],
stations=[DCstations,DCstations,MTstations],
station_distribution=[st_dist,False,False],
plot_type=['faultplane','faultplane','hudson'],fault_plane=[False,True,False],
show_mean=False,show_max=True,grid_lines=True,TNP=False,text=[False,False,True])
.. only:: not latex
This produces a :mod:`matplotlib` figure:
.. figure:: figures/krafla_event_mtplot_example.png
:figwidth: 60 %
:width: 90%
:align: center
:alt: Beachball plot showing the fault plane orientations for the double-couple constrained inversion and the marginalised source-type PDF for the full moment tensor inversion of the krafla data.
*Beachball plots showing the station location uncertainty, and the fault plane orientations for the double-couple constrained inversion and the marginalised source-type PDF for the faultplane moment tensor inversion of the krafla data using polarity probabilities.*
.. only:: latex
This produces a :mod:`matplotlib` figure, shown in Fig. :ref:`11.1<krafla-event-mtplot-example-fig>`.
.. _krafla-event-mtplot-example-fig:
.. figure:: figures/krafla_event_mtplot_example.png
:width: 100%
:align: center
:alt: Beachball plot showing the fault plane orientations for the double-couple constrained inversion and the marginalised source-type PDF for the full moment tensor inversion of the krafla data.
*Beachball plots showing the station location uncertainty, and the fault plane orientations for the double-couple constrained inversion and the marginalised source-type PDF for the faultplane moment tensor inversion of the krafla data using polarity probabilities.*
The second example shows the different plot types::
import MTfit
import numpy as np
import scipy.stats as sp
#Generate Data
n=100
DCs=MTfit.MTconvert.Tape_MT6(np.zeros(n),np.zeros(n),np.pi+0.1*np.random.randn(n),
0.5+0.01*np.random.randn(n),0.1*np.random.randn(n))
probDCs=np.random.rand(n)
n=10000
g=-np.pi/12+0.01*np.random.randn(n)
d=np.pi/3+0.1*np.random.randn(n)
MTs=MTfit.MTconvert.Tape_MT6(g,d,np.pi+0.1*np.random.randn(n),
0.5+0.01*np.random.randn(n),0.1*np.random.randn(n))
probMTs=sp.norm.pdf(g,-np.pi/12,0.01)*sp.norm.pdf(d,np.pi/3,0.1)
plot_sources=[np.array([1,0,1,-1,0,0]),DCs,MTs,MTs,np.array([1,0,1,-1,0,0])]
#Plot
plot=MTfit.plot.MTplot(plot_sources,
plot_type=['beachball','faultplane','hudson','lune','riedeseljordan'],
probability=[False,probDCs,probMTs,probMTs,False],
colormap=['bwr','bwr','viridis','viridis','bwr'],
stations=[{'names':['S01','S02','S03','S04'],
'azimuth':np.array([120.,45.,238.,341.]),
'takeoff_angle':np.array([12.,56.,37.,78.]),
'polarity':[1,0,-1,-1]},{},{},{},{}],
show_mean=True,show_max=True,grid_lines=True,TNP=False,fontsize=6,
station_markersize=2,markersize=2)
.. only:: not latex
This produces a :mod:`matplotlib` figure:
.. figure:: figures/mtplot_example.png
:figwidth: 60 %
:width: 90%
:align: center
:alt: MTplot examples showing an equal area projection of a beachball for an example moment tensor source, fault plane distribution showing the mean orientation in green, Hudson and lune type plots of a full moment tensor PDF, and a Riedesel-Jordan type plot of an example moment tensor source.
*MTplot examples showing an equal area projection of a beachball for an example moment tensor source, fault plane distribution showing the mean orientation in green, Hudson and lune type plots of a full moment tensor PDF, and a Riedesel-Jordan type plot of an example moment tensor source.*
.. only:: latex
This produces a :mod:`matplotlib` figure, shown in Fig. :ref:`11.2<mtplot-example-fig>`.
.. _mtplot-example-fig:
.. figure:: figures/mtplot_example.png
:width: 100%
:align: center
:alt: MTplot examples showing an equal area projection of a beachball for an example moment tensor source, fault plane distribution showing the mean orientation in green, Hudson and lune type plots of a full moment tensor PDF, and a Riedesel-Jordan type plot of an example moment tensor source.
*MTplot examples showing an equal area projection of a beachball for an example moment tensor source, fault plane distribution showing the mean orientation in green, Hudson and lune type plots of a full moment tensor PDF, and a Riedesel-Jordan type plot of an example moment tensor source.* | PypiClean |
/Fluence-0.1.7-py3-none-any.whl/fluence/adaptive/entmax.py | __all__ = ["AlphaChooser", "EntmaxAlpha", "EntmaxBisectFunction", "entmax_bisect"]
import torch
from torch import nn
from torch.autograd import Function
class AlphaChooser(torch.nn.Module):
"""
    Holds one learnable alpha value per attention head for the alpha-entmax
    function, constrained to the range [1.01, 2].
"""
def __init__(self, head_count):
super(AlphaChooser, self).__init__()
self.pre_alpha = nn.Parameter(torch.randn(head_count))
def forward(self):
alpha = 1 + torch.sigmoid(self.pre_alpha)
return torch.clamp(alpha, min=1.01, max=2)
class EntmaxAlpha(nn.Module):
def __init__(self, head_count, dim=0):
super(EntmaxAlpha, self).__init__()
self.dim = dim
self.alpha_chooser = nn.Parameter(AlphaChooser(head_count)())
self.alpha = self.alpha_chooser
def forward(self, att_scores):
batch_size, head_count, query_len, key_len = att_scores.size()
expanded_alpha = self.alpha.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# [1,nb_heads,1,1]
expanded_alpha = expanded_alpha.expand((batch_size, -1, query_len, 1))
# [bs, nb_heads, query_len,1]
p_star = entmax_bisect(att_scores, expanded_alpha)
return p_star
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
(Y,) = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
# alpha gradient computation
# d_alpha = (partial_y / partial_alpha) * dY
# NOTE: ensure alpha is not close to 1
# since there is an indetermination
# batch_size, _ = dY.shape
# shannon terms
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
# shannon entropy
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / ((ctx.alpha - 1) ** 2)
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
"""
alpha-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
This function is differentiable with respect to both X and alpha.
Parameters
----------
X : torch.Tensor
The input tensor.
    alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If a scalar
        or python float, the same value is used for all rows; otherwise,
        it must have shape (or be expandable to)
        alpha.shape[j] == (X.shape[j] if j != dim else 1).
        A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds to
        softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result might
        sum to a value close to, but not exactly, 1, which might cause
        downstream problems.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one) | PypiClean |
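
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged demo of the two entry points above: `entmax_bisect` on a
# plain score matrix and `EntmaxAlpha` on attention scores. The shapes, seed,
# and values below are illustrative assumptions, not prescribed by the library.
if __name__ == "__main__":
    torch.manual_seed(0)
    scores = torch.randn(2, 5)
    p = entmax_bisect(scores, alpha=1.5, dim=-1)
    # Each row is a sparse probability distribution summing to 1.
    print(p)
    print(p.sum(dim=-1))

    # EntmaxAlpha expects scores shaped [batch, heads, query_len, key_len]
    # and learns one alpha per head.
    att = torch.randn(2, 4, 3, 3)
    p_att = EntmaxAlpha(head_count=4)(att)
    print(p_att.sum(dim=-1))  # ~1 along the key dimension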
/MLZ-DESC-1.2.tar.gz/MLZ-DESC-1.2/mlz_desc/utils/data.py | __author__ = 'Matias Carrasco Kind'
import numpy
import astropy.io.fits as pf
import random
import copy
from . import utils_mlz
import sys
def read_catalog(filename, myrank=0, check='no', get_ng='no', L_1=0, L_2=-1, A_T=''):
"""
    Read the catalog, either for training or testing;
    currently accepts ascii tables and numpy tables

    .. todo::

        HDF5 files

    :param str filename: Filename of the catalog
    :param int myrank: current processor id, for parallel reading (not implemented)
    :param str check: If 'yes', only use the first 200 lines of the catalog (to check the code)
    :param str get_ng: If 'yes', just return the total number of galaxies in the catalog
    :param int L_1: if passed, get catalog entries between L_1 and L_2
    :param int L_2: if passed, get catalog entries between L_1 and L_2
:return: The whole catalog
:rtype: float array
"""
if filename[-3:] == 'npy':
filein = numpy.load(filename)
if check == 'yes': filein = filein[0:200]
if get_ng == 'yes': return len(filein)
if L_2 != -1: filein = filein[L_1:L_2]
elif filename[-4:] == 'fits':
GH = pf.open(filename, mode='readonly', memmap=False)
if get_ng == 'yes':
if check == 'yes':
GH.close()
return 200
            ngt = GH[1].header['NAXIS2']  # assumed to be present, TODO: check automatically
GH.close()
return ngt
if L_2 > -1:
Ta = GH[1].data[L_1:L_2]
else:
if check == 'no':
Ta = GH[1].data
else:
Ta = GH[1].data[0:200]
if A_T != '':
col_index = []
klist = []
for k in list(A_T.keys()):
if A_T[k]['ind'] >= 0:
col_index.append(A_T[k]['ind'])
klist.append(k)
if A_T[k]['eind'] >= 0:
col_index.append(A_T[k]['eind'])
filein = numpy.zeros((len(Ta), max(col_index) + 1))
for k in klist:
T_temp = Ta.field(k)
filein[:, A_T[k]['ind']] = T_temp
if A_T[k]['eind'] >= 0:
T_temp = Ta.field('e' + k)
filein[:, A_T[k]['eind']] = T_temp
else:
filein = numpy.array(Ta.tolist())
GH.close()
        # T_temp is only bound in the A_T branch above, so delete Ta alone
        del Ta
try:
del GH[1].data
        except Exception:
            pass
elif filename[-3:] == 'csv':
filein = numpy.loadtxt(filename, delimiter=',')
if check == 'yes': filein = filein[0:200]
if get_ng == 'yes': return len(filein)
if L_2 != -1: filein = filein[L_1:L_2]
else:
filein = numpy.loadtxt(filename)
if check == 'yes': filein = filein[0:200]
if get_ng == 'yes': return len(filein)
if L_2 != -1: filein = filein[L_1:L_2]
return filein
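
# A minimal usage sketch (illustrative only; the file name is an assumption):
#
#   ngal = read_catalog('SDSS_train.npy', get_ng='yes')     # count entries only
#   data = read_catalog('SDSS_train.npy', L_1=0, L_2=1000)  # first 1000 entries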
def create_random_realizations(AT, F, N, keyatt):
"""
    Create random realizations using the errors in magnitudes and
    save a temporary file in the training data directory.
    Uses a normal distribution

    .. todo::

        Add other distributions

    :param dict AT: dictionary with column names and column indices
    :param float F: Training data
    :param int N: Number of realizations
    :param str keyatt: Attribute name to be predicted or classified
    :return: Returns a dictionary of arrays with the random realizations
"""
BigCat = {}
total = len(F)
for key in list(AT.keys()):
if (key != keyatt): BigCat[key] = numpy.zeros((total, N))
for i in range(total):
for k in list(BigCat.keys()):
sigg = F[i][AT[k]['eind']]
sigg = max(0.001, sigg)
sigg = min(sigg, 0.3)
if AT[k]['eind'] == -1: sigg = 0.00005
BigCat[k][i] = numpy.random.normal(F[i][AT[k]['ind']], sigg, N)
return BigCat
def make_AT(cols, attributes, keyatt):
"""
    Creates the attribute dictionary used by all routines

    .. note::

        Make sure all columns have different names, and that error columns have
        the same name as their attribute columns with an 'e' prepended,
        e.g. 'mag_u' and 'emag_u'

    :param str cols: str array with column names from file
    :param str attributes: attributes to be used from those columns
    :param str keyatt: Attribute to be predicted or classified
    :return: dictionary where each key corresponds to an attribute and maps to a
        dictionary in which 'ind' is the column index and 'eind' is the error
        column index for that attribute,
        e.g. A={u:{'ind'=1, 'eind'=6}}
    :rtype: dict
"""
AT = {}
for nc in attributes:
w = numpy.where(cols == nc)
AT[nc] = {'type': 'real'}
AT[keyatt] = {'type': 'real'}
for c in list(AT.keys()):
j = numpy.where(cols == c)[0]
ej = numpy.where(cols == 'e' + c)[0]
if len(ej) == 0: ej = numpy.array([-1])
if len(j) == 0: j = numpy.array([-1])
AT[c]['ind'] = j[0]
AT[c]['eind'] = ej[0]
return AT
def bootstrap_index(N, SS):
"""
    Returns N bootstrap indices drawn with replacement from the range 0 to SS - 1

    :param int N: size of the bootstrap sample
    :param int SS: extract indices from 0 to SS - 1
:return: array of bootstrap indices
:rtype: int array
"""
index = []
for i in range(N):
index.append(random.randint(0, SS - 1))
return numpy.array(index)
# return stat.randint.rvs(0,SS,size=N)
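
# A minimal usage sketch (illustrative): resample a dataset with replacement.
#
#   idx = bootstrap_index(len(data), len(data))   # e.g. array([3, 0, 3, ...])
#   resampled = data[idx]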
class catalog():
"""
Creates a catalog instance for training or testing
:param class Pars: Class of parameters read from inputs files
:param str cat_type: 'train' or 'test' file (names are taken from Pars class)
:param int L1: keep only entries between L1 and L2
:param int L2: keep only entries between L1 and L2
"""
def __init__(self, Pars, cat_type='train', L1=0, L2=-1, rank=0):
self.Pars = Pars
self.cat_type = cat_type
if cat_type == 'train':
self.filename = Pars.path_train + Pars.trainfile
self.cols = numpy.array(Pars.columns)
            if Pars.keyatt not in Pars.columns:
if rank == 0: utils_mlz.printpz_err("Column ", Pars.keyatt,
" not found in training file, check inputs file")
sys.exit(0)
if cat_type == 'test':
self.filename = Pars.path_test + Pars.testfile
self.cols = numpy.array(Pars.columns_test)
self.atts = Pars.att
self.AT = make_AT(self.cols, self.atts, Pars.keyatt)
self.cat = read_catalog(self.filename, check=Pars.checkonly, get_ng='no', L_1=L1, L_2=L2, A_T=self.AT)
# if L2 != -1: self.cat = self.cat[L1:L2]
self.cat_or = copy.deepcopy(self.cat)
self.nobj = len(self.cat)
self.ndim = len(self.atts)
self.has_random = False
self.oob = 'no'
def has_X(self):
"""
Is X already loaded in memory?
:return: Boolean
"""
try:
type(self.X)
return True
except AttributeError:
return False
def has_Y(self):
"""
Is Y already loaded in memory?
:return: Boolean
"""
try:
type(self.Y)
return True
except AttributeError:
return False
def get_XY(self, curr_at='all', bootstrap='no'):
"""
        Creates the X and Y attributes from the catalog, using random realizations
        or bootstrapping; after this both X and Y are loaded and ready to be used
:param dict curr_at: dictionary of attributes to be used (like a subsample of them), 'all' by default
:param str bootstrap: Bootstrapping sample? ('yes'/'no')
:return: Saves X, Y oob (and no-oob) data if required and original catalog
"""
self.boot = bootstrap
if curr_at == 'all':
self.curr_at = self.atts
else:
self.curr_at = curr_at['atts']
indx = []
for key in self.curr_at:
indx.append(self.AT[key]['ind'])
indx = numpy.array(indx)
self.indx = indx
self.X = self.cat[:, indx]
nboot = len(self.X)
if self.oob == 'yes': self.Xoob = self.cat_oob[:, indx]
if self.boot == 'yes':
self.in_boot = bootstrap_index(nboot, nboot)
self.X = self.X[self.in_boot]
if self.boot == 'no':
self.in_boot = numpy.arange(nboot)
if self.AT[self.Pars.keyatt]['ind'] != -1:
self.Y = self.cat[:, self.AT[self.Pars.keyatt]['ind']]
if self.oob == 'yes': self.Yoob = self.cat_oob[:, self.AT[self.Pars.keyatt]['ind']]
if self.boot == 'yes':
self.Y = self.Y[self.in_boot]
self.cat2 = copy.deepcopy(self.cat[self.in_boot])
def make_random(self, outfileran='', ntimes=-1):
"""
Actually makes the random realizations
        :param str outfileran: output file (optional)
:param int ntimes: taken from class Pars unless otherwise indicated
"""
if ntimes == -1: ntimes = int(self.Pars.nrandom)
if outfileran == '': outfileran = self.Pars.randomcatname
self.BigRan = create_random_realizations(self.AT, self.cat, ntimes, self.Pars.keyatt)
numpy.save(self.Pars.path_train + outfileran, self.BigRan)
self.has_random = True
def load_random(self):
"""
Loads the random catalog with the realizations
"""
        Junk = numpy.load(self.Pars.path_train + self.Pars.randomcatname + '.npy', allow_pickle=True)
self.BigRan = Junk.item()
del Junk
def newcat(self, i):
self.cat = copy.deepcopy(self.cat_or)
if i > 0:
for k in list(self.AT.keys()):
if k != self.Pars.keyatt: self.cat[:, self.AT[k]['ind']] = self.BigRan[k][:, i]
def oob_data(self, frac=0.):
"""
Creates oob data and separates it from the no-oob data for further tests
:param float frac: Fraction of the data to be separated, taken from class Pars (default is 1/3)
"""
        if not self.has_X() or not self.has_Y():
            print('ERROR: X and Y must be loaded (call get_XY) before creating OOB data')
if frac == 0.: frac = self.Pars.oobfraction
self.noob = int(self.nobj * frac)
self.oob_index = random.sample(range(self.nobj), self.noob)
index_all = numpy.arange(self.nobj)
index_all[self.oob_index] = -1
woob = numpy.where(index_all >= 0)[0]
self.no_oob_index = index_all[woob]
self.Xoob = self.X[self.oob_index]
self.Yoob = self.Y[self.oob_index]
self.X = self.X[self.no_oob_index]
self.Y = self.Y[self.no_oob_index]
self.oob_index_or = self.in_boot[self.oob_index]
def oob_data_cat(self, frac=0.):
self.oob = 'yes'
self.cat = copy.deepcopy(self.cat_or)
if frac == 0.: frac = self.Pars.oobfraction
self.noob = int(self.nobj * frac)
self.oob_index = random.sample(range(self.nobj), self.noob)
index_all = numpy.arange(self.nobj)
index_all[self.oob_index] = -1
woob = numpy.where(index_all >= 0)[0]
self.no_oob_index = index_all[woob]
self.cat_oob = self.cat[self.oob_index]
self.cat = self.cat[self.no_oob_index]
self.oob_index_or = self.oob_index
def sample_dim(self, nsample):
"""
Samples from the list of attributes
:param int nsample: size of subsample
:return: dictionary with subsample attributes and their locations
"""
self.ndim_sample = nsample
r_dim = random.sample(self.atts, nsample)
self.dict_dim = {}
self.dict_dim['atts'] = r_dim
for k in r_dim:
self.dict_dim[k] = self.AT[k]['ind']
return self.dict_dim | PypiClean |
/EXOSIMS-3.1.6.tar.gz/EXOSIMS-3.1.6/README.md | 
Exoplanet Open-Source Imaging Mission Simulator
<a href="http://ascl.net/1706.010"><img src="https://img.shields.io/badge/ascl-1706.010-blue.svg?colorB=262255" alt="ascl:1706.010" /></a>

[](https://exosims.readthedocs.io/en/latest/?badge=latest)
[](https://coveralls.io/github/dsavransky/EXOSIMS?branch=master)
[](https://badge.fury.io/py/EXOSIMS)
[](http://www.astropy.org/)
[](code_of_conduct.md)
Quick Install
--------------------------
Clone the repository, navigate to the top level directory (containing setup.py) and execute:
```
pip install -e .
```
Full installation and configuration instructions available here: https://exosims.readthedocs.io/en/latest/install.html
Documentation and Quick Start Guide
-----------------------------------------------------------
- https://exosims.readthedocs.io
- https://exosims.readthedocs.io/en/latest/quickstart.html
Contributing
-------------------------------------
All contributions are very welcome. Before starting on your first contribution to EXOSIMS, please read the [Contributing Guide](https://github.com/dsavransky/EXOSIMS/blob/master/CONTRIBUTING.md)
Credits and Acknowledgements
------------------------------
Created by Dmitry Savransky
Written by Christian Delacroix, Daniel Garrett, Dean Keithly, Gabriel Soto, Corey Spohn, Walker Dula, Sonny Rappaport, Michael Turmon, Rhonda Morgan, Grace Genszler, and Dmitry Savransky, with contributions by Patrick Lowrance, Ewan Douglas, Jackson Kulik, Jeremy Turner, Jayson Figueroa, Owen Sorber, and Neil Zimmerman.
EXOSIMS makes use of Astropy, a community-developed core Python package for Astronomy (Astropy Collaboration, 2013).
EXOSIMS optionally makes use of Forecaster (http://ascl.net/1701.007).
EXOSIMS optionally makes use of NASA's Navigation and Ancillary Information Facility's SPICE system components (https://naif.jpl.nasa.gov/naif/).
EXOSIMS optionally uses values from: Mamjek, E. "A Modern Mean Dwarf Stellar Color and Effective Temperature Sequence", http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt, Version 2017.09.06
EXOSIMS development is supported by NASA Grant Nos. NNX14AD99G (GSFC), NNX15AJ67G (WPS) and NNG16PJ24C (SIT).
For further information, please see EXOSIMS's ASCL page and the following papers:
- http://adsabs.harvard.edu/abs/2016JATIS...2a1006S
- http://adsabs.harvard.edu/abs/2016SPIE.9911E..19D
| PypiClean |
/OAuthBrowser-0.0.1.tar.gz/OAuthBrowser-0.0.1/README.md | # OAuthBrowser
This module lets you authenticate [OAuth 2.0](https://oauth.net/2/) via the system browser and get the response URL. It currently supports the **Google Chrome** and **Safari** browsers. Built with *applescript* and [*osascript*](https://ss64.com/osx/osascript.html), it only works on Mac OSX.
A lot more can be done with **OAuthBrowser**, such as fetching page source without triggering automation detection on websites.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install [OAuthBrowser](https://pypi.org/project/OAuthBrowser/).
```bash
pip install OAuthBrowser
```
Or install the latest version directly from this repository.
```bash
pip install git+https://github.com/Saadmairaj/OAuthBrowser#egg=OAuthBrowser
```
## Usage
Usage is very simple
1. Pass the authentication URL.
2. Use the `Wait` class to wait for the browser to redirect.
3. Fetch the response URL.
4. Close the browser.
```python
from OAuthBrowser import Safari, Wait
from urllib.parse import urlparse, parse_qs
URL = """https://accounts.google.com/signin/oauth/oauthchooseaccount?
scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.metadata.readonly&
state=state_parameter_passthrough_value&
redirect_uri=https%3A%2F%2Foauth2.example.com%2Fcode&
access_type=offline&
response_type=code&
client_id=583306224539-atbcaa8ne8g85e8kc006o6vmq99qiid0.apps.googleusercontent.com&
o2v=2&as=CDdm3G6Zd1UOG9o_gWXzQQ&flowName=GeneralOAuthFlow"""
# Initialise browser
browser = Safari(window_geometry=(100, 22, 400, 690))
# Pass Authentication URL
browser.open_new_window(URL)
# Initialise Wait
wait = Wait(browser)
# Wait till query "code" is present in the URL.
wait.until_present_query('code')
# Fetch the url
response_url = urlparse(browser.get_current_url())
code = parse_qs(response_url.query).get('code')[0]
print("\nCode: %s\n" % code)
# Close the browser
browser.quit()
```
## Demonstration
```bash
python -m OAuthBrowser
```
## Contributing
When contributing to this repository, please first discuss the change you wish to make via issue, email, or any other method with the owners of this repository before making a change.
Please make sure to update tests as appropriate.
## License
[MIT](https://github.com/Saadmairaj/OAuthBrowser/blob/master/LICENSE.txt) | PypiClean |
/Arpeggio-2.0.2.tar.gz/Arpeggio-2.0.2/arpeggio/__init__.py |
from __future__ import print_function, unicode_literals
import sys
from collections import OrderedDict
import codecs
import re
import bisect
from arpeggio.utils import isstr
import types
__version__ = "2.0.2"
if sys.version < '3':
text = unicode
else:
text = str
DEFAULT_WS = '\t\n\r '
NOMATCH_MARKER = 0
class ArpeggioError(Exception):
"""
Base class for arpeggio errors.
"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class GrammarError(ArpeggioError):
"""
Error raised during parser building phase used to indicate error in the
grammar definition.
"""
class SemanticError(ArpeggioError):
"""
Error raised during the phase of semantic analysis used to indicate
semantic error.
"""
class NoMatch(Exception):
"""
Exception raised by the Match classes during parsing to indicate that the
match is not successful.
Args:
rules (list of ParsingExpression): Rules that are tried at the position
of the exception.
position (int): A position in the input stream where exception
occurred.
parser (Parser): An instance of a parser.
"""
def __init__(self, rules, position, parser):
self.rules = rules
self.position = position
self.parser = parser
def eval_attrs(self):
"""
Call this to evaluate `message`, `context`, `line` and `col`. Called by __str__.
"""
def rule_to_exp_str(rule):
if hasattr(rule, '_exp_str'):
# Rule may override expected report string
return rule._exp_str
elif rule.root:
return rule.rule_name
elif isinstance(rule, Match) and \
not isinstance(rule, EndOfFile):
return "'{}'".format(rule.to_match.replace('\n', '\\n'))
else:
return rule.name
if not self.rules:
self.message = "Not expected input"
else:
what_is_expected = OrderedDict.fromkeys(
["{}".format(rule_to_exp_str(r)) for r in self.rules])
what_str = " or ".join(what_is_expected)
self.message = "Expected {}".format(what_str)
self.context = self.parser.context(position=self.position)
self.line, self.col = self.parser.pos_to_linecol(self.position)
def __str__(self):
self.eval_attrs()
return "{} at position {}{} => '{}'."\
.format(self.message,
"{}:".format(self.parser.file_name)
if self.parser.file_name else "",
(self.line, self.col),
self.context)
def __unicode__(self):
return self.__str__()
def flatten(_iterable):
'''Flattening of python iterables.'''
result = []
for e in _iterable:
if hasattr(e, "__iter__") and not type(e) in [text, NonTerminal]:
result.extend(flatten(e))
else:
result.append(e)
return result
class DebugPrinter(object):
"""
Mixin class for adding debug print support.
Attributes:
debug (bool): If true debugging messages will be printed.
_current_indent(int): Current indentation level for prints.
"""
def __init__(self, **kwargs):
self.debug = kwargs.pop("debug", False)
self.file = kwargs.pop("file", sys.stdout)
self._current_indent = 0
super(DebugPrinter, self).__init__(**kwargs)
def dprint(self, message, indent_change=0):
"""
Handle debug message. Print to the stream specified by the 'file'
keyword argument at the current indentation level. Default stream is
stdout.
"""
if indent_change < 0:
self._current_indent += indent_change
print(("%s%s" % (" " * self._current_indent, message)),
file=self.file)
if indent_change > 0:
self._current_indent += indent_change
# ---------------------------------------------------------
# Parser Model (PEG Abstract Semantic Graph) elements
class ParsingExpression(object):
"""
An abstract class for all parsing expressions.
Represents the node of the Parser Model.
Attributes:
elements: A list (or other python object) used as a staging structure
for python based grammar definition. Used in _from_python for
building nodes list of child parser expressions.
rule_name (str): The name of the parser rule if this is the root rule.
        root (bool): Does this parser expression represent the
            root of the parser rule? The root parser rule will create
            a non-terminal node of the parse tree during parsing.
nodes (list of ParsingExpression): A list of child parser expressions.
        suppress (bool): If this is set to True then no ParseTreeNode will be
created for this ParsingExpression. Default False.
"""
suppress = False
def __init__(self, *elements, **kwargs):
if len(elements) == 1:
elements = elements[0]
self.elements = elements
self.rule_name = kwargs.get('rule_name', '')
self.root = kwargs.get('root', False)
nodes = kwargs.get('nodes', [])
if not hasattr(nodes, '__iter__'):
nodes = [nodes]
self.nodes = nodes
if 'suppress' in kwargs:
self.suppress = kwargs['suppress']
        # Memoization. Every node caches the parsing results for the given input
# positions.
self._result_cache = {} # position -> parse tree at the position
@property
def desc(self):
return "{}{}".format(self.name, "-" if self.suppress else "")
@property
def name(self):
if self.root:
return "%s=%s" % (self.rule_name, self.__class__.__name__)
else:
return self.__class__.__name__
@property
def id(self):
if self.root:
return self.rule_name
else:
return id(self)
def _clear_cache(self, processed=None):
"""
Clears memoization cache. Should be called on input change and end
of parsing.
Args:
processed (set): Set of processed nodes to prevent infinite loops.
"""
self._result_cache = {}
if not processed:
processed = set()
for node in self.nodes:
if node not in processed:
processed.add(node)
node._clear_cache(processed)
def parse(self, parser):
if parser.debug:
name = self.name
if name.startswith('__asgn'):
name = "{}[{}]".format(self.name, self._attr_name)
parser.dprint(">> Matching rule {}{} at position {} => {}"
.format(name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), 1)
# Current position could change in recursive calls
# so save it.
c_pos = parser.position
# Memoization.
# If this position is already parsed by this parser expression use
# the result
if parser.memoization:
try:
result, new_pos = self._result_cache[c_pos]
parser.position = new_pos
parser.cache_hits += 1
if parser.debug:
parser.dprint(
"** Cache hit for [{}, {}] = '{}' : new_pos={}"
.format(name, c_pos, text(result), text(new_pos)))
parser.dprint(
"<<+ Matched rule {} at position {}"
.format(name, new_pos), -1)
# If NoMatch is recorded at this position raise.
if result is NOMATCH_MARKER:
raise parser.nm
# else return cached result
return result
except KeyError:
parser.cache_misses += 1
# Remember last parsing expression and set this as
# the new last.
last_pexpression = parser.last_pexpression
parser.last_pexpression = self
if self.rule_name:
# If we are entering root rule
# remember previous root rule name and set
# this one on the parser to be available for
# debugging messages
previous_root_rule_name = parser.in_rule
parser.in_rule = self.rule_name
try:
result = self._parse(parser)
if self.suppress or (type(result) is list and
result and result[0] is None):
result = None
except NoMatch:
parser.position = c_pos # Backtracking
# Memoize NoMatch at this position for this rule
if parser.memoization:
self._result_cache[c_pos] = (NOMATCH_MARKER, c_pos)
raise
finally:
# Recover last parsing expression.
parser.last_pexpression = last_pexpression
if parser.debug:
parser.dprint("<<{} rule {}{} at position {} => {}"
.format("- Not matched"
if parser.position is c_pos
else "+ Matched",
name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), -1)
# If leaving root rule restore previous root rule name.
if self.rule_name:
parser.in_rule = previous_root_rule_name
# For root rules flatten non-terminal/list
if self.root and result and not isinstance(result, Terminal):
if not isinstance(result, NonTerminal):
result = flatten(result)
# Tree reduction will eliminate Non-terminal with single child.
if parser.reduce_tree and len(result) == 1:
result = result[0]
# If the result is not parse tree node it must be a plain list
# so create a new NonTerminal.
if not isinstance(result, ParseTreeNode):
result = NonTerminal(self, result)
# Result caching for use by memoization.
if parser.memoization:
self._result_cache[c_pos] = (result, parser.position)
return result
class Sequence(ParsingExpression):
"""
    Will match a sequence of parser expressions in the exact order they are defined.
"""
def __init__(self, *elements, **kwargs):
super(Sequence, self).__init__(*elements, **kwargs)
self.ws = kwargs.pop('ws', None)
self.skipws = kwargs.pop('skipws', None)
def _parse(self, parser):
results = []
c_pos = parser.position
if self.ws is not None:
old_ws = parser.ws
parser.ws = self.ws
if self.skipws is not None:
old_skipws = parser.skipws
parser.skipws = self.skipws
# Prefetching
append = results.append
try:
for e in self.nodes:
result = e.parse(parser)
if result:
append(result)
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
if self.ws is not None:
parser.ws = old_ws
if self.skipws is not None:
parser.skipws = old_skipws
if results:
return results
class OrderedChoice(Sequence):
"""
    Will match one of the parser expressions specified. The parser will try to
    match the expressions in the order they are defined.
"""
def _parse(self, parser):
result = None
match = False
c_pos = parser.position
if self.ws is not None:
old_ws = parser.ws
parser.ws = self.ws
if self.skipws is not None:
old_skipws = parser.skipws
parser.skipws = self.skipws
try:
for e in self.nodes:
try:
result = e.parse(parser)
if result is not None:
match = True
result = [result]
break
except NoMatch:
parser.position = c_pos # Backtracking
finally:
if self.ws is not None:
parser.ws = old_ws
if self.skipws is not None:
parser.skipws = old_skipws
if not match:
parser._nm_raise(self, c_pos, parser)
return result
class Repetition(ParsingExpression):
"""
Base class for all repetition-like parser expressions (?,*,+)
Args:
eolterm(bool): Flag that indicates that end of line should
terminate repetition match.
"""
def __init__(self, *elements, **kwargs):
super(Repetition, self).__init__(*elements, **kwargs)
self.eolterm = kwargs.get('eolterm', False)
self.sep = kwargs.get('sep', None)
class Optional(Repetition):
"""
    Optional will try to match the specified parser expression and will not
    fail if the match is not successful.
"""
def _parse(self, parser):
result = None
c_pos = parser.position
try:
result = [self.nodes[0].parse(parser)]
except NoMatch:
parser.position = c_pos # Backtracking
return result
class ZeroOrMore(Repetition):
"""
    ZeroOrMore will try to match the specified parser expression zero or more
    times. It will never fail.
"""
def _parse(self, parser):
results = []
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Prefetching
append = results.append
p = self.nodes[0].parse
sep = self.sep.parse if self.sep else None
result = None
while True:
try:
c_pos = parser.position
if sep and result:
sep_result = sep(parser)
if sep_result:
append(sep_result)
result = p(parser)
if not result:
break
append(result)
except NoMatch:
parser.position = c_pos # Backtracking
break
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
return results
class OneOrMore(Repetition):
"""
    OneOrMore will try to match the specified parser expression one or more times.
"""
def _parse(self, parser):
results = []
first = True
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Prefetching
append = results.append
p = self.nodes[0].parse
sep = self.sep.parse if self.sep else None
result = None
try:
while True:
try:
c_pos = parser.position
if sep and result:
sep_result = sep(parser)
if sep_result:
append(sep_result)
result = p(parser)
if not result:
break
append(result)
first = False
except NoMatch:
parser.position = c_pos # Backtracking
if first:
raise
break
finally:
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
return results
class UnorderedGroup(Repetition):
"""
    Will try to match all of the parsing expressions in any order.
"""
def _parse(self, parser):
results = []
c_pos = parser.position
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Prefetching
append = results.append
nodes_to_try = list(self.nodes)
sep = self.sep.parse if self.sep else None
result = None
sep_result = None
first = True
while nodes_to_try:
sep_exc = None
# Separator
c_loc_pos_sep = parser.position
if sep and not first:
try:
sep_result = sep(parser)
except NoMatch as e:
parser.position = c_loc_pos_sep # Backtracking
# This still might be valid if all remaining subexpressions
# are optional and none of them will match
sep_exc = e
c_loc_pos = parser.position
match = True
all_optionals_fail = True
for e in list(nodes_to_try):
try:
result = e.parse(parser)
if result:
if sep_exc:
raise sep_exc
if sep_result:
append(sep_result)
first = False
match = True
all_optionals_fail = False
append(result)
nodes_to_try.remove(e)
break
except NoMatch:
match = False
parser.position = c_loc_pos # local backtracking
if not match or all_optionals_fail:
# If sep is matched backtrack it
parser.position = c_loc_pos_sep
break
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
if not match:
# Unsuccessful match of the whole PE - full backtracking
parser.position = c_pos
parser._nm_raise(self, c_pos, parser)
if results:
return results
class SyntaxPredicate(ParsingExpression):
"""
Base class for all syntax predicates (and, not, empty).
Predicates are parser expressions that will do the match but will not
consume any input.
"""
class And(SyntaxPredicate):
"""
This predicate will succeed if the specified expression matches current
input.
"""
def _parse(self, parser):
c_pos = parser.position
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
raise
parser.position = c_pos
class Not(SyntaxPredicate):
"""
This predicate will succeed if the specified expression doesn't match
current input.
"""
def _parse(self, parser):
c_pos = parser.position
old_in_not = parser.in_not
parser.in_not = True
try:
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
return
parser.position = c_pos
parser._nm_raise(self, c_pos, parser)
finally:
parser.in_not = old_in_not
class Empty(SyntaxPredicate):
"""
This predicate will always succeed without consuming input.
"""
def _parse(self, parser):
pass
class Decorator(ParsingExpression):
"""
    Decorators are a special kind of parsing expression used to mark
    a contained pexpression and give it some special semantics.
    For example, decorators are used to mark a pexpression as a lexical
    rule (see :class:`Combine`).
"""
class Combine(Decorator):
"""
    This decorator defines a pexpression that represents a lexeme rule.
    This rule will always return a Terminal parse tree node.
    Whitespace will be preserved. Comments will not be matched.
"""
def _parse(self, parser):
results = []
oldin_lex_rule = parser.in_lex_rule
parser.in_lex_rule = True
c_pos = parser.position
try:
for parser_model_node in self.nodes:
results.append(parser_model_node.parse(parser))
results = flatten(results)
# Create terminal from result
return Terminal(self, c_pos,
"".join([x.flat_str() for x in results]))
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
parser.in_lex_rule = oldin_lex_rule
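
# A minimal usage sketch (illustrative grammar rule, not part of the original
# module): Combine produces a single Terminal and preserves whitespace inside
# it, which suits lexeme-like rules such as signed integers:
#
#   def signed_int(): return Combine(Optional(['+', '-']), RegExMatch(r'\d+'))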
class Match(ParsingExpression):
"""
Base class for all classes that will try to match something from the input.
"""
def __init__(self, rule_name, root=False, **kwargs):
super(Match, self).__init__(rule_name=rule_name, root=root, **kwargs)
@property
def name(self):
if self.root:
return "%s=%s(%s)" % (self.rule_name, self.__class__.__name__,
self.to_match)
else:
return "%s(%s)" % (self.__class__.__name__, self.to_match)
def _parse_comments(self, parser):
"""Parse comments."""
try:
parser.in_parse_comments = True
if parser.comments_model:
try:
while True:
# TODO: Consumed whitespaces and comments should be
# attached to the first match ahead.
parser.comments.append(
parser.comments_model.parse(parser))
if parser.skipws:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
except NoMatch:
# NoMatch in comment matching is perfectly
# legal and no action should be taken.
pass
finally:
parser.in_parse_comments = False
def parse(self, parser):
if parser.skipws and not parser.in_lex_rule:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
if parser.debug:
parser.dprint(
"?? Try match rule {}{} at position {} => {}"
.format(self.name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()))
if parser.skipws and parser.position in parser.comment_positions:
# Skip comments if already parsed.
parser.position = parser.comment_positions[parser.position]
else:
if not parser.in_parse_comments and not parser.in_lex_rule:
comment_start = parser.position
self._parse_comments(parser)
parser.comment_positions[comment_start] = parser.position
result = self._parse(parser)
if not self.suppress:
return result
class RegExMatch(Match):
'''
This Match class will perform input matching based on Regular Expressions.
Args:
to_match (regex string): A regular expression string to match.
It will be used to create regular expression using re.compile.
ignore_case(bool): If case insensitive match is needed.
Default is None to support propagation from global parser setting.
        multiline(bool): allow regex to work on multiple lines
(re.DOTALL flag). Default is None to support propagation from
global parser setting.
str_repr(str): A string that is used to represent this regex.
        re_flags: flags parameter for re.compile if neither ignore_case
            nor multiline is set.
'''
def __init__(self, to_match, rule_name='', root=False, ignore_case=None,
multiline=None, str_repr=None, re_flags=re.MULTILINE,
**kwargs):
super(RegExMatch, self).__init__(rule_name, root, **kwargs)
self.to_match_regex = to_match
self.ignore_case = ignore_case
self.multiline = multiline
self.explicit_flags = re_flags
self.to_match = str_repr if str_repr is not None else to_match
def compile(self):
flags = self.explicit_flags
if self.multiline is True:
flags |= re.DOTALL
if self.multiline is False and flags & re.DOTALL:
flags -= re.DOTALL
if self.ignore_case is True:
flags |= re.IGNORECASE
if self.ignore_case is False and flags & re.IGNORECASE:
flags -= re.IGNORECASE
self.regex = re.compile(self.to_match_regex, flags)
def __str__(self):
return self.to_match
def __unicode__(self):
return self.__str__()
def _parse(self, parser):
c_pos = parser.position
m = self.regex.match(parser.input, c_pos)
if m:
matched = m.group()
if parser.debug:
parser.dprint(
"++ Match '%s' at %d => '%s'" %
(matched, c_pos, parser.context(len(matched))))
parser.position += len(matched)
if matched:
return Terminal(self, c_pos, matched, extra_info=m)
else:
if parser.debug:
parser.dprint("-- NoMatch at {}".format(c_pos))
parser._nm_raise(self, c_pos, parser)
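
# A minimal usage sketch (illustrative): a grammar rule matching Python-style
# identifiers with a single regular expression:
#
#   def ident(): return RegExMatch(r'[a-zA-Z_]\w*')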
class StrMatch(Match):
"""
This Match class will perform input matching by a string comparison.
Args:
to_match (str): A string to match.
ignore_case(bool): If case insensitive match is needed.
Default is None to support propagation from global parser setting.
"""
def __init__(self, to_match, rule_name='', root=False, ignore_case=None,
**kwargs):
super(StrMatch, self).__init__(rule_name, root, **kwargs)
self.to_match = to_match
self.ignore_case = ignore_case
def _parse(self, parser):
c_pos = parser.position
input_frag = parser.input[c_pos:c_pos+len(self.to_match)]
if self.ignore_case:
match = input_frag.lower() == self.to_match.lower()
else:
match = input_frag == self.to_match
if match:
if parser.debug:
parser.dprint(
"++ Match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser.position += len(self.to_match)
# If this match is inside sequence than mark for suppression
suppress = type(parser.last_pexpression) is Sequence
return Terminal(self, c_pos, self.to_match, suppress=suppress)
else:
if parser.debug:
parser.dprint(
"-- No match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser._nm_raise(self, c_pos, parser)
def __str__(self):
return self.to_match
def __unicode__(self):
return self.__str__()
def __eq__(self, other):
return self.to_match == text(other)
def __hash__(self):
return hash(self.to_match)
# HACK: Kwd class is a bit hackish. Need to find a better way to
# introduce different classes of string tokens.
class Kwd(StrMatch):
"""
A specialization of StrMatch to specify keywords of the language.
"""
def __init__(self, to_match):
super(Kwd, self).__init__(to_match)
self.to_match = to_match
self.root = True
self.rule_name = 'keyword'
class EndOfFile(Match):
"""
    The Match class that will succeed when the end of input is reached.
"""
def __init__(self):
super(EndOfFile, self).__init__("EOF")
@property
def name(self):
return "EOF"
def _parse(self, parser):
c_pos = parser.position
if len(parser.input) == c_pos:
return Terminal(EOF(), c_pos, '', suppress=True)
else:
if parser.debug:
parser.dprint("!! EOF not matched.")
parser._nm_raise(self, c_pos, parser)
def EOF():
return EndOfFile()
# ---------------------------------------------------------
# ---------------------------------------------------
# Parse Tree node classes
class ParseTreeNode(object):
"""
Abstract base class representing node of the Parse Tree.
    The node can be terminal (the leaf of the parse tree) or non-terminal.
Attributes:
rule (ParsingExpression): The rule that created this node.
rule_name (str): The name of the rule that created this node if
root rule or empty string otherwise.
position (int): A position in the input stream where the match
occurred.
position_end (int, read-only): A position in the input stream where
the node ends.
            This position is one char past the last char contained in this
node. Thus, position_end - position = length of the node.
error (bool): Is this a false parse tree node created during error
recovery.
comments : A parse tree of comment(s) attached to this node.
"""
def __init__(self, rule, position, error):
assert rule
assert rule.rule_name is not None
self.rule = rule
self.rule_name = rule.rule_name
self.position = position
self.error = error
self.comments = None
@property
def name(self):
return "%s [%s]" % (self.rule_name, self.position)
@property
def position_end(self):
"Must be implemented in subclasses."
raise NotImplementedError
def visit(self, visitor):
"""
Visitor pattern implementation.
Args:
visitor(PTNodeVisitor): The visitor object.
"""
if visitor.debug:
visitor.dprint("Visiting {} type:{} str:{}"
.format(self.name, type(self).__name__, text(self)))
children = SemanticActionResults()
if isinstance(self, NonTerminal):
for node in self:
child = node.visit(visitor)
# If visit returns None suppress that child node
if child is not None:
children.append_result(node.rule_name, child)
visit_name = "visit_%s" % self.rule_name
if hasattr(visitor, visit_name):
# Call visit method.
result = getattr(visitor, visit_name)(self, children)
# If there is a method with 'second' prefix save
# the result of visit for post-processing
if hasattr(visitor, "second_%s" % self.rule_name):
visitor.for_second_pass.append((self.rule_name, result))
return result
elif visitor.defaults:
# If default actions are enabled
return visitor.visit__default__(self, children)
def tree_str(self, indent=0):
return '{}{} [{}-{}]'.format(' ' * indent, self.rule.name,
self.position, self.position_end)
class Terminal(ParseTreeNode):
"""
Leaf node of the Parse Tree. Represents matched string.
Attributes:
rule (ParsingExpression): The rule that created this terminal.
position (int): A position in the input stream where match occurred.
value (str): Matched string at the given position or missing token
name in the case of an error node.
suppress(bool): If True this terminal can be ignored in semantic
analysis.
extra_info(object): additional information (e.g. the re matcher
object)
"""
__slots__ = ['rule', 'rule_name', 'position', 'error', 'comments',
'value', 'suppress', 'extra_info']
def __init__(self, rule, position, value, error=False, suppress=False,
extra_info=None):
super(Terminal, self).__init__(rule, position, error)
self.value = value
self.suppress = suppress
self.extra_info = extra_info
@property
def desc(self):
if self.value:
return "%s '%s' [%s]" % (self.rule_name, self.value, self.position)
else:
return "%s [%s]" % (self.rule_name, self.position)
@property
def position_end(self):
return self.position + len(self.value)
def flat_str(self):
return self.value
def __str__(self):
return self.value
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.desc
def tree_str(self, indent=0):
return '{}: {}'.format(super(Terminal, self).tree_str(indent),
self.value)
def __eq__(self, other):
return text(self) == text(other)
class NonTerminal(ParseTreeNode, list):
"""
Non-leaf node of the Parse Tree. Represents language syntax construction.
At the same time used in ParseTreeNode navigation expressions.
See test_ptnode_navigation_expressions.py for examples of navigation
expressions.
Attributes:
nodes (list of ParseTreeNode): Children parse tree nodes.
_filtered (bool): Is this NT a dynamically created filtered NT.
This is used internally.
"""
__slots__ = ['rule', 'rule_name', 'position', 'error', 'comments',
'_filtered', '_expr_cache']
def __init__(self, rule, nodes, error=False, _filtered=False):
# Inherit position from the first child node
position = nodes[0].position if nodes else 0
super(NonTerminal, self).__init__(rule, position, error)
self.extend(flatten([nodes]))
self._filtered = _filtered
@property
def value(self):
"""Terminal protocol."""
return text(self)
@property
def desc(self):
return self.name
@property
def position_end(self):
return self[-1].position_end if self else self.position
def flat_str(self):
"""
Return flatten string representation.
"""
return "".join([x.flat_str() for x in self])
def __str__(self):
return " | ".join([text(x) for x in self])
def __unicode__(self):
return self.__str__()
def __repr__(self):
return "[ %s ]" % ", ".join([repr(x) for x in self])
def tree_str(self, indent=0):
return '{}\n{}'.format(super(NonTerminal, self).tree_str(indent),
'\n'.join([c.tree_str(indent + 1)
for c in self]))
def __getattr__(self, rule_name):
"""
Find a child (non)terminal by the rule name.
Args:
rule_name(str): The name of the rule that is referenced from
this node rule.
"""
# Prevent infinite recursion
if rule_name in ['_expr_cache', '_filtered', 'rule', 'rule_name',
'position', 'append', 'extend']:
raise AttributeError
try:
# First check the cache
if rule_name in self._expr_cache:
return self._expr_cache[rule_name]
except AttributeError:
# Navigation expression cache. Used for lookup by rule name.
self._expr_cache = {}
# If result is not found in the cache collect all nodes
# with the given rule name and create new NonTerminal
# and cache it for later access.
nodes = []
rule = None
for n in self:
if self._filtered:
# For filtered NT rule_name is a rule on
# each of its children
for m in n:
if m.rule_name == rule_name:
nodes.append(m)
rule = m.rule
else:
if n.rule_name == rule_name:
nodes.append(n)
rule = n.rule
if rule is None:
# If rule is not found resort to default behavior
return self.__getattribute__(rule_name)
result = NonTerminal(rule=rule, nodes=nodes, _filtered=True)
self._expr_cache[rule_name] = result
return result
# ----------------------------------------------------
# Semantic Actions
#
class PTNodeVisitor(DebugPrinter):
"""
Base class for all parse tree visitors.
"""
def __init__(self, defaults=True, **kwargs):
"""
Args:
defaults(bool): If the default visit method should be applied in
case no method is defined.
"""
self.for_second_pass = []
self.defaults = defaults
super(PTNodeVisitor, self).__init__(**kwargs)
def visit__default__(self, node, children):
"""
Called if no visit method is defined for the node.
Args:
node(ParseTreeNode):
children(processed children ParseTreeNode-s):
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
# Special case. If only one child exist return it.
if len(children) == 1:
retval = children[0]
else:
# If there is only one non-string child return
# that by default. This will support e.g. bracket
# removals.
last_non_str = None
for c in children:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
                    # If there are multiple non-string objects
# by default convert non-terminal to string
if self.debug:
self.dprint("*** Warning: Multiple "
"non-string objects found in "
"default visit. Converting non-"
"terminal to a string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
def visit_parse_tree(parse_tree, visitor):
"""
Applies visitor to parse_tree and runs the second pass
afterwards.
Args:
parse_tree(ParseTreeNode):
visitor(PTNodeVisitor):
"""
if not parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if visitor.debug:
visitor.dprint("ASG: First pass")
# Visit tree.
result = parse_tree.visit(visitor)
# Second pass
if visitor.debug:
visitor.dprint("ASG: Second pass")
for sa_name, asg_node in visitor.for_second_pass:
getattr(visitor, "second_%s" % sa_name)(asg_node)
return result
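
# A minimal usage sketch (assumed grammar and visitor, for illustration only):
# sum a '+'-separated list of integers by visiting the parse tree bottom-up.
#
#   def number():   return RegExMatch(r'\d+')
#   def addition(): return number, ZeroOrMore('+', number), EOF
#
#   class SumVisitor(PTNodeVisitor):
#       def visit_number(self, node, children):
#           return int(node.value)
#       def visit_addition(self, node, children):
#           return sum(children)
#
#   parser = ParserPython(addition)
#   tree = parser.parse("1 + 2 + 3")
#   total = visit_parse_tree(tree, SumVisitor())   # -> 6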
class SemanticAction(object):
"""
    Semantic actions are executed during semantic analysis. They are in charge
    of producing an Abstract Semantic Graph (ASG) out of the parse tree.
    Every non-terminal and terminal can have a semantic action defined which
    will be triggered during semantic analysis.
    Semantic action triggering is separated into two passes. The first_pass
    method is required; the second_pass method is optional and, if it exists,
    will be called after the first pass. The second pass can be used for
    forward referencing, e.g. linking to a declaration registered in the
    first pass.
"""
def first_pass(self, parser, node, nodes):
"""
Called in the first pass of tree walk.
This is the default implementation used if no semantic action is
defined.
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
# Special case. If only one child exist return it.
if len(nodes) == 1:
retval = nodes[0]
else:
# If there is only one non-string child return
# that by default. This will support e.g. bracket
# removals.
last_non_str = None
for c in nodes:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
                        # If there are multiple non-string objects
# by default convert non-terminal to string
if parser.debug:
parser.dprint(
"*** Warning: Multiple non-"
"string objects found in applying "
"default semantic action. Converting "
"non-terminal to string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
class SemanticActionResults(list):
"""
    Used in visitor method calls to supply the results of semantic analysis
    of child parse tree nodes.
Enables dot access by the name of the rule similar to NonTerminal
tree navigation.
Enables index access as well as iteration.
"""
def __init__(self):
self.results = {}
def append_result(self, name, result):
if name:
if name not in self.results:
self.results[name] = []
self.results[name].append(result)
self.append(result)
def __getattr__(self, attr_name):
if attr_name == 'results':
raise AttributeError
return self.results.get(attr_name, [])
# Common semantic actions
class SemanticActionSingleChild(SemanticAction):
def first_pass(self, parser, node, children):
return children[0]
class SemanticActionBodyWithBraces(SemanticAction):
def first_pass(self, parser, node, children):
return children[1:-1]
class SemanticActionToString(SemanticAction):
def first_pass(self, parser, node, children):
return text(node)
# ----------------------------------------------------
# Parsers
class Parser(DebugPrinter):
"""
Abstract base class for all parsers.
Attributes:
comments_model: parser model for comments.
comments(list): A list of ParseTreeNode for matched comments.
sem_actions(dict): A dictionary of semantic actions keyed by the
rule name.
parse_tree(NonTerminal): The parse tree consisting of NonTerminal and
Terminal instances.
in_rule (str): Current rule name.
in_parse_comments (bool): True if parsing comments.
in_lex_rule (bool): True if in lexical rule. Currently used in Combine
decorator to convert match to a single Terminal.
in_not (bool): True if in Not parsing expression. Used for better error
reporting.
last_pexpression (ParsingExpression): Last parsing expression
traversed.
"""
# Not marker for NoMatch rules list. Used if the first unsuccessful rule
# match is Not.
FIRST_NOT = Not()
def __init__(self, skipws=True, ws=None, reduce_tree=False, autokwd=False,
ignore_case=False, memoization=False, **kwargs):
"""
Args:
skipws (bool): Should the whitespace skipping be done. Default is
True.
ws (str): A string consisting of whitespace characters.
            reduce_tree (bool): If true non-terminals with a single child will be
eliminated from the parse tree. Default is False.
autokwd(bool): If keyword-like StrMatches are matched on word
boundaries. Default is False.
ignore_case(bool): If case is ignored (default=False)
memoization(bool): If memoization should be used
(a.k.a. packrat parsing)
"""
super(Parser, self).__init__(**kwargs)
# Used to indicate state in which parser should not
# treat newlines as whitespaces.
self._eolterm = False
self.skipws = skipws
if ws is not None:
self.ws = ws
else:
self.ws = DEFAULT_WS
self.reduce_tree = reduce_tree
self.autokwd = autokwd
self.ignore_case = ignore_case
self.memoization = memoization
self.comments_model = None
self.comments = []
self.comment_positions = {}
self.sem_actions = {}
self.parse_tree = None
# Create regex used for autokwd matching
flags = 0
if ignore_case:
flags = re.IGNORECASE
self.keyword_regex = re.compile(r'[^\d\W]\w*', flags)
# Keep track of root rule we are currently in.
# Used for debugging purposes
self.in_rule = ''
self.in_parse_comments = False
# Are we in lexical rule? If so do not
# skip whitespaces.
self.in_lex_rule = False
# Are we in Not parsing expression?
self.in_not = False
# Last parsing expression traversed
self.last_pexpression = None
@property
def ws(self):
return self._ws
@ws.setter
def ws(self, new_value):
self._real_ws = new_value
self._ws = new_value
if self.eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
@property
def eolterm(self):
return self._eolterm
@eolterm.setter
def eolterm(self, new_value):
# Toggle newline char in ws on eolterm property set.
# During eolterm state parser should not treat
# newline as a whitespace.
self._eolterm = new_value
if self._eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
else:
self._ws = self._real_ws
def parse(self, _input, file_name=None):
"""
Parses input and produces parse tree.
Args:
_input(str): An input string to parse.
file_name(str): If input is loaded from file this can be
set to file name. It is used in error messages.
"""
self.position = 0 # Input position
self.nm = None # Last NoMatch exception
self.line_ends = []
self.input = _input
self.file_name = file_name
self.comment_positions = {}
self.cache_hits = 0
self.cache_misses = 0
try:
self.parse_tree = self._parse()
except NoMatch as e:
# Remove Not marker
if e.rules[0] is Parser.FIRST_NOT:
del e.rules[0]
# Get line and column from position
e.line, e.col = self.pos_to_linecol(e.position)
raise
finally:
# At end of parsing clear all memoization caches.
# Do this here to free memory.
if self.memoization:
self._clear_caches()
# In debug mode export parse tree to dot file for
# visualization
if self.debug and self.parse_tree:
from arpeggio.export import PTDOTExporter
root_rule_name = self.parse_tree.rule_name
PTDOTExporter().exportFile(
self.parse_tree, "{}_parse_tree.dot".format(root_rule_name))
return self.parse_tree
def parse_file(self, file_name):
"""
Parses content from the given file.
Args:
file_name(str): A file name.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
content = f.read()
return self.parse(content, file_name=file_name)
def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception("Semantic actions not defined.")
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception("Semantic actions parameter must be a dictionary.")
for_second_pass = []
def tree_walk(node):
"""
            Walks the parse tree, calling first_pass for every registered
            semantic action and creating a list of objects that need to be
            processed in the second pass.
"""
if self.debug:
self.dprint(
"Walking down %s type: %s str: %s" %
(node.name, type(node).__name__, text(node)))
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" %
(node.name, text(node), type(node).__name__,
len(node) if isinstance(node, list) else 0))
for i, a in enumerate(children):
self.dprint(" %d:%s type:%s" %
(i+1, text(a), type(a).__name__))
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, "second_pass"):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ \
if hasattr(sem_action, '__name__') \
else sem_action.__class__.__name__
self.dprint(" Applying semantic action %s" % action_name)
else:
if defaults:
# If no rule is present use some sane defaults
if self.debug:
self.dprint(" Applying default semantic action.")
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
self.dprint(" Suppressed.")
else:
self.dprint(" Resolved to = %s type:%s" %
(text(retval), type(retval).__name__))
return retval
if self.debug:
self.dprint("ASG: First pass")
asg = tree_walk(self.parse_tree)
# Second pass
if self.debug:
self.dprint("ASG: Second pass")
for sa_name, asg_node in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
def pos_to_linecol(self, pos):
"""
Calculate (line, column) tuple for the given position in the stream.
"""
if not self.line_ends:
try:
# TODO: Check this implementation on Windows.
self.line_ends.append(self.input.index("\n"))
while True:
try:
self.line_ends.append(
self.input.index("\n", self.line_ends[-1] + 1))
except ValueError:
break
except ValueError:
pass
line = bisect.bisect_left(self.line_ends, pos)
col = pos
if line > 0:
col -= self.line_ends[line - 1]
if self.input[self.line_ends[line - 1]] in '\n\r':
col -= 1
return line + 1, col + 1
def context(self, length=None, position=None):
"""
Returns current context substring, i.e. the substring around current
position.
Args:
            length(int): If given, a span of that many characters from
                the current position is marked with asterisks.
position(int): The position in the input stream.
"""
if not position:
position = self.position
if length:
retval = "{}*{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + length]),
text(self.input[position + length:position + 10]))
else:
retval = "{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + 10]))
return retval.replace('\n', ' ').replace('\r', '')
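    # Example (sketch): for input "hello world" with the position at "world",
    #   self.context(5)  ->  "hello *world*"
    # i.e. asterisks bracket `length` characters from the given position.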
def _nm_raise(self, *args):
"""
        Register a new NoMatch object if the input is consumed beyond
        the last NoMatch, and raise the last NoMatch.
        Args:
            args: A NoMatch instance or (rule, position, parser)
"""
rule, position, parser = args
if self.nm is None or not parser.in_parse_comments:
if self.nm is None or position > self.nm.position:
if self.in_not:
self.nm = NoMatch([Parser.FIRST_NOT], position, parser)
else:
self.nm = NoMatch([rule], position, parser)
elif position == self.nm.position and isinstance(rule, Match) \
and not self.in_not:
self.nm.rules.append(rule)
raise self.nm
def _clear_caches(self):
"""
Clear memoization caches if packrat parser is used.
"""
self.parser_model._clear_cache()
if self.comments_model:
self.comments_model._clear_cache()
class CrossRef(object):
'''
Used for rule reference resolving.
'''
def __init__(self, target_rule_name, position=-1):
self.target_rule_name = target_rule_name
self.position = position
class ParserPython(Parser):
def __init__(self, language_def, comment_def=None, syntax_classes=None,
*args, **kwargs):
"""
Constructs parser from python statements and expressions.
Args:
language_def (python function): A python function that defines
the root rule of the grammar.
comment_def (python function): A python function that defines
the root rule of the comments grammar.
syntax_classes (dict): Overrides of special syntax parser
expression classes (StrMatch, Sequence, OrderedChoice).
"""
super(ParserPython, self).__init__(*args, **kwargs)
self.syntax_classes = syntax_classes if syntax_classes else {}
# PEG Abstract Syntax Graph
self.parser_model = self._from_python(language_def)
self.comments_model = None
if comment_def:
self.comments_model = self._from_python(comment_def)
self.comments_model.root = True
self.comments_model.rule_name = comment_def.__name__
# In debug mode export parser model to dot for
# visualization
if self.debug:
from arpeggio.export import PMDOTExporter
root_rule = language_def.__name__
PMDOTExporter().exportFile(self.parser_model,
"{}_parser_model.dot".format(root_rule))
def _parse(self):
return self.parser_model.parse(self)
def _from_python(self, expression):
"""
Create parser model from the definition given in the form of python
functions returning lists, tuples, callables, strings and
ParsingExpression objects.
Returns:
Parser Model (PEG Abstract Semantic Graph)
"""
__rule_cache = {"EndOfFile": EndOfFile()}
        __for_resolving = []  # Expressions that need cross-ref resolving
self.__cross_refs = 0
_StrMatch = self.syntax_classes.get('StrMatch', StrMatch)
_OrderedChoice = self.syntax_classes.get('OrderedChoice',
OrderedChoice)
_Sequence = self.syntax_classes.get('Sequence', Sequence)
def inner_from_python(expression):
retval = None
if isinstance(expression, types.FunctionType):
# If this expression is a parser rule
rule_name = expression.__name__
if rule_name in __rule_cache:
c_rule = __rule_cache.get(rule_name)
if self.debug:
                        self.dprint("Rule {} found in cache."
.format(rule_name))
if isinstance(c_rule, CrossRef):
self.__cross_refs += 1
if self.debug:
self.dprint("CrossRef usage: {}"
.format(c_rule.target_rule_name))
return c_rule
# Semantic action for the rule
if hasattr(expression, "sem"):
self.sem_actions[rule_name] = expression.sem
# Register rule cross-ref to support recursion
__rule_cache[rule_name] = CrossRef(rule_name)
curr_expr = expression
while isinstance(curr_expr, types.FunctionType):
                    # If the function directly returns another function,
                    # keep calling until a non-function is returned.
curr_expr = curr_expr()
retval = inner_from_python(curr_expr)
retval.rule_name = rule_name
retval.root = True
# Update cache
__rule_cache[rule_name] = retval
if self.debug:
self.dprint("New rule: {} -> {}"
.format(rule_name, retval.__class__.__name__))
elif type(expression) is text or isinstance(expression, _StrMatch):
if type(expression) is text:
retval = _StrMatch(expression,
ignore_case=self.ignore_case)
else:
retval = expression
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
if self.autokwd:
to_match = retval.to_match
match = self.keyword_regex.match(to_match)
if match and match.span() == (0, len(to_match)):
retval = RegExMatch(r'{}\b'.format(to_match),
ignore_case=self.ignore_case,
str_repr=to_match)
retval.compile()
elif isinstance(expression, RegExMatch):
                # Regular expressions are not compiled yet, to support
                # propagation of global settings from the parser.
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
expression.compile()
retval = expression
elif isinstance(expression, Match):
retval = expression
elif isinstance(expression, UnorderedGroup):
retval = expression
for n in retval.elements:
retval.nodes.append(inner_from_python(n))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif isinstance(expression, _Sequence) or \
isinstance(expression, Repetition) or \
isinstance(expression, SyntaxPredicate) or \
isinstance(expression, Decorator):
retval = expression
retval.nodes.append(inner_from_python(retval.elements))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif type(expression) in [list, tuple]:
if type(expression) is list:
retval = _OrderedChoice(expression)
else:
retval = _Sequence(expression)
retval.nodes = [inner_from_python(e) for e in expression]
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
else:
raise GrammarError("Unrecognized grammar element '%s'." %
text(expression))
# Translate separator expression.
if isinstance(expression, Repetition) and expression.sep:
expression.sep = inner_from_python(expression.sep)
return retval
# Cross-ref resolving
def resolve():
for e in __for_resolving:
for i, node in enumerate(e.nodes):
if isinstance(node, CrossRef):
self.__cross_refs -= 1
e.nodes[i] = __rule_cache[node.target_rule_name]
parser_model = inner_from_python(expression)
resolve()
assert self.__cross_refs == 0, "Not all crossrefs are resolved!"
return parser_model
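    # Minimal usage sketch (the grammar rules below are hypothetical):
    #
    #   def number():  return RegExMatch(r'\d+')
    #   def numbers(): return OneOrMore(number), EOF
    #
    #   parser = ParserPython(numbers)
    #   parse_tree = parser.parse("1 2 3")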
def errors(self):
pass | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/app-route/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/app-route/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/hirore/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/FlaskBB-2.0.2.tar.gz/FlaskBB-2.0.2/flaskbb/management/views.py | import logging
import sys
from celery import __version__ as celery_version
from flask import __version__ as flask_version
from flask import (Blueprint, current_app, flash, jsonify, redirect, request,
url_for)
from flask.views import MethodView
from flask_allows import Not, Permission
from flask_babelplus import gettext as _
from flask_login import current_user, login_fresh
from pluggy import HookimplMarker
from flaskbb import __version__ as flaskbb_version
from flaskbb.extensions import allows, celery, db
from flaskbb.forum.forms import UserSearchForm
from flaskbb.forum.models import Category, Forum, Post, Report, Topic
from flaskbb.management.forms import (AddForumForm, AddGroupForm, AddUserForm,
CategoryForm, EditForumForm,
EditGroupForm, EditUserForm)
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.plugins.models import PluginRegistry, PluginStore
from flaskbb.plugins.utils import validate_plugin
from flaskbb.user.models import Group, Guest, User
from flaskbb.utils.forms import populate_settings_dict, populate_settings_form
from flaskbb.utils.helpers import (get_online_users, register_view,
render_template, time_diff, time_utcnow)
from flaskbb.utils.requirements import (CanBanUser, CanEditUser, IsAdmin,
IsAtleastModerator,
IsAtleastSuperModerator)
from flaskbb.utils.settings import flaskbb_config
impl = HookimplMarker('flaskbb')
logger = logging.getLogger(__name__)
class ManagementSettings(MethodView):
decorators = [allows.requires(IsAdmin)]
def _determine_active_settings(self, slug, plugin):
"""Determines which settings are active.
Returns a tuple in following order:
``form``, ``old_settings``, ``plugin_obj``, ``active_nav``
"""
# Any ideas how to do this better?
slug = slug if slug else 'general'
active_nav = {} # used to build the navigation
plugin_obj = None
if plugin is not None:
plugin_obj = PluginRegistry.query.filter_by(name=plugin
).first_or_404()
active_nav.update(
{
'key': plugin_obj.name,
'title': plugin_obj.name.title()
}
)
form = plugin_obj.get_settings_form()
old_settings = plugin_obj.settings
elif slug is not None:
group_obj = SettingsGroup.query.filter_by(key=slug).first_or_404()
active_nav.update({'key': group_obj.key, 'title': group_obj.name})
form = Setting.get_form(group_obj)()
old_settings = Setting.get_settings(group_obj)
return form, old_settings, plugin_obj, active_nav
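    # Illustrative example (plugin name is hypothetical): a request to
    # /settings/plugin/portal resolves here with slug=None, plugin='portal',
    # yielding active_nav == {'key': 'portal', 'title': 'Portal'} together
    # with the plugin's own settings form, whereas /settings/general loads
    # the 'general' SettingsGroup instead.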
def get(self, slug=None, plugin=None):
form, old_settings, plugin_obj, active_nav = \
self._determine_active_settings(slug, plugin)
# get all groups and plugins - used to build the navigation
all_groups = SettingsGroup.query.all()
all_plugins = PluginRegistry.query.filter(db.and_(
PluginRegistry.values != None,
PluginRegistry.enabled == True
)).all()
form = populate_settings_form(form, old_settings)
return render_template(
"management/settings.html",
form=form,
all_groups=all_groups,
all_plugins=all_plugins,
active_nav=active_nav
)
def post(self, slug=None, plugin=None):
form, old_settings, plugin_obj, active_nav = \
self._determine_active_settings(slug, plugin)
all_groups = SettingsGroup.query.all()
all_plugins = PluginRegistry.query.filter(db.and_(
PluginRegistry.values != None,
PluginRegistry.enabled == True
)).all()
if form.validate_on_submit():
new_settings = populate_settings_dict(form, old_settings)
if plugin_obj is not None:
plugin_obj.update_settings(new_settings)
else:
Setting.update(settings=new_settings, app=current_app)
flash(_("Settings saved."), "success")
return render_template(
"management/settings.html",
form=form,
all_groups=all_groups,
all_plugins=all_plugins,
active_nav=active_nav
)
class ManageUsers(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
form = UserSearchForm
def get(self):
page = request.args.get('page', 1, type=int)
form = self.form()
users = User.query.order_by(User.id.asc()).paginate(
page, flaskbb_config['USERS_PER_PAGE'], False
)
return render_template(
'management/users.html', users=users, search_form=form
)
def post(self):
page = request.args.get('page', 1, type=int)
form = self.form()
if form.validate():
users = form.get_results().\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template(
'management/users.html', users=users, search_form=form
)
users = User.query.order_by(User.id.asc()).paginate(
page, flaskbb_config['USERS_PER_PAGE'], False
)
return render_template(
'management/users.html', users=users, search_form=form
)
class EditUser(MethodView):
decorators = [allows.requires(IsAtleastModerator & CanEditUser)]
form = EditUserForm
def get(self, user_id):
user = User.query.filter_by(id=user_id).first_or_404()
form = self.form(user)
member_group = db.and_(
* [
db.not_(getattr(Group, p))
for p in ['admin', 'mod', 'super_mod', 'banned', 'guest']
]
)
filt = db.or_(
Group.id.in_(g.id for g in current_user.groups), member_group
)
if Permission(IsAtleastSuperModerator, identity=current_user):
filt = db.or_(filt, Group.mod)
if Permission(IsAdmin, identity=current_user):
filt = db.or_(filt, Group.admin, Group.super_mod)
if Permission(CanBanUser, identity=current_user):
filt = db.or_(filt, Group.banned)
group_query = Group.query.filter(filt)
form.primary_group.query = group_query
form.secondary_groups.query = group_query
return render_template(
'management/user_form.html', form=form, title=_('Edit User')
)
def post(self, user_id):
user = User.query.filter_by(id=user_id).first_or_404()
member_group = db.and_(
* [
db.not_(getattr(Group, p))
for p in ['admin', 'mod', 'super_mod', 'banned', 'guest']
]
)
filt = db.or_(
Group.id.in_(g.id for g in current_user.groups), member_group
)
if Permission(IsAtleastSuperModerator, identity=current_user):
filt = db.or_(filt, Group.mod)
if Permission(IsAdmin, identity=current_user):
filt = db.or_(filt, Group.admin, Group.super_mod)
if Permission(CanBanUser, identity=current_user):
filt = db.or_(filt, Group.banned)
group_query = Group.query.filter(filt)
form = EditUserForm(user)
form.primary_group.query = group_query
form.secondary_groups.query = group_query
if form.validate_on_submit():
form.populate_obj(user)
user.primary_group_id = form.primary_group.data.id
# Don't override the password
if form.password.data:
user.password = form.password.data
user.save(groups=form.secondary_groups.data)
flash(_('User updated.'), 'success')
return redirect(url_for('management.edit_user', user_id=user.id))
return render_template(
'management/user_form.html', form=form, title=_('Edit User')
)
class DeleteUser(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, user_id=None):
# ajax request
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
for user in User.query.filter(User.id.in_(ids)).all():
# do not delete current user
if current_user.id == user.id:
continue
if user.delete():
data.append(
{
"id": user.id,
"type": "delete",
"reverse": False,
"reverse_name": None,
"reverse_url": None
}
)
return jsonify(
message="{} users deleted.".format(len(data)),
category="success",
data=data,
status=200
)
user = User.query.filter_by(id=user_id).first_or_404()
if current_user.id == user.id:
            flash(_("You cannot delete yourself."), "danger")
return redirect(url_for("management.users"))
user.delete()
flash(_("User deleted."), "success")
return redirect(url_for("management.users"))
class AddUser(MethodView):
decorators = [allows.requires(IsAdmin)]
form = AddUserForm
def get(self):
return render_template(
'management/user_form.html', form=self.form(), title=_('Add User')
)
def post(self):
form = self.form()
if form.validate_on_submit():
form.save()
flash(_('User added.'), 'success')
return redirect(url_for('management.users'))
return render_template(
'management/user_form.html', form=form, title=_('Add User')
)
class BannedUsers(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
form = UserSearchForm
def get(self):
page = request.args.get('page', 1, type=int)
search_form = self.form()
users = User.query.filter(
Group.banned == True, Group.id == User.primary_group_id
).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template(
'management/banned_users.html',
users=users,
search_form=search_form
)
def post(self):
page = request.args.get('page', 1, type=int)
search_form = self.form()
users = User.query.filter(
Group.banned == True, Group.id == User.primary_group_id
).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
if search_form.validate():
users = search_form.get_results().\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template(
'management/banned_users.html',
users=users,
search_form=search_form
)
return render_template(
'management/banned_users.html',
users=users,
search_form=search_form
)
class BanUser(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def post(self, user_id=None):
if not Permission(CanBanUser, identity=current_user):
flash(
_("You do not have the permissions to ban this user."),
"danger"
)
return redirect(url_for("management.overview"))
# ajax request
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
users = User.query.filter(User.id.in_(ids)).all()
for user in users:
                # don't let a user ban himself and do not allow a moderator
                # to ban an admin user
                if (current_user.id == user.id or
                        (Permission(IsAdmin, identity=user) and
                         Permission(Not(IsAdmin), identity=current_user))):
continue
elif user.ban():
data.append(
{
"id":
user.id,
"type":
"ban",
"reverse":
"unban",
"reverse_name":
_("Unban"),
"reverse_url":
url_for("management.unban_user", user_id=user.id)
}
)
return jsonify(
message="{} users banned.".format(len(data)),
category="success",
data=data,
status=200
)
user = User.query.filter_by(id=user_id).first_or_404()
# Do not allow moderators to ban admins
if Permission(IsAdmin, identity=user) and Permission(
Not(IsAdmin), identity=current_user):
flash(_("A moderator cannot ban an admin user."), "danger")
return redirect(url_for("management.overview"))
if not current_user.id == user.id and user.ban():
flash(_("User is now banned."), "success")
else:
flash(_("Could not ban user."), "danger")
return redirect(url_for("management.banned_users"))
class UnbanUser(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def post(self, user_id=None):
if not Permission(CanBanUser, identity=current_user):
flash(
_("You do not have the permissions to unban this user."),
"danger"
)
return redirect(url_for("management.overview"))
# ajax request
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
for user in User.query.filter(User.id.in_(ids)).all():
if user.unban():
data.append(
{
"id": user.id,
"type": "unban",
"reverse": "ban",
"reverse_name": _("Ban"),
"reverse_url": url_for("management.ban_user",
user_id=user.id)
}
)
return jsonify(
message="{} users unbanned.".format(len(data)),
category="success",
data=data,
status=200
)
user = User.query.filter_by(id=user_id).first_or_404()
if user.unban():
flash(_("User is now unbanned."), "success")
else:
flash(_("Could not unban user."), "danger")
return redirect(url_for("management.banned_users"))
class Groups(MethodView):
decorators = [allows.requires(IsAdmin)]
def get(self):
page = request.args.get("page", 1, type=int)
groups = Group.query.\
order_by(Group.id.asc()).\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("management/groups.html", groups=groups)
class AddGroup(MethodView):
decorators = [allows.requires(IsAdmin)]
form = AddGroupForm
def get(self):
return render_template(
'management/group_form.html',
form=self.form(),
title=_('Add Group')
)
def post(self):
form = AddGroupForm()
if form.validate_on_submit():
form.save()
flash(_('Group added.'), 'success')
return redirect(url_for('management.groups'))
return render_template(
'management/group_form.html', form=form, title=_('Add Group')
)
class EditGroup(MethodView):
decorators = [allows.requires(IsAdmin)]
form = EditGroupForm
def get(self, group_id):
group = Group.query.filter_by(id=group_id).first_or_404()
form = self.form(group)
return render_template(
'management/group_form.html', form=form, title=_('Edit Group')
)
def post(self, group_id):
group = Group.query.filter_by(id=group_id).first_or_404()
form = EditGroupForm(group)
if form.validate_on_submit():
form.populate_obj(group)
group.save()
if group.guest:
Guest.invalidate_cache()
flash(_('Group updated.'), 'success')
return redirect(url_for('management.groups', group_id=group.id))
return render_template(
'management/group_form.html', form=form, title=_('Edit Group')
)
class DeleteGroup(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, group_id=None):
if request.is_xhr:
ids = request.get_json()["ids"]
# TODO: Get rid of magic numbers
if not (set(ids) & set(["1", "2", "3", "4", "5", "6"])):
data = []
for group in Group.query.filter(Group.id.in_(ids)).all():
group.delete()
data.append(
{
"id": group.id,
"type": "delete",
"reverse": False,
"reverse_name": None,
"reverse_url": None
}
)
return jsonify(
message="{} groups deleted.".format(len(data)),
category="success",
data=data,
status=200
)
return jsonify(
message=_("You cannot delete one of the standard groups."),
category="danger",
data=None,
status=404
)
if group_id is not None:
if group_id <= 6: # there are 6 standard groups
                flash(
                    _(
                        "You cannot delete the standard groups. "
                        "Try renaming them instead."
                    ), "danger"
                )
return redirect(url_for("management.groups"))
group = Group.query.filter_by(id=group_id).first_or_404()
group.delete()
flash(_("Group deleted."), "success")
return redirect(url_for("management.groups"))
flash(_("No group chosen."), "danger")
return redirect(url_for("management.groups"))
class Forums(MethodView):
decorators = [allows.requires(IsAdmin)]
def get(self):
categories = Category.query.order_by(Category.position.asc()).all()
return render_template("management/forums.html", categories=categories)
class EditForum(MethodView):
decorators = [allows.requires(IsAdmin)]
form = EditForumForm
def get(self, forum_id):
forum = Forum.query.filter_by(id=forum_id).first_or_404()
form = self.form(forum)
if forum.moderators:
form.moderators.data = ','.join(
[user.username for user in forum.moderators]
)
else:
form.moderators.data = None
return render_template(
'management/forum_form.html', form=form, title=_('Edit Forum')
)
def post(self, forum_id):
forum = Forum.query.filter_by(id=forum_id).first_or_404()
form = self.form(forum)
if form.validate_on_submit():
form.save()
flash(_('Forum updated.'), 'success')
return redirect(url_for('management.edit_forum',
forum_id=forum.id))
else:
if forum.moderators:
form.moderators.data = ','.join(
[user.username for user in forum.moderators]
)
else:
form.moderators.data = None
return render_template(
'management/forum_form.html', form=form, title=_('Edit Forum')
)
class AddForum(MethodView):
decorators = [allows.requires(IsAdmin)]
form = AddForumForm
def get(self, category_id=None):
form = self.form()
form.groups.data = Group.query.order_by(Group.id.asc()).all()
if category_id:
category = Category.query.filter_by(id=category_id).first()
form.category.data = category
return render_template(
'management/forum_form.html', form=form, title=_('Add Forum')
)
def post(self, category_id=None):
form = self.form()
if form.validate_on_submit():
form.save()
flash(_('Forum added.'), 'success')
return redirect(url_for('management.forums'))
else:
form.groups.data = Group.query.order_by(Group.id.asc()).all()
if category_id:
category = Category.query.filter_by(id=category_id).first()
form.category.data = category
return render_template(
'management/forum_form.html', form=form, title=_('Add Forum')
)
class DeleteForum(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, forum_id):
forum = Forum.query.filter_by(id=forum_id).first_or_404()
involved_users = User.query.filter(
Topic.forum_id == forum.id, Post.user_id == User.id
).all()
forum.delete(involved_users)
flash(_("Forum deleted."), "success")
return redirect(url_for("management.forums"))
class AddCategory(MethodView):
decorators = [allows.requires(IsAdmin)]
form = CategoryForm
def get(self):
return render_template(
'management/category_form.html',
form=self.form(),
title=_('Add Category')
)
def post(self):
form = self.form()
if form.validate_on_submit():
form.save()
flash(_('Category added.'), 'success')
return redirect(url_for('management.forums'))
return render_template(
'management/category_form.html', form=form, title=_('Add Category')
)
class EditCategory(MethodView):
decorators = [allows.requires(IsAdmin)]
form = CategoryForm
def get(self, category_id):
category = Category.query.filter_by(id=category_id).first_or_404()
form = self.form(obj=category)
return render_template(
'management/category_form.html',
form=form,
title=_('Edit Category')
)
def post(self, category_id):
category = Category.query.filter_by(id=category_id).first_or_404()
form = self.form(obj=category)
if form.validate_on_submit():
form.populate_obj(category)
flash(_('Category updated.'), 'success')
category.save()
return render_template(
'management/category_form.html',
form=form,
title=_('Edit Category')
)
class DeleteCategory(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, category_id):
category = Category.query.filter_by(id=category_id).first_or_404()
involved_users = User.query.filter(
Forum.category_id == category.id, Topic.forum_id == Forum.id,
Post.user_id == User.id
).all()
category.delete(involved_users)
flash(_("Category with all associated forums deleted."), "success")
return redirect(url_for("management.forums"))
class Reports(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def get(self):
page = request.args.get("page", 1, type=int)
reports = Report.query.\
order_by(Report.id.asc()).\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("management/reports.html", reports=reports)
class UnreadReports(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def get(self):
page = request.args.get("page", 1, type=int)
reports = Report.query.\
filter(Report.zapped == None).\
order_by(Report.id.desc()).\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("management/reports.html", reports=reports)
class MarkReportRead(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def post(self, report_id=None):
# AJAX request
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
for report in Report.query.filter(Report.id.in_(ids)).all():
report.zapped_by = current_user.id
report.zapped = time_utcnow()
report.save()
data.append(
{
"id": report.id,
"type": "read",
"reverse": False,
"reverse_name": None,
"reverse_url": None
}
)
return jsonify(
message="{} reports marked as read.".format(len(data)),
category="success",
data=data,
status=200
)
# mark single report as read
if report_id:
report = Report.query.filter_by(id=report_id).first_or_404()
if report.zapped:
flash(
_("Report %(id)s is already marked as read.", id=report.id),
"success"
)
return redirect(url_for("management.reports"))
report.zapped_by = current_user.id
report.zapped = time_utcnow()
report.save()
flash(_("Report %(id)s marked as read.", id=report.id), "success")
return redirect(url_for("management.reports"))
# mark all as read
reports = Report.query.filter(Report.zapped == None).all()
report_list = []
for report in reports:
report.zapped_by = current_user.id
report.zapped = time_utcnow()
report_list.append(report)
db.session.add_all(report_list)
db.session.commit()
flash(_("All reports were marked as read."), "success")
return redirect(url_for("management.reports"))
class DeleteReport(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def post(self, report_id=None):
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
for report in Report.query.filter(Report.id.in_(ids)).all():
if report.delete():
data.append(
{
"id": report.id,
"type": "delete",
"reverse": False,
"reverse_name": None,
"reverse_url": None
}
)
return jsonify(
message="{} reports deleted.".format(len(data)),
category="success",
data=data,
status=200
)
report = Report.query.filter_by(id=report_id).first_or_404()
report.delete()
flash(_("Report deleted."), "success")
return redirect(url_for("management.reports"))
class CeleryStatus(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def get(self):
celery_inspect = celery.control.inspect()
try:
celery_running = True if celery_inspect.ping() else False
except Exception:
# catching Exception is bad, and just catching ConnectionError
# from redis is also bad because you can run celery with other
# brokers as well.
celery_running = False
return jsonify(celery_running=celery_running, status=200)
class ManagementOverview(MethodView):
decorators = [allows.requires(IsAtleastModerator)]
def get(self):
# user and group stats
banned_users = User.query.filter(
Group.banned == True, Group.id == User.primary_group_id
).count()
if not current_app.config["REDIS_ENABLED"]:
online_users = User.query.filter(User.lastseen >= time_diff()
).count()
else:
online_users = len(get_online_users())
unread_reports = Report.query.\
filter(Report.zapped == None).\
order_by(Report.id.desc()).\
count()
python_version = "{}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
)
stats = {
"current_app": current_app,
"unread_reports": unread_reports,
            # forum stats
"all_users": User.query.count(),
"banned_users": banned_users,
"online_users": online_users,
"all_groups": Group.query.count(),
"report_count": Report.query.count(),
"topic_count": Topic.query.count(),
"post_count": Post.query.count(),
# components
"python_version": python_version,
"celery_version": celery_version,
"flask_version": flask_version,
"flaskbb_version": flaskbb_version,
# plugins
"plugins": PluginRegistry.query.all()
}
return render_template("management/overview.html", **stats)
class PluginsView(MethodView):
decorators = [allows.requires(IsAdmin)]
def get(self):
plugins = PluginRegistry.query.all()
return render_template("management/plugins.html", plugins=plugins)
class EnablePlugin(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, name):
validate_plugin(name)
plugin = PluginRegistry.query.filter_by(name=name).first_or_404()
if plugin.enabled:
flash(
_("Plugin %(plugin)s is already enabled.", plugin=plugin.name),
"info"
)
return redirect(url_for("management.plugins"))
plugin.enabled = True
plugin.save()
flash(
_(
"Plugin %(plugin)s enabled. Please restart FlaskBB now.",
plugin=plugin.name
), "success"
)
return redirect(url_for("management.plugins"))
class DisablePlugin(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, name):
validate_plugin(name)
plugin = PluginRegistry.query.filter_by(name=name).first_or_404()
if not plugin.enabled:
flash(
_("Plugin %(plugin)s is already disabled.", plugin=plugin.name),
"info"
)
return redirect(url_for("management.plugins"))
plugin.enabled = False
plugin.save()
flash(
_(
"Plugin %(plugin)s disabled. Please restart FlaskBB now.",
plugin=plugin.name
), "success"
)
return redirect(url_for("management.plugins"))
class UninstallPlugin(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, name):
validate_plugin(name)
plugin = PluginRegistry.query.filter_by(name=name).first_or_404()
PluginStore.query.filter_by(plugin_id=plugin.id).delete()
db.session.commit()
flash(_("Plugin has been uninstalled."), "success")
return redirect(url_for("management.plugins"))
class InstallPlugin(MethodView):
decorators = [allows.requires(IsAdmin)]
def post(self, name):
plugin_module = validate_plugin(name)
plugin = PluginRegistry.query.filter_by(name=name).first_or_404()
if not plugin.enabled:
flash(
_(
"Can't install plugin. Enable '%(plugin)s' plugin first.",
plugin=plugin.name
), "danger"
)
return redirect(url_for("management.plugins"))
plugin.add_settings(plugin_module.SETTINGS)
flash(_("Plugin has been installed."), "success")
return redirect(url_for("management.plugins"))
@impl(tryfirst=True)
def flaskbb_load_blueprints(app):
management = Blueprint("management", __name__)
@management.before_request
def check_fresh_login():
"""Checks if the login is fresh for the current user, otherwise the user
has to reauthenticate."""
if not login_fresh():
return current_app.login_manager.needs_refresh()
# Categories
register_view(
management,
routes=['/category/add'],
view_func=AddCategory.as_view('add_category')
)
register_view(
management,
routes=["/category/<int:category_id>/delete"],
view_func=DeleteCategory.as_view('delete_category')
)
register_view(
management,
routes=['/category/<int:category_id>/edit'],
view_func=EditCategory.as_view('edit_category')
)
# Forums
register_view(
management,
routes=['/forums/add', '/forums/<int:category_id>/add'],
view_func=AddForum.as_view('add_forum')
)
register_view(
management,
routes=['/forums/<int:forum_id>/delete'],
view_func=DeleteForum.as_view('delete_forum')
)
register_view(
management,
routes=['/forums/<int:forum_id>/edit'],
view_func=EditForum.as_view('edit_forum')
)
register_view(
management, routes=['/forums'], view_func=Forums.as_view('forums')
)
# Groups
register_view(
management,
routes=['/groups/add'],
view_func=AddGroup.as_view('add_group')
)
register_view(
management,
routes=['/groups/<int:group_id>/delete', '/groups/delete'],
view_func=DeleteGroup.as_view('delete_group')
)
register_view(
management,
routes=['/groups/<int:group_id>/edit'],
view_func=EditGroup.as_view('edit_group')
)
register_view(
management, routes=['/groups'], view_func=Groups.as_view('groups')
)
# Plugins
register_view(
management,
routes=['/plugins/<path:name>/disable'],
view_func=DisablePlugin.as_view('disable_plugin')
)
register_view(
management,
routes=['/plugins/<path:name>/enable'],
view_func=EnablePlugin.as_view('enable_plugin')
)
register_view(
management,
routes=['/plugins/<path:name>/install'],
view_func=InstallPlugin.as_view('install_plugin')
)
register_view(
management,
routes=['/plugins/<path:name>/uninstall'],
view_func=UninstallPlugin.as_view('uninstall_plugin')
)
register_view(
management,
routes=['/plugins'],
view_func=PluginsView.as_view('plugins')
)
# Reports
register_view(
management,
routes=['/reports/<int:report_id>/delete', '/reports/delete'],
view_func=DeleteReport.as_view('delete_report')
)
register_view(
management,
routes=['/reports/<int:report_id>/markread', '/reports/markread'],
view_func=MarkReportRead.as_view('report_markread')
)
register_view(
management,
routes=['/reports/unread'],
view_func=UnreadReports.as_view('unread_reports')
)
register_view(
management, routes=['/reports'], view_func=Reports.as_view('reports')
)
# Settings
register_view(
management,
routes=[
'/settings', '/settings/<path:slug>',
'/settings/plugin/<path:plugin>'
],
view_func=ManagementSettings.as_view('settings')
)
# Users
register_view(
management,
routes=['/users/add'],
view_func=AddUser.as_view('add_user')
)
register_view(
management,
routes=['/users/banned'],
view_func=BannedUsers.as_view('banned_users')
)
register_view(
management,
routes=['/users/ban', '/users/<int:user_id>/ban'],
view_func=BanUser.as_view('ban_user')
)
register_view(
management,
routes=['/users/delete', '/users/<int:user_id>/delete'],
view_func=DeleteUser.as_view('delete_user')
)
register_view(
management,
routes=['/users/<int:user_id>/edit'],
view_func=EditUser.as_view('edit_user')
)
register_view(
management,
routes=['/users/unban', '/users/<int:user_id>/unban'],
view_func=UnbanUser.as_view('unban_user')
)
register_view(
management, routes=['/users'], view_func=ManageUsers.as_view('users')
)
register_view(
management,
routes=['/celerystatus'],
view_func=CeleryStatus.as_view('celery_status')
)
register_view(
management,
routes=['/'],
view_func=ManagementOverview.as_view('overview')
)
app.register_blueprint(
management, url_prefix=app.config["ADMIN_URL_PREFIX"]
) | PypiClean |
/flatlat-2.0.0.tar.gz/flatlat-2.0.0/src/Lat2D_mattb242/Lat2Dv2.py | import numpy as np
import pandas as pd
import csv
import copy
from math import atan2
from math import degrees
import matplotlib
import matplotlib.pyplot as plt
import tenpy
matplotlib.rcParams['text.usetex'] = True
'-----------------------------------------------------'
'UTILITY FUNCTIONS'
'-----------------------------------------------------'
'General L_q distance calculator for vectors in R_2'
def distgen(q, v_1, v_2):
if q == 0:
return max(np.abs(v_1[0] - v_2[0]), np.abs(v_1[1] - v_2[1]))
else:
return ((np.abs(v_1[0] - v_2[0])**q) + (np.abs(v_1[1] - v_2[1])**q))**(1/q)
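# Example (sketch): distgen(2, [0, 0], [3, 4]) -> 5.0 (Euclidean), while
#   distgen(0, [0, 0], [3, 4]) -> 4 (Chebyshev / L_inf)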
'Cycle any list so that its least member is first'
def mincyc(l):
top = l.index(min(l))
return l[top:] + l[:top]
'Round a list of floats to some defined level (default = 2dp)'
def roundlist(l, r=2):
rl = []
for i in l:
rl.append(round(i,r))
return rl
'Calculate M_inf quantity (see maths paper, Lemma C.2)'
def minf(a,b,c,d):
return max([np.abs(a-b), np.abs(c-d), (np.abs(a+b-c+d))/2])
'Make a pair of 2D Lattice vectors from length and angle parameters (in degrees)'
def makelat (a, b, t):
trad = np.deg2rad(t)
return Lat2d([a, 0], [b*np.cos(trad), b*np.sin(trad)])
'Given a list of quantities, return a list consisting of the index each quantity'
'would be at in the sorted version of the list'
def index_sorted(l):
scomp = sorted(l.copy())
indl = []
countlist = []
for i in l:
shift = countlist.count(i)
indl.append(scomp.index(i) + shift)
countlist.append(i)
return indl
'Order a list of vectors by length'
def len_order(l):
poslist = index_sorted([np.linalg.norm(i) for i in l])
ordlist = []
for i in range(0,max(poslist)+1):
ordlist.append(l[poslist.index(i)])
return ordlist
'Given an obtuse superbase, calculate the lattice sign from angles'
def sb_sign_old(veclist):
u_1 = veclist[1]/np.linalg.norm(veclist[1])
u_0 = veclist[0]/np.linalg.norm(veclist[0])
u_2 = veclist[2]/np.linalg.norm(veclist[2])
ang_1 = degrees(atan2(u_1[1], u_1[0])) % 360
ang_0 = degrees(atan2(u_0[1], u_0[0])) % 360
ang_2 = degrees(atan2(u_2[1], u_2[0])) % 360
ang_12 = (ang_2 - ang_1) % 360
ang_10 = (ang_0 - ang_1) % 360
if ang_12 < ang_10:
sgn = 1
else:
sgn = -1
return sgn
'Given an obtuse superbase as a list of vectors, calculate the lattice sign from determinant'
def sb_sign(l):
latm = np.array(len_order(l)[:2])
if np.linalg.det(latm) < 0:
return -1
elif np.linalg.det(latm) > 0:
return 1
else:
return 0
'----------------------------------------------------'
'LATTICE CLASS IN 2D WITH ROOT FORM GENERATOR'
'----------------------------------------------------'
'Basic Lattice Class - takes two input vectors in R2'
class Lat2d:
def __init__(self, vec_1, vec_2):
self.x = np.array(vec_1)
self.y = np.array(vec_2)
self.ob = -(self.x + self.y)
self.xlen = np.linalg.norm(vec_1)
self.ylen = np.linalg.norm(vec_2)
self.oblen = np.linalg.norm(self.ob)
self.inner = np.dot(vec_1, vec_2)
self.angle = np.rad2deg(np.arccos(self.inner/(self.xlen * self.ylen)))
'Return a lattice in terms of its length and angle parameters'
def param_lat(self):
return([self.xlen, self.ylen, self.angle])
'Reduce any input lattice to its obtuse superbase'
def make_obsb(self, step = False):
v_1 = self.x
v_2 = self.y
v_0 = self.ob
inners = [-np.dot(v_1, v_2), -np.dot(v_0, v_1), -np.dot(v_0, v_2)]
stepcount = 0
if min(inners) >= 0:
return ([[v_0, v_1, v_2], stepcount])
else:
while min(inners) < 0:
if step:
print('Step ' + repr(stepcount) +':')
                    print('Superbase vectors: ' + repr([v_0, v_1, v_2]))
print('Inner products: ' + repr(inners))
input('Press Enter to Run a Reduction Step: ')
if inners[0] < 0:
v_0 = v_1 - v_2
v_1 = -v_1
elif inners[1] < 0:
v_2 = v_0 - v_1
v_0 = -v_0
else:
v_1 = v_0 - v_2
v_0 = -v_0
inners = [-np.dot(v_1, v_2), -np.dot(v_0, v_1), -np.dot(v_0, v_2)]
stepcount +=1
return ([[v_0, v_1, v_2], stepcount])
'Generate the Coform of the obtuse superbase of a lattice as a list'
def make_cf(self):
obsb = self.make_obsb()[0]
return [-np.dot(obsb[1], obsb[2]), -np.dot(obsb[0], obsb[1]), -np.dot(obsb[0], obsb[2])]
'Calculate the sign of a lattice'
def lattice_sign(self, tol=10**-6):
return sb_sign(self.make_obsb()[0])
'Calculate the root form of a lattice - returns a ROOT FORM object'
def make_rf(self, tol=10**-6):
cf = self.make_cf()
#print('we start with the coform ' + repr(cf))
sgn = self.lattice_sign()
#print('The lattice has sign ' + repr(sgn))
rf = sorted([np.sqrt(i) for i in cf])
#print('now the root form is ' + repr(rf))
if np.abs(rf[0])<tol or (np.abs(rf[0] - rf[1]) < tol or np.abs(rf[1] - rf[2])<tol):
return RF2_signed(rf, 0)
else:
return RF2_signed(rf, sgn)
'Calculate the projected form of a lattice'
def make_pf(self, tol = 10**-6):
return self.make_rf().projform()
'Calculate the position of a lattice in the quotient square'
def make_qs(self, tol = 10**-6):
return self.make_rf().projform().qs_plot()
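# Usage sketch (illustrative values):
#   lat = makelat(1.0, 1.0, 60)    # hexagonal lattice from lengths and angle
#   lat.make_rf().vec              # ordered root form of its obtuse superbase
#   lat.make_qs()                  # coordinates in the quotient square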
'----------------------------------------------------'
'2D ORIENTED ROOT FORM CLASS'
'----------------------------------------------------'
'Basic Root Form Class - takes a list of positive numbers and a sign'
class RF2_signed:
def __init__(self, vec, sign):
self.vec = vec
self.r_12 = vec[0]
self.r_01 = vec[1]
self.r_02 = vec[2]
self.sign = sign
'Throws a warning if the root form is not ordered'
if sorted(self.vec) != self.vec:
print('Warning! Root form is not ordered.')
'Create correctly ordered unoriented root form'
def rightsign(self):
if self.sign == -1:
return [self.vec[0], self.vec[2], self.vec[1]]
else:
return self.vec
'Calculate Positive G-Chirality for groups D2, D4, D6 (L_inf and L_2)'
def rf_grpchir(self, pgroup = 2, dtype = 0):
if pgroup == 2:
if dtype == 0:
return min([self.r_12, (self.r_01 - self.r_12)/2, (self.r_02 - self.r_01)/2])
elif dtype == 2:
return min([self.r_12, (self.r_01 - self.r_12)/np.sqrt(2), (self.r_02 - self.r_01)/np.sqrt(2)])
else:
print('I can only calculate L_2 and L_inf distances')
return 0
elif pgroup == 4:
if dtype == 0:
return min([self.r_12, (self.r_02-self.r_01)/2])
elif dtype == 2:
                return np.sqrt((self.r_12)**2 + 0.25*(self.r_02 - self.r_01)**2)
print('I can only calculate L_2 and L_inf distances')
return 0
elif pgroup == 6:
if dtype == 0:
return (self.r_02 - self.r_12)/2
elif dtype == 2:
return np.sqrt(2/3 *(self.r_12**2 + self.r_01**2 + self.r_02**2 -(self.r_01*self.r_12) - (self.r_12*self.r_02) - (self.r_01*self.r_02)))
print('I can only calculate L_2 and L_inf distances')
return 0
else:
print('Please enter a meaningful 2D point group')
return 0
'Calculate the signed D2 chirality (i.e. overall chirality)'
def rf_chir(self, dtype = 0):
return self.sign*min([self.rf_grpchir(pgroup = 2, dtype = dtype), self.rf_grpchir(pgroup = 4, dtype = dtype), self.rf_grpchir(pgroup = 6, dtype = dtype)])
'Return projected form'
def projform(self):
a = sum(self.vec)
return PF2([(self.r_02 - self.r_01)/a, (3*self.r_12)/a], self.sign)
'Return Coform'
def coform2d(self):
return[i**2 for i in self.rightsign()]
'Return Voform'
def voform2d(self):
c = self.coform2d()
return[c[1]+c[2], c[0]+c[1], c[0]+c[2]]
'Reconstitute lattice'
def make2lat(self):
l = self.coform2d()
m = self.voform2d()
cs = -l[0]/(np.sqrt(m[1])*np.sqrt(m[2]))
alph = np.arccos(cs)
v_1 = [np.sqrt(m[1]), 0]
v_2 = [np.sqrt(m[2])*np.cos(alph), np.sqrt(m[2])*np.sin(alph)]
return [v_1, v_2]
'----------------------------------------------------'
'PROJECTED FORM CLASS'
'----------------------------------------------------'
'Projected Form Basic Class - takes point (x,y) as list and a sign'
class PF2:
def __init__(self, point, sign, tol = 10**-6):
self.qtpoint = point
self.x = self.qtpoint[0]
self.y = self.qtpoint[1]
'Throws a warning if the projected form is not in QT'
if self.x + self.y > 1 :
print('Warning! Projected form is not in Quotient Triangle')
'Chirality sign reverts to 0 if on boundary'
if np.abs(1 - (self.x + self.y)) < tol or (self.x < tol or self.y < tol):
self.sign = 0
else:
self.sign = sign
'Plots co-ordinates in the quotient square'
def qs_plot(self):
if self.sign == 0:
return [self.x, self.y]
elif self.sign < 0:
return [1-self.y, 1-self.x]
else:
return [self.x, self.y]
'Calculates chirality based on infinity metric and position in quotient square'
def pf_grpchir(self, dtype = 0, pgroup = 2):
if dtype == 0:
if pgroup == 2:
return min([self.x, self.y, (1-self.x -self.y)/2])
elif pgroup == 4:
return self.x
elif pgroup == 6:
return (1-self.y)
else:
print('Please enter a meaningful point group!')
return 0
elif dtype == 2:
if pgroup == 2:
return min([self.x, self.y, (1-self.x -self.y)/2])
elif pgroup == 4:
return distgen(2, [self.x, self.y], [0,0])
elif pgroup == 6:
return distgen(2, [self.x, self.y], [0,1])
else:
print('Please enter a meaningful point group!')
return 0
else:
if pgroup == 2:
print('Can only calculate D2 chirality for either L_2 or L_inf metric')
return 0
elif pgroup == 4:
return distgen(dtype, [self.x, self.y], [0,0])
elif pgroup == 6:
return distgen(dtype, [self.x, self.y], [0,1])
else:
print('Please enter a meaningful point group!')
return 0
def pf_chir(self, dtype = 0):
return self.sign*min([self.pf_grpchir(pgroup = 2, dtype = dtype), self.pf_grpchir(pgroup = 4, dtype = dtype), self.pf_grpchir(pgroup = 6, dtype = dtype)])
'Returns root form from projected form at a given scale '
def root_from_PF2(self, scale = 1):
x = self.x
y = self.y
r_12 = scale * (y/3)
r_01 = scale *((1-(r_12 + x))/2)
r_02 = scale *((1 -r_12 + x)/2)
l = sorted([r_12, r_01, r_02])
return RF2_signed(l, self.sign)
def lattice_from_PF2(self, sc = 1):
return self.root_from_PF2(scale = sc).make2lat()
'Plots spherical co-ordinates based on projected form co-ordinates'
def sphere_proj(self):
t = 1-(1/np.sqrt(2))
        if (self.x, self.y) == (t, t):
            # The projection is undefined at the centre of the triangle
            psi = np.nan
        else:
            if self.x == t and self.y > t:
                psi = 90
            elif self.x == t and self.y < t:
                psi = -90
            else:
                psi = np.rad2deg(np.arctan((self.y-t)/(self.x-t)))
        if self.x - t < 0:
            mu = psi + 22.5
        else:
            if psi >= -22.5:
                mu = psi-157.5
            else:
                mu = psi+202.5
        if -180 < mu < -45:
            phi = self.sign*((1- self.x - self.y)/(np.sqrt(2)-1))*90
            if np.abs(phi) > 90:
                phi = self.sign*90
        elif -45 <= mu < 67.5:
            phi = self.sign*((np.sqrt(2)*self.x)/(np.sqrt(2)-1))*90
            if np.abs(phi) > 90:
                phi = self.sign*90
        else:
            phi = self.sign*((np.sqrt(2)*self.y)/(np.sqrt(2)-1))*90
            if np.abs(phi) > 90:
                phi = self.sign*90
return([mu, phi])
'----------------------------------------------------'
'LATTICE DISTANCE CALCULATIONS'
'----------------------------------------------------'
'Calculate Chebyshev distance between two root forms. '
'Set orient = true to calculate oriented distance'
def rf2dist(rf_1, rf_2, orient = True, dtype = 0):
rfv_1 = rf_1.vec
rfv_2 = rf_2.vec
if not orient or ((rf_1.sign == rf_2.sign) or (rf_1.sign == 0 or rf_2.sign == 0)):
return max(np.abs(rfv_1[0] - rfv_2[0]),np.abs(rfv_1[1] - rfv_2[1]),np.abs(rfv_1[2] - rfv_2[2]))
else:
if dtype == 0:
d_0 = max(rfv_1[0] + rfv_2[0], np.abs(rfv_1[1]-rfv_2[1]), np.abs(rfv_1[2] - rfv_2[2]))
d_1 = max(np.abs(rfv_1[2] - rfv_2[2]), minf(rfv_1[0], rfv_1[1], rfv_2[0], rfv_2[1]))
d_2 = max(np.abs(rfv_1[0] - rfv_2[0]), minf(rfv_1[1], rfv_1[2], rfv_2[1], rfv_2[2]))
return min(d_0, d_1, d_2)
else:
c1 = (-rfv_2[0], rfv_2[1], rfv_2[2])
c2 = (rfv_2[2], rfv_2[0], rfv_2[1])
c3 = (rfv_2[0], rfv_2[2], rfv_2[1])
return min(distgen(2, rfv_1, c1), distgen(2, rfv_1, c2), distgen(2, rfv_1, c3))
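# Example (sketch): oriented distance between two signed root forms
#   d = rf2dist(RF2_signed([1.0, 2.0, 3.0], 1), RF2_signed([1.0, 2.1, 2.9], -1))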
'Calculate Chebyshev or L_2 distance between two projected forms. '
'Set orient = true to calculate oriented distance'
def pf2dist(pf_1, pf_2, orient = True, dtype =0):
p_1 = pf_1.qtpoint
p_2 = pf_2.qtpoint
if not orient or (pf_1.sign == pf_2.sign or (pf_1.sign == 0 or pf_2.sign == 0)):
return distgen (2, p_1, p_2)
else:
if dtype == 0:
d_x = max([np.abs(p_2[0] - p_1[0]), p_2[1]+p_1[1]])
d_y = max([p_2[0] + p_1[0], np.abs(p_2[1]-p_1[1])])
d_xy = max([np.abs(p_2[0]-p_1[0]), 1-p_2[0]-p_2[1], np.abs(1-p_1[1]-p_2[0])])
return min(d_x, d_y, d_xy)
else:
return min(distgen(2, p_1, [-p_2[0], p_2[1]]), distgen(2, p_1, [p_2[0], -p_2[1]]), distgen(2, p_1, [1-p_2[1], 1-p_2[0]]))
'---------------------------------' | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_ss-sz.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Lisontfo",
"uMsombuluko",
"Lesibili",
"Lesitsatfu",
"Lesine",
"Lesihlanu",
"uMgcibelo"
],
"MONTH": [
"Bhimbidvwane",
"iNdlovana",
"iNdlovu-lenkhulu",
"Mabasa",
"iNkhwekhweti",
"iNhlaba",
"Kholwane",
"iNgci",
"iNyoni",
"iMphala",
"Lweti",
"iNgongoni"
],
"SHORTDAY": [
"Son",
"Mso",
"Bil",
"Tsa",
"Ne",
"Hla",
"Mgc"
],
"SHORTMONTH": [
"Bhi",
"Van",
"Vol",
"Mab",
"Nkh",
"Nhl",
"Kho",
"Ngc",
"Nyo",
"Mph",
"Lwe",
"Ngo"
],
"fullDate": "y MMMM d, EEEE",
"longDate": "y MMMM d",
"medium": "y MMM d HH:mm:ss",
"mediumDate": "y MMM d",
"mediumTime": "HH:mm:ss",
"short": "y-MM-dd HH:mm",
"shortDate": "y-MM-dd",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "SZL",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "ss-sz",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/ImSwitch-2.0.0.tar.gz/ImSwitch-2.0.0/imswitch/imcontrol/model/managers/detectors/JetsonCamManager.py | import numpy as np
from imswitch.imcommon.model import initLogger
from .DetectorManager import DetectorManager, DetectorAction, DetectorNumberParameter, DetectorListParameter
class JetsonCamManager(DetectorManager):
    """ DetectorManager that deals with cameras attached to an NVIDIA Jetson
    board (e.g. the IMX219 sensor) and the parameters for frame extraction
    from them. Falls back to a mock camera if initialization fails.
    Manager properties:
    - ``avcam`` -- dictionary of camera properties to apply on startup
"""
def __init__(self, detectorInfo, name, **_lowLevelManagers):
self.__logger = initLogger(self, instanceName=name)
self._camera = self._getJetsonObj()
model = self._camera.model
self._running = False
for propertyName, propertyValue in detectorInfo.managerProperties['avcam'].items():
self._camera.setPropertyValue(propertyName, propertyValue)
fullShape = (self._camera.getPropertyValue('image_width'),
self._camera.getPropertyValue('image_height'))
# Prepare parameters
parameters = {
'exposure': DetectorNumberParameter(group='Misc', value=100, valueUnits='ms',
editable=True),
'gain': DetectorNumberParameter(group='Misc', value=1, valueUnits='arb.u.',
editable=True),
'blacklevel': DetectorNumberParameter(group='Misc', value=100, valueUnits='arb.u.',
editable=True),
'image_width': DetectorNumberParameter(group='Misc', value=fullShape[0], valueUnits='arb.u.',
editable=False),
'image_height': DetectorNumberParameter(group='Misc', value=fullShape[1], valueUnits='arb.u.',
editable=False),
'pixel_format': DetectorListParameter(group='Misc', value='Mono12', options=['Mono8','Mono12'], editable=True)
}
# Prepare actions
actions = {
'More properties': DetectorAction(group='Misc',
func=self._camera.openPropertiesGUI)
}
super().__init__(detectorInfo, name, fullShape=fullShape, supportedBinnings=[1],
model=model, parameters=parameters, actions=actions, croppable=True)
def getLatestFrame(self, is_save=False):
if is_save:
return self._camera.getLastChunk()
else:
return self._camera.getLast()
def setParameter(self, name, value):
"""Sets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
super().setParameter(name, value)
if name not in self._DetectorManager__parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.setPropertyValue(name, value)
return value
def getParameter(self, name):
"""Gets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
if name not in self._parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.getPropertyValue(name)
return value
def setBinning(self, binning):
super().setBinning(binning)
def getChunk(self):
return np.expand_dims(self._camera.getLastChunk(),0)
def flushBuffers(self):
pass
def startAcquisition(self):
if not self._running:
self._camera.start_live()
self._running = True
self.__logger.debug('startlive')
def stopAcquisition(self):
if self._running:
self._running = False
self._camera.suspend_live()
self.__logger.debug('suspendlive')
def stopAcquisitionForROIChange(self):
self._running = False
self._camera.stop_live()
self.__logger.debug('stoplive for roi change')
def finalize(self) -> None:
super().finalize()
self.__logger.debug('Safely disconnecting the camera...')
self._camera.close()
@property
def pixelSizeUm(self):
return [1, 1, 1]
def crop(self, hpos, vpos, hsize, vsize):
def cropAction():
# self.__logger.debug(
# f'{self._camera.model}: crop frame to {hsize}x{vsize} at {hpos},{vpos}.'
# )
self._camera.setROI(hpos, vpos, hsize, vsize)
self._performSafeCameraAction(cropAction)
# TODO: unsure if frameStart is needed? Try without.
# This should be the only place where self.frameStart is changed
# self._frameStart = (hpos, vpos)
# Only place self.shapes is changed
#vsize = self._camera.getPropertyValue('image_width')
#hsize = self._camera.getPropertyValue('image_height')
self._shape = self._camera.shape
def _performSafeCameraAction(self, function):
""" This method is used to change those camera properties that need
the camera to be idle to be able to be adjusted.
"""
wasrunning = self._running
self.stopAcquisitionForROIChange()
function()
if wasrunning:
self.startAcquisition()
def openPropertiesDialog(self):
self._camera.openPropertiesGUI()
def _getJetsonObj(self):
try:
from imswitch.imcontrol.model.interfaces.jetsoncam import CameraJETSON
self.__logger.debug(f'Trying to initialize Jetson IMX219 camera')
camera = CameraJETSON()
except Exception as e:
self.__logger.error(e)
self.__logger.warning(f'Failed to initialize Jetson IMX219 camera, loading TIS mocker')
from imswitch.imcontrol.model.interfaces.tiscamera_mock import MockCameraTIS
camera = MockCameraTIS()
self.__logger.info(f'Initialized camera, model: {camera.model}')
return camera
def closeEvent(self):
self._camera.close()
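# Example detector entry in an ImSwitch setup file (illustrative sketch only;
# the 'avcam' key matches the managerProperties lookup in __init__ above):
#
#   "detectors": {
#       "JetsonCam": {
#           "managerName": "JetsonCamManager",
#           "managerProperties": {
#               "avcam": {"exposure": 100, "gain": 1, "blacklevel": 100}
#           }
#       }
#   }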
# Copyright (C) ImSwitch developers 2021
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/fisinma-0.1.0.tar.gz/fisinma-0.1.0/docs/source/getting_started/examples/damped_oscillator.rst | Damped Oscillator
=================
A damped oscillator can be described by the second-order ordinary differential equation
.. math::
:name: eq:damped-osci-ode
\ddot{x} + \mu(h)\dot{x} + \lambda x = 0
Here we additionally assume that the parameter :math:`\mu` depends on another input
variable :math:`h` via :math:`\mu(h)=ah + b`.
To obtain a first-order equation, we substitute :math:`\dot{x}=A` and :math:`x=B` and obtain
.. math::
:name: eq:damped-osci-ode-first-order
\begin{align}
\dot{A} &= -\mu(h) A - \lambda B\\
\dot{B} &= A
\end{align}
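
Equivalently, the system can be written in matrix form:

.. math::

   \frac{\mathrm{d}}{\mathrm{d}t}
   \begin{pmatrix} A \\ B \end{pmatrix} =
   \begin{pmatrix} -\mu(h) & -\lambda \\ 1 & 0 \end{pmatrix}
   \begin{pmatrix} A \\ B \end{pmatrix}
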
This is now a first-order ODE with the parameters :math:`a, b, \lambda` and the input variable :math:`h`.
Now we can begin with the numerical description of the system.
It is good practice to first import all needed libraries at the top of our file.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 7
:lines: 7-8
To define the system numerically, we write the preceding equations as a function.
Only the right-hand side of the :ref:`ODE equation <eq:damped-osci-ode-first-order>` is needed.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 10
:lines: 10-18
We seek to maximize the amount of information in the system to most accurately estimate the parameters
:math:`(a, b, \lambda)`.
To achieve this, we must also define the derivatives of the :ref:`ODE equation <eq:damped-osci-ode-first-order>`
with respect to the components of the ODE :math:`(A, B)` and the parameters.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 20
:lines: 20-36
Now we have defined the overall structure of the ODE but are still lacking actual numerical values
to be able to solve the system.
We gather them in the main function of our script and start with the initial guesses of the parameters.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 41
:lines: 41-47
Next, we define the initial values of the :ref:`ODE system<eq:damped-osci-ode-first-order>`.
Notice that since we have a two-component system, we need to define values for :math:`(A, B)`,
meaning in our case the variable :math:`x` as well as its time derivative :math:`\dot{x}`.
In the next steps, we define helper variables to later pick explicit values for the input variable :math:`h`
and a range to optimize time points :math:`t_i` when to evaluate the solution of the ODE.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 53
:lines: 53-61
The next statement fixes the explicit values of :math:`h`.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 63
:lines: 63-66
A short inspection reveals that the following lines of code yield a numpy array of explicit values.
.. code:: python
# Numerical values for the input variable h
>>> h_low = 0.08
>>> h_high = 0.12
>>> n_h = 1
>>> np.linspace(h_low, h_high, n_h)
array([0.08])
So far we have not yet used the methods developed in this package.
But now we are ready to define the Fisher model which can then be solved to obtain optimal conditions
for our experimental design.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 68
:lines: 68-78
The next step solves this model and actually does the optimization.
The result is called a Fisher result and contains information on final values and the
optimization procedure.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 80
:lines: 80-81
When executing the script in a terminal, the output might look like the following.
.. literalinclude:: ../../../source/_static/damped_osci_plots/output_example.txt
In our final step we can visualize the results by automatically generating images or saving
results as a JSON file.
.. literalinclude:: ../../../../examples/damped_oscillator.py
:language: python
:linenos:
:lineno-start: 83
:lines: 83-85
This image shows one of the results of this optimization run.
You can see the :math:`B` component of the :ref:`ODE <eq:damped-osci-ode-first-order>`.
.. image:: ../../../source/_static/damped_osci_plots/Observable_Results_damped_osci_fisher_determinant__000_x_01.svg
/GraFT_Python-0.0.2-py3-none-any.whl/GraFT_Python/main_functions_graft.py
#%% Imports
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
#from quadprog import solve_qp
from qpsolvers import solve_qp #https://scaron.info/doc/qpsolvers/quadratic-programming.html#qpsolvers.solve_qp https://pypi.org/project/qpsolvers/
#print(qpsolvers.available_solvers)
import matplotlib
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import itertools
import pandas as pd
import seaborn as sns
import random
import os
from datetime import date
sep = os.sep
import os.path
import warnings
from scipy.optimize import nnls
import numbers
from sklearn import linear_model
import pylops
#from PIL import Image
from skimage import io
#%% Default Parameters
global params_default
params_default = {'max_learn': 1e3, # Maximum number of steps in learning
'mean_square_error': 1,
'epsilon' : 1, # Default tau values to be spatially varying
'l1': 7, #0.00007,#0.7,#0.7, # Default lambda parameter is 0.6
                  'l2': 0,#0.00000000000005,# 0.2**2, # lamForb # Default Frobenius norm parameter is 0 (don't use)
'l3': 0, #lamCont # Default Dictionary continuation term parameter is 0 (don't use)
'lamContStp': 1, # Default multiplicative change to continuation parameter is 1 (no change)
'l4': 0,#0.1, #lamCorr # Default Dictionary correlation regularization parameter is 0 (don't use)
'beta': 0.09, # Default beta parameter to 0.09
'maxiter': 0.01, # Default the maximum iteration to whenever Delta(Dictionary)<0.01
'numreps': 2, # Default number of repetitions for RWL1 is 2
'tolerance': 1e-8, # Default tolerance for TFOCS calls is 1e-8
'likely_form' : 'gaussian', # Default to a gaussian likelihood ('gaussian' or'poisson')
'step_s': 5, #1, # Default step to reduce the step size over time (only needed for grad_type ='norm')
'step_decay': 0.999, # Default step size decay (only needed for grad_type ='norm')
'dict_max_error': 0.01, # learn_eps # Default learning tolerance: stop when Delta(Dictionary)<0.01
'p': 4, # Default number of dictionary elements is a function of the data
'verb': 1, # Default to no verbose output
'GD_iters': 1, # Default to one GD step per iteration
'bshow': 0, # Default to no plotting
# Default to not having negativity constraints
'nonneg':True, # Default to not having negativity constraints on the coefficients
'plot': False, # Default to not plot spatial components during the learning
'updateEmbed' : False, # Default to not updateing the graph embedding based on changes to the coefficients
'mask': [], # for masked images (widefield data)
'normalizeSpatial' : False, # default behavior - time-traces are unit norm. when true, spatial maps normalized to max one and time-traces are not normalized
'patchSize': 50,
'motion_correct': False, #future
'kernelType': 'embedding',
'reduceDim': False, #True,
'w_time': 0,
'n_neighbors':49,
'n_comps':2,
'solver_qp':'quadprog',
'solver': 'inv',# 'lasso',#'spgl1',
'nullify_some': False ,
'norm_by_lambdas_vec': True,
'GD_type': 'norm',#'full_ls_cor'
'xmin' : 151,#151
'xmax' : 200,#350 # Can sub-select a portion of the full FOV to test on a small section before running on the full dataset
'ymin' : 151,#101
'ymax' : 200,#300
'use_former_kernel' : False,
'usePatch' : False, #str2bool(input('to use patch? (true or false)') )
'portion' :True,# str2bool(input('To take portion?'))
'divide_med' : False,# str2bool(input('divide by median? (true or false)'))
'data_0_1' : False,
'to_save' : True, #str2bool(input('to save?'))
'default_path': r'E:\CODES FROM GITHUB\GraFT-analysis\code\neurofinder.02.00\images',
'save_error_iterations': True,
'max_images':800,
'dist_init': 'uniform',
'to_sqrt':True
}
#%% GraFT Functions
global params_config
params_config = {'self_tune':7, 'dist_type': 'euclidian', 'alg':'ball_tree',
'n_neighbors':49, 'reduce_dim':False}
def createDefaultParams(params = {}):
dictionaryVals = {'step_s':1,
'learn_eps':0.01,
'epsilon': 2,
'numreps': 2,
}
return addKeyToDict(dictionaryVals,params)
def createLmabdasMat(epsilonVal, shapeMat):
    if isinstance(epsilonVal, (list, tuple, np.ndarray)) and len(epsilonVal) == 1:
        epsilonVal = epsilonVal[0]
    if not isinstance(epsilonVal, (list, tuple, np.ndarray)):
        lambdas = epsilonVal * np.ones(shapeMat)
    else:
        epsilonVal = np.array(epsilonVal)
        if len(epsilonVal) == shapeMat[1]:
            lambdas = np.ones(shapeMat[0]).reshape((-1,1)) @ epsilonVal.reshape((1,-1))
        elif len(epsilonVal) == shapeMat[0]:
            lambdas = epsilonVal.reshape((-1,1)) @ np.ones(shapeMat[1]).reshape((1,-1))
        else:
            raise ValueError('epsilonVal must be either a number or a list/tuple/np.array with a number of elements equal to one of the shapeMat dimensions')
    return lambdas
def addKeyToDict(dictionaryVals,dictionaryPut):
return {**dictionaryVals, **dictionaryPut}
def validate_inputs(params):
params['epsilon'] = float(params['epsilon'])
params['step_s'] = float(params['step_s'])
params['p'] = int(params['p'])
params['nonneg'] = str2bool(params['nonneg'])
params['reduceDim'] = str2bool(params['reduceDim'])
params['solver'] = str(params['solver'])
params['norm_by_lambdas_vec'] = str2bool(params['norm_by_lambdas_vec'])
return params
def run_GraFT(data = [], corr_kern = [], params = {}, to_save = True, to_return = True,
ask_selected = True, selected = ['epsilon','step_s', 'p', 'nonneg',
'reduceDim','solver','norm_by_lambdas_vec'] ):
"""
This function runs the main graft algorithm.
Parameters
----------
data : can be string of data path or a numpy array (pixels X pixels X time) or (pixels X time).
Leave empty for default
The default is []. In this case the calcium imaging dataset will be used.
corr_kern : proximity kernel. Leave empty ([]) to re-create the kernel.
params : dictionary of parameters, optional
the full default values of the optional parameters are mentioned in dict_default.
to_save : boolean, optional
whether to save the results to .npy file. The default is True.
to_return : boolean, optional
whether to return results. The default is True.
ask_selected : boolean, optional
whether to ask the use about specific parameters. The default is True.
selected : list of strings, optional
relevant only if 'ask_selected' is true.
. The default is ['epsilon','step_s', 'p', 'nonneg',
'reduceDim','solver','norm_by_lambdas_vec'].
Raises
------
ValueError
If invalid path
Returns
-------
A : np.ndarray (pixels X p) - neural maps
phi : np.ndarray (time X p) temporal traces
additional_return : dictionary with additional returns. Including error over iterations
"""
params = {**params_default, **params}
if ask_selected:
for select in selected:
params[select] = input('Value of %s (def = %s)'%(select, str(params[select])))
params = validate_inputs(params)
if to_save:
save_name = input('save_name')
if ask_selected:
addition_name = '_'.join([s +'_' + str(params[s] ) for s in selected])
save_name_short = save_name
save_name = save_name + '_' + addition_name
"""
Create data
"""
default = False
if checkEmptyList(data):
warnings.warn('Data is empty. Take default dataset (calcium imaging)')
default = True
data = 'data_calcium_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax'])
#data = params['default_path']
if isinstance(data, str): # Check if path
been_cut = False
try:
try:
if data.endswith('.npy'):
data = np.load('data_calcium_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax']))
been_cut = True
else:
data = from_folder_to_array(data, max_images = params['max_images'])
except:
if default:
data = from_folder_to_array(params['default_path'], max_images = params['max_images'])
else:
raise ValueError('Data loading failed')
except:
raise ValueError('Cannot locate data path! (your invalid path is %s)'%data)
if isinstance(data,np.ndarray):
if not been_cut:
data = data[params['xmin']:params['xmax'], params['ymin']:params['ymax'],:]
np.save('data_calcium_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax']), data)
"""
Create Kernel
"""
print('creating kernel...')
if checkEmptyList(corr_kern):
corr_kern = mkDataGraph(data, params, reduceDim = params['reduceDim'],
reduceDimParams = {'alg':'PCA'}, graph_function = 'gaussian',
K_sym = True, use_former = False, data_name = 'try_graft', toNormRows = True)
np.save('kernel_calcium_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax']), corr_kern)
elif isinstance(corr_kern, str): # Check if path
try:
corr_kern = np.load('kernel_calcium_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax']))
except:
raise ValueError('Cannot locate kernel path! (your invalid path is %s)'%corr_kern)
"""
run graft
"""
if params['usePatch']:
raise ValueError('Use Patch is not available yet!')
else:
[phi, A, additional_return] = GraFT(data, [], corr_kern, params) # Learn the dictionary (no patching - will be much more memory intensive and slower)
if to_save:
#save_name = input('save_name')
path_name = 'date_'+ str(date.today()) + '_xmin_%d_xmax_%d_ymin_%d_ymax_%d.npy'%(params['xmin'],params['xmax'],params['ymin'],params['ymax'])
if not os.path.exists(path_name):
os.makedirs(path_name)
try:
np.save(path_name + sep + save_name + '.npy', {'phi':phi, 'A':A, 'data':data, 'params':params, 'divide_med':params['divide_med'],
'usePatch':params['usePatch'], 'shape':data.shape, 'additional': additional_return})
except:
np.save(path_name + sep + save_name_short + '.npy', {'phi':phi, 'A':A, 'data':data, 'params':params, 'divide_med':params['divide_med'],
'usePatch':params['usePatch'], 'shape':data.shape, 'additional': additional_return})
if to_return:
return A, phi, additional_return
def GraFT(data, phi, kernel, params):
"""
    Function to learn a dictionary for spatially ordered / graph-based data using a
    re-weighted l1 spatial / graph filtering model.
    Parameters
    ----------
    data : np.ndarray
        movie data, (pixels X pixels X time) or (pixels X time).
    phi : np.ndarray or empty list
        initial temporal dictionary (time X p); leave empty ([]) to initialize randomly.
    kernel : np.ndarray
        graph / proximity kernel used for the re-weighted l1 updates.
    params : dict
        algorithm parameters (see params_default).
    Returns
    -------
    phi : np.ndarray (time X p) temporal traces
    A : np.ndarray (pixels X p) spatial maps
    additional_return : dictionary with additional returns, including the error over iterations
    """
#print(params.get('nonneg'))
additional_return = {'MSE':[]}
if len(data.shape) == 3: data = MovToD2(data)
params = {**{'max_learn': 1e3, 'learn_eps': 0.01,'step_decay':0.995}, **params}
#params = createDefaultParams(params)
n_rows = data.shape[0] # number of neurons
n_cols =params['p']# data.shape[1]
n_times = data.shape[1]
extras = {'dictEvo':[], 'presMap':[], 'wghMap':[]} # Store some outputs
#%% Initialize dictionary
#print('prarams in graft')
#print(params)
phi = dictInitialize(phi, (n_times, n_cols), params = params)
if params['to_sqrt']:
multi = np.sqrt(np.mean(data))
else:
multi = 1
phi = phi * multi
step_GD = params['step_s']
lambdas = [] # weights
A = []
n_iter = 0
error_dict = np.inf
cur_error = np.inf #
print("params['max_learn']")
print(params['max_learn'])
while n_iter < params['max_learn'] and (error_dict > params['dict_max_error'] or cur_error > params['mean_square_error']):
print('Iteration %d'%n_iter)
n_iter += 1
#%% compute the presence coefficients from the dictionary:
A, lambdas = dictionaryRWL1SF(data, phi, kernel, params = params,A=A) # Infer coefficients given the data and dictionary
print('A after update')
print(A.sum())
#%% Second step is to update the dictionary:
dict_old = phi # Save the old dictionary for metric calculations
        phi = dictionary_update(phi, A, data, step_GD, GD_type = params['GD_type'], params = params) # Take a gradient step with respect to the dictionary
        print('phi after update')
        print(phi.sum())
step_GD = step_GD*params['step_decay'] # Update the step size
error_dict = norm((phi - dict_old).flatten())/norm(dict_old.flatten());
# Calculate the difference in dictionary coefficients
params['l3'] = params['lamContStp']*params['l3']; # Continuation parameter decay
cur_error = np.mean((A @ phi.T - data)**2)
additional_return['MSE'].append(cur_error)
print('Current Error is: {:.2f}'.format(cur_error))
## post-processing
# Re-compute the presence coefficients from the dictionary:
if params['normalizeSpatial']:
        A, lambdas = dictionaryRWL1SF(data, phi, kernel, A=A, params=params)    # Infer coefficients given the data and dictionary
Dnorms = np.sqrt(np.sum(phi**2,0)) # Get norms of each dictionary element
Smax = np.max(A,0) # Get maximum value of each spatial map
actMeas = Dnorms*Smax # Total activity metric is the is the product of the above
IX = np.argsort(actMeas)[::-1] # Get the indices of the activity metrics in descending order
phi = phi[:,IX] # Reorder the dictionary
A = A[:,IX] # Reorder the spatial maps
return phi, A, additional_return
def norm(mat):
"""
Parameters
----------
mat : np.ndarray
l2 norm of mat.
Returns
-------
TYPE
DESCRIPTION.
"""
if len(mat.flatten()) == np.max(mat.shape):
return np.sqrt(np.sum(mat**2))
else:
_, s, _ = np.linalg.svd(mat, full_matrices=True)
return np.max(s)
def mkCorrKern(params = {}):
"""
Parameters
----------
params : TYPE, optional
DESCRIPTION. The default is {}.
Returns
-------
corr_kern : TYPE
DESCRIPTION.
"""
# Make a kernel
params = {**{'w_space':3,'w_scale':4,'w_scale2':0.5, 'w_power':2,'w_time':0}, **params}
dim1 = np.linspace(-params['w_scale'], params['w_scale'], 1+2*params['w_space']) # space dimension
    dim2 = np.linspace(-params['w_scale2'], params['w_scale2'], 1+2*params['w_time']) # time dimension
corr_kern = gaussian_vals(dim1, std = params['w_space'], mean = 0 , norm = True,
dimensions = 2, mat2 = dim2, power = 2)
return corr_kern
def checkCorrKern(data, corr_kern, param_kernel = 'embedding', recreate = False, know_empty = False):
    if len(corr_kern) == 0:
if not know_empty: warnings.warn('Empty Kernel - creating one')
if param_kernel == 'embedding' and recreate:
corr_kern = mkDataGraph(data, corr_kern)
elif param_kernel == 'convolution' and recreate:
corr_kern = mkCorrKern(corr_kern)
else:
raise ValueError('Invalid param_kernel. Should be "embedding" or "convolution"')
return corr_kern
def checkEmptyList(obj):
return isinstance(obj, list) and len(obj) == 0
def dictionaryRWL1SF(data, phi, corr_kern, A = [], params = {}):
#print('dictionaryRWL1SF')
# compute the presence coefficients from the dictionary
params = {**{'epsilon': 1 , 'likely_form':'gaussian', 'numreps':2, 'normalizeSpatial':False,
'thresANullify': 2**(-5)},**params}
if len(data.shape) == 3: data = MovToD2(data)
n_times = data.shape[1]
n_neurons = data.shape[0]
p = phi.shape[1]
corr_kern = checkCorrKern(data, corr_kern);
if params['to_sqrt']:
multi = np.sqrt(np.mean(data))
else:
multi = 1
if checkEmptyList(A):
if params['dist_init'] == 'zeros':
A = np.zeros((n_neurons, p))# np.zeros((n_neurons, p))
else:
A = np.random.rand(n_neurons, p) * multi
if (isinstance( params['epsilon'] , list) and len(params['epsilon']) == 1):
params['epsilon'] = params['epsilon'][0]
if isinstance(params['epsilon'], numbers.Number):
lambdas = np.ones((n_neurons, p))*params['epsilon']
    elif (isinstance(params['epsilon'], (list, np.ndarray)) and len(params['epsilon']) == p):
        lambdas = np.repeat(np.array(params['epsilon']).reshape((1,-1)), n_neurons, axis = 0)
    elif (isinstance(params['epsilon'], (list, np.ndarray)) and len(params['epsilon']) == n_neurons):
        lambdas = np.repeat(np.array(params['epsilon']).reshape((-1,1)), p, axis = 1)
    else:
        raise ValueError('Invalid length of params[epsilon]. Should be a number or a list with n_neurons or p elements. Currently params[epsilon] is ' + str(params['epsilon']))
for repeat in range(params['numreps']):
lambdas = updateLambdasMat(A, corr_kern, params['beta'], params) # Update the matrix of weights
for n_neuron in range(n_neurons):
if params['likely_form'].lower() == 'gaussian':
A[n_neuron, :] = singleGaussNeuroInfer(lambdas[n_neuron, :], data[n_neuron, :],
phi,
l1 = params['l1'],
nonneg = params['nonneg'], A=A[n_neuron, :], params = params)
elif params['likely_form'].lower() == 'poisson':
A[n_neuron, :] = singlePoiNeuroInfer(lambdas[n_neuron, :], data[n_neuron, :],
phi,
params['lambda'],
params['tolerance'],
params['nonneg'], A[n_neuron,:])
else:
raise ValueError('Invalid likely from value')
if params['normalizeSpatial']:
max_A_over_neurons = A.sum(0)
max_A_over_neurons[max_A_over_neurons == 0] = 1
A = A/max_A_over_neurons.reshape((1,-1))
A[A < params['thresANullify']] = 0
return A, lambdas
def singlePoiNeuroInfer(*args, **kwargs): #future
    raise NotImplementedError('Poisson likelihood inference is not implemented yet')
def normalizeDictionary(D, cutoff = 1):
D_norms = np.sqrt(np.sum(D**2,0)) # Get the norms of the dictionary elements
D = D @ np.diag(1/(D_norms*(D_norms>cutoff)/cutoff+(D_norms<=cutoff))); # Re-normalize the basis
return D
def dictionary_update(dict_old, A, data, step_s, GD_type = 'norm', params ={}):
dict_new = takeGDStep(dict_old, A, data, step_s, GD_type, params)
if not params.get('normalizeSpatial'):
dict_new = normalizeDictionary(dict_new,1) # Normalize the dictionary
#print('phi after norm')
#print(dict_old.sum())
dict_new[np.isnan(dict_new)] = 0
return dict_new
def takeGDStep(dict_old, A, data, step_s, GD_type = 'norm', params ={}):
"""
Parameters
----------
dict_old : TYPE
DESCRIPTION.
A : TYPE
DESCRIPTION.
data : TYPE
DESCRIPTION.
step_s : TYPE
DESCRIPTION.
GD_type : TYPE, optional
DESCRIPTION. The default is 'norm'.
params : TYPE, optional
DESCRIPTION. The default is {}.
Raises
------
ValueError
DESCRIPTION.
Returns
-------
dict_new : TYPE
DESCRIPTION.
"""
l2 = params['l2']
l3 = params['l3']
l4 = params['l4']
if GD_type == 'norm':
#print('phi before norm')
#print(dict_old.sum())
# Take a step in the negative gradient of the basis:
# Minimizing the energy: E = ||x-Da||_2^2 + lambda*||a||_1^2
dict_new = update_GDiters(dict_old, A, data, step_s, params)
#print('phi after norm')
#print(dict_new.sum())
elif GD_type == 'forb':
# Take a step in the negative gradient of the basis:
# This time the Forbenious norm is used to reduce unused
# basis elements. The energy function being minimized is
# then: E = ||x-Da||_2^2 + lambda*||a||_1^2 + lamForb||D||_F^2
dict_new = update_GDwithForb(dict_old, A, data, step_s, l2, params);
elif GD_type == 'full_ls':
# Minimizing the energy:
# E = ||X-DA||_2^2 via D = X*pinv(A)
dict_new = update_FullLS(dict_old, A, data, params);
elif GD_type == 'anchor_ls':
# Minimizing the energy:
# E = ||X-DA||_2^2 + lamCont*||D_old - D||_F^2 via D = [X;D_old]*pinv([A;I])
dict_new = update_LSwithCont(dict_old, A, data, l3, params);
elif GD_type == 'anchor_ls_forb':
# Minimizing the energy:
# E = ||X-DA||_2^2 + lamCont*||D_old - D||_F^2 + lamForb*||D||_F^2
# via D = [X;D_old]*pinv([A;I])
dict_new = update_LSwithContForb(dict_old, A, data, l2, l3, params);
elif GD_type == 'full_ls_forb':
# Minimizing the energy:
# E = ||X-DA||_2^2 + lamForb*||D||_F^2
# via D = X*A^T*pinv(AA^T+lamForb*I)
dict_new = update_LSwithForb(dict_old, A, data, l2, params);
elif GD_type== 'full_ls_cor':
# E = ||X-DA||_2^2 + l4*||D.'D-diag(D.'D)||_sav + l2*||D||_F^2
# + l3*||D-Dold||_F^2
dict_new = update_FullLsCor(dict_old, A, data, l2, l3, l4, params)
elif GD_type =='sparse_deconv':
        dict_new = sparseDeconvDictEst(dict_old, data, A, params['h'], params)  # This is a more involved function and needs its own implementation
else:
raise ValueError('GD_Type %s is not defined in the takeGDstep function'%GD_type)
return dict_new
def dictInitialize(phi = [], shape_dict = [], norm_type = 'unit', to_norm = True, params = {}):
"""
Parameters
----------
phi : list of lists or numpy array or empty list
The initializaed dictionary
shape_dict : tuple or numpy array or list, 2 int elements, optional
shape of the dictionary. The default is [].
norm_type : TYPE, optional
DESCRIPTION. The default is 'unit'.
to_norm : TYPE, optional
DESCRIPTION. The default is True.
    dist : string, optional
        distribution from which the dictionary is drawn. The default is 'uniform'.
Raises
------
ValueError
DESCRIPTION.
Returns
-------
phi : TYPE
The output dictionary
"""
if len(phi) == 0 and len(shape_dict) == 0:
raise ValueError('At least one of "phi" or "shape_dict" must not be empty!')
if len(phi) > 0:
return norm_mat(phi, type_norm = norm_type, to_norm = to_norm)
else:
#if dist == 'uniform':
phi = createMat(shape_dict, params)
return dictInitialize(phi, shape_dict, norm_type, to_norm, params)
def createMat(shape_dict, params = params_default ):
"""
Parameters
----------
shape : TYPE
DESCRIPTION.
dist : TYPE, optional
DESCRIPTION. The default is 'uniforrm'.
params : TYPE, optional
DESCRIPTION. The default is {'mu':0, 'std': 1}.
Raises
------
ValueError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
params = {**{'mu':0, 'std': 1}, **params}
dist = params['dist_init']
if dist == 'uniform':
return np.random.rand(shape_dict[0], shape_dict[1])
elif dist == 'norm':
return params['mu'] + np.random.randn(shape_dict[0], shape_dict[1])*params['std']
elif dist == 'zeros':
return np.zeros((shape_dict[0], shape_dict[1]))
else:
raise ValueError('Unknown dist for createMat')
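# Minimal usage sketch of dictionary initialization (our own illustration; the
# shape below is an arbitrary assumption):
def _example_dict_initialize():
    pars = {**params_default, 'dist_init': 'uniform'}
    phi = dictInitialize([], (100, 4), params = pars)   # 100 time points, 4 atoms
    # columns come back unit-norm via norm_mat(..., type_norm = 'unit')
    return phi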
def singleGaussNeuroInfer(lambdas_vec, data, phi, l1, nonneg, A = [], ratio_null = 0.1, params = {}):
# Use quadprog to solve the weighted LASSO problem for a single vector
if phi.shape[1] != len(lambdas_vec):
raise ValueError('Dimension mismatch!')
# Set up problem
data = data.flatten() # Make sure time-trace is a column vector
lambdas_vec = lambdas_vec.flatten() # Make sure weight vector is a column vector
    p = len(lambdas_vec) # Get the number of dictionary atoms
# Set up linear operator
# Af = @(x) D*(x./tau_vec); # Set up the forward operator
# Ab = @(x) (D.'*x)./tau_vec; # Set up the backwards (transpose) operator
# A = linop_handles([numel(mov_vec), N2], Af, Ab, 'R2R'); # Create a TFOCS linear operator object
## Run the weighted LASSO to get the coefficients
if len(data) == 0 or np.sum(data**2) == 0:
A = np.zeros(p) # This is the trivial solution to generate all zeros linearly.
raise ValueError('zeros again')
else:
        if nonneg:
            if checkEmptyList(A) or (np.asarray(A) == 0).all():
                # Use quadratic programming to solve the non-negative LASSO
                A = solve_qp(2*(phi.T @ phi), -2*phi.T @ data + l1*lambdas_vec,
                             solver = params['solver_qp'])
            else:
                # Warm-start the solver with the previous coefficients
                A = solve_qp(2*(phi.T @ phi), -2*phi.T @ data + l1*lambdas_vec,
                             solver = params['solver_qp'], initvals = A)
            if np.isnan(A).any():
                raise ValueError('solve_qp returned NaN coefficients')
            # Bounds lb = np.zeros((p,1)) and ub = np.inf*np.ones((p,1)) could be
            # passed to solve_qp to enforce non-negativity explicitly
else:
#params['nonneg'] = False
#efficiencyMarker
A = solve_Lasso_style(phi, data, l1, [], params = params, random_state = 0).flatten()
#solver_L1RLS(phi, data, l1, zeros(N2, 1), params ) # Solve the weighted LASSO using TFOCS and a modified linear operator
if params['norm_by_lambdas_vec']:
A = A.flatten()/lambdas_vec.flatten(); # Re-normalize to get weighted LASSO values
# consider changing to oscar like here https://github.com/vene/pyowl/blob/master/pyowl.py
if params['nullify_some']:
A[A<ratio_null*np.max(A)] = 0;
return A
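# Minimal usage sketch for singleGaussNeuroInfer (our own illustration; shapes
# and values are arbitrary assumptions, using the default 'inv' solver path):
def _example_single_gauss_infer():
    rng = np.random.default_rng(0)
    phi = rng.random((100, 4))          # time X p dictionary
    trace = rng.random(100)             # one pixel's time-trace
    lambdas_vec = np.ones(4)            # uniform RWL1 weights
    return singleGaussNeuroInfer(lambdas_vec, trace, phi, l1 = 0,
                                 nonneg = False, params = dict(params_default))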
def solve_Lasso_style(A, b, l1, x0, params = {}, lasso_params = {},random_state = 0):
"""
Solves the l1-regularized least squares problem
minimize (1/2)*norm( A * x - b )^2 + l1 * norm( x, 1 )
Parameters
----------
A : TYPE
DESCRIPTION.
b : TYPE
DESCRIPTION.
    l1 : float
        non-negative scalar weighting the l1 regularization term on the coefficients.
x0 : TYPE
DESCRIPTION.
params : TYPE, optional
DESCRIPTION. The default is {}.
lasso_params : TYPE, optional
DESCRIPTION. The default is {}.
random_state : int, optional
        random state for reproducibility. The default is 0.
Raises
------
NameError
DESCRIPTION.
Returns
-------
x : np.ndarray
the solution for min (1/2)*norm( A * x - b )^2 + l1 * norm( x, 1 ) .
lasso_options:
- 'inv' (least squares)
- 'lasso' (sklearn lasso)
- 'fista' (https://pylops.readthedocs.io/en/latest/api/generated/pylops.optimization.sparsity.FISTA.html)
- 'omp' (https://pylops.readthedocs.io/en/latest/gallery/plot_ista.html#sphx-glr-gallery-plot-ista-py)
- 'ista' (https://pylops.readthedocs.io/en/latest/api/generated/pylops.optimization.sparsity.ISTA.html)
- 'IRLS' (https://pylops.readthedocs.io/en/latest/api/generated/pylops.optimization.sparsity.IRLS.html)
- 'spgl1' (https://pylops.readthedocs.io/en/latest/api/generated/pylops.optimization.sparsity.SPGL1.html)
    - Refers to the way the coefficients should be calculated ('inv' -> no l1 regularization).
"""
if len(b.flatten()) == np.max(b.shape):
b = b.reshape((-1,1))
if 'solver' not in params.keys():
warnings.warn('Pay Attention: Using Default (inv) solver for updating A. If you want to use lasso please change the solver key in params to lasso or another option from "solve_Lasso_style"')
params = {**{'threshkind':'soft','solver':'inv','num_iters':50}, **params}
#print(params['solver'])
if params['solver'] == 'inv' or l1 == 0:
#print(A.shape)
#print(b.shape)
x =linalg.pinv(A) @ b.reshape((-1,1))
#print(x.shape)
elif params['solver'] == 'lasso' :
#herehere try without warm start
clf = linear_model.Lasso(alpha=l1,random_state=random_state, **lasso_params)
clf.fit(A,b.T )
x = np.array(clf.coef_)
elif params['solver'].lower() == 'fista' :
Aop = pylops.MatrixMult(A)
#if 'threshkind' not in params: params['threshkind'] ='soft'
#other_params = {'':other_params[''],
x = pylops.optimization.sparsity.FISTA(Aop, b.flatten(), niter=params['num_iters'],
eps = l1 , threshkind = params.get('threshkind') )[0]
elif params['solver'].lower() == 'ista' :
#herehere try without warm start
if 'threshkind' not in params: params['threshkind'] ='soft'
Aop = pylops.MatrixMult(A)
x = pylops.optimization.sparsity.ISTA(Aop, b.flatten(), niter=params['num_iters'] ,
eps = l1,threshkind = params.get('threshkind'))[0]
elif params['solver'].lower() == 'omp' :
Aop = pylops.MatrixMult(A)
x = pylops.optimization.sparsity.OMP(Aop, b.flatten(),
niter_outer=params['num_iters'], sigma=l1)[0]
elif params['solver'].lower() == 'spgl1' :
Aop = pylops.MatrixMult(A)
x = pylops.optimization.sparsity.SPGL1(Aop, b.flatten(),iter_lim = params['num_iters'], tau = l1)[0]
elif params['solver'].lower() == 'irls' :
Aop = pylops.MatrixMult(A)
#herehere try without warm start
        x = pylops.optimization.sparsity.IRLS(Aop, b.flatten(), nouter=50, epsI = l1)[0]
else:
raise NameError('Unknown update c type')
return x
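# Minimal usage sketch for solve_Lasso_style (our own illustration; problem
# sizes and the l1 value are arbitrary assumptions):
def _example_solve_lasso_style():
    rng = np.random.default_rng(0)
    A_mat = rng.random((30, 5))
    x_true = np.array([1.0, 0.0, 0.5, 0.0, 0.0])
    b = A_mat @ x_true
    x_ls = solve_Lasso_style(A_mat, b, l1=0, x0=[], params={'solver': 'inv'})
    x_sparse = solve_Lasso_style(A_mat, b, l1=0.1, x0=[], params={'solver': 'fista'})
    return x_ls, x_sparse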
def updateLambdasMat(A, corr_kern, beta, params ):
p = A.shape[1]
n_neurons = A.shape[0]
params = {**{'epsilon':1, 'updateEmbed': False, 'mask':[]}, **params}
if params.get('updateEmbed') : # If required, recalculate the graph based on the current estimate of coefficients
H = mkDataGraph(A, []);
# This line actually runs that re-calculation of the graph
if (isinstance( params['epsilon'] , list) and len(params['epsilon']) == 1):
params['epsilon'] = params['epsilon'][0]
if isinstance(params['epsilon'], numbers.Number): # If the numerator of the weight updates is constant...
        if params['updateEmbed'] :                          # - If the graph was updated, use the new graph (H)
            lambdas = params['epsilon']/(beta + A + H @ A)  # - Calculate the weight updates tau/(beta + |s_i| + [P*S]_i)
elif not params['updateEmbed']:
# - If the graph was not updated, use the original graph (in corr_kern)
if corr_kern.shape[0] == n_neurons : # - If the weight projection matrix has the same number of rows as pixels, update based on a matrix multiplication
                lambdas = params['epsilon']/(beta + A + corr_kern @ A) # - Calculate the weight updates tau/(beta + |s_i| + [P*S]_i)
else:
raise ValueError('This case is not defined yet') #future
# CF = np.zeros(A.shape);
# for net_num in range(p):
# if not params.get('mask')
# CF[:,net-num] = np.convolve(conv2(reshape(S(:,i),params.nRows,params.nCols), corr_kern, mode = 'same');
# else
# temp = zeros(params.nRows,params.nCols);
# temp(params.mask(:)) = S(:,i);
# temp = conv2(temp ,corr_kern,'same');
# CF(:,i) = temp(params.mask(:));
# lambdas = params['epsilon']/(beta + A + CF); # - Calculate the wright updates tau/(beta + |s_i| + [P*S]_i)
    elif len(np.array(params['epsilon']).flatten()) == p:   # If the numerator of the weight updates is the size of the dictionary (i.e., one tau per dictionary element)...
        if params['updateEmbed'] :                          # - If the graph was updated, use the new graph (H)
            lambdas = np.array(params['epsilon']).reshape((1,-1))/(beta + A + H @ A)             # - Calculate the weight updates tau/(beta + |s_i| + [P*S]_i)
        else :                                              # - If the graph was not updated, use the original graph (in corr_kern)
            if corr_kern.shape[0] == n_neurons :            # - If the weight projection matrix has the same number of rows as pixels, update based on a matrix multiplication
                lambdas = np.array(params['epsilon']).reshape((1,-1))/(beta + A + corr_kern @ A) # - Calculate the weight updates tau/(beta + |s_i| + [P*S]_i)
else :
raise ValueError('Invalid kernel shape') #future # - Otherwise, the graph was updated; use the original graph (in corr_kern)
# I'm not sure what option is supposed to go here
# lambdas = reshape(convn(reshape(S, [im_x, im_y, nd]),...
# corr_kern,'same'), im_x*im_y,1); # - Get base weights
# lambdas = bsxfun(@times, params['epsilon'], 1./(beta + S + lambdas)); # - Calculate the wright updates tau/(beta + |s_i| + [P*S]_i)
elif params['epsilon'].shape[0] == A.shape[0] and params['epsilon'].shape[1] == A.shape[1]: #future
raise ValueError('This option is not available yet')
# If the numerator of the weight updates is the size of the image
#CF = np.zeros(A.shape)
#for net_num in range(A.shape[1]): #future
#if not params.get('mask')
# CF(:,i) = vec(conv2(reshape(S(:,i),params.nRows,params.nCols) ,corr_kern,'same'));
#else
# temp = zeros(params.nRows,params.nCols);
# temp(params.mask(:)) = S(:,i);
# CF(:,i) = vec(conv2(temp ,corr_kern,'same'));
# lambdas = bsxfun(@times, params['epsilon'], ones(1,1,nd))./(beta+S+CF); % - Calculate the wright updates tau/(beta + |s_i| + [P*S]_i)
else:
raise ValueError('Invalid Option')
return lambdas
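# Minimal sketch of the RWL1 weight update lambdas = eps/(beta + |s_i| + [P*S]_i)
# (our own illustration; the toy kernel below is an arbitrary assumption):
def _example_update_lambdas():
    rng = np.random.default_rng(0)
    A = rng.random((10, 3))               # pixels X p coefficients
    kern = np.full((10, 10), 0.1)         # toy graph kernel, rows sum to 1
    pars = {**params_default, 'epsilon': 1, 'updateEmbed': False}
    return updateLambdasMat(A, kern, beta=0.09, params=pars)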
def MovToD2(mov):
"""
Parameters
----------
mov : can be list of np.ndarray of frames OR 3d np.ndarray of [pixels X pixels X time]
The data
Returns
-------
array
a 2d numpy array of the movie, pixels X time
"""
if isinstance(mov, list):
return np.hstack([frame.flatten().reshape((-1,1)) for frame in mov])
    elif isinstance(mov, np.ndarray) and len(np.shape(mov)) == 2:
        return mov                      # already pixels X time
elif isinstance(mov, np.ndarray) and len(np.shape(mov)) == 3:
return np.hstack([mov[:,:,frame_num].flatten().reshape((-1,1)) for frame_num in range(mov.shape[2])])
else:
raise ValueError('Unrecognized dimensions for mov (cannot change its dimensions to 2d)')
def D2ToMov(mov, frameShape, type_return = 'array'):
"""
Parameters
----------
    mov : np.ndarray
        2d array of the movie, pixels X time.
    frameShape : tuple of 2 ints
        (rows, cols) shape of a single frame.
type_return : string, can be 'array' or 'list', optional
The default is 'array'.
Raises
------
ValueError - if dimensions do not fit
Returns
-------
list or np.ndarray (according to the input "type return") of frames with shape frameShape X time
"""
if mov.shape[0] != frameShape[0]*frameShape[1] :
raise ValueError('Shape of each frame ("frameShape") is not consistent with the length of the data ("mov")')
if type_return == 'array':
return np.dstack([mov[:,frame].reshape(frameShape) for frame in range(mov.shape[1])])
elif type_return == 'list':
return [mov[:,frame].reshape(frameShape) for frame in range(mov.shape[1])]
else:
raise ValueError('Invalid "type_return" input. Should be "list" or "array"')
def mkDataGraph(data, params = {}, reduceDim = False, reduceDimParams = {}, graph_function = 'gaussian',
K_sym = True, use_former = True, data_name = 'none', toNormRows = True):
"""
Parameters
----------
data : should be neurons X time OR neurons X p
DESCRIPTION.
params : TYPE, optional
DESCRIPTION. The default is {}.
reduceDim : TYPE, optional
DESCRIPTION. The default is False.
reduceDimParams : TYPE, optional
DESCRIPTION. The default is {}.
graph_function : TYPE, optional
DESCRIPTION. The default is 'gaussian'.
K_sym : TYPE, optional
DESCRIPTION. The default is True.
use_former : TYPE, optional
DESCRIPTION. The default is True.
data_name : TYPE, optional
DESCRIPTION. The default is 'none'.
toNormRows : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
TYPE
DESCRIPTION.
"""
reduceDimParams = {**{'alg':'PCA'}, **reduceDimParams}
params = addKeyToDict(params_config,
params)
if len(data.shape) == 3:
data = np.hstack([data[:,:,i].flatten().reshape((-1,1)) for i in range(data.shape[2])])
print('data was reshaped to 2d')
# Future: PCA
if reduceDim:
pca = PCA(n_components=params['n_comps'])
data = pca.fit_transform(data)
#raise ValueError('stop here')
K = calcAffinityMat(data, params, data_name, use_former, K_sym, graph_function)
K = K - np.diag(np.diag(K) )
if toNormRows:
K = K/K.sum(1).reshape((-1,1))
return K
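# Minimal sketch of building the pixel-affinity graph (our own illustration;
# the toy data shape is an assumption, and the kNN cache file 'toy_graph_knn.npy'
# is written as a side effect):
def _example_mk_data_graph():
    rng = np.random.default_rng(0)
    data = rng.random((60, 40))                 # pixels X time
    pars = {**params_config, 'n_neighbors': 5}
    K = mkDataGraph(data, pars, use_former=False, data_name='toy_graph')
    # K is row-normalized with a zeroed diagonal
    return K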
def calcAffinityMat(data, params, data_name, use_former, K_sym = True, graph_function = 'gaussian'):
# data = neurons X time
n_cols = data.shape[1]
n_rows = data.shape[0]
numNonZeros = params['n_neighbors'] * n_cols
# knn_dict is a dictionary with keys 'dist' and 'ind'
knn_dict = findNeighDict(data, params, data_name, use_former, addi = '_knn', to_save = True)
matInds = createSparseMatFromInd(knn_dict['ind'], is_mask = True)
matDists = createSparseMatFromInd(knn_dict['ind'], defVal = knn_dict['dist'], is_mask = False)
if graph_function == 'gaussian':
K = gaussian_vals(matDists, std = np.median(matDists[matInds != 0 ]))
else:
raise ValueError('Unknown Graph Function')
if K_sym:
K = (K + K.T)/2
return K
def findNeighDict(data, params, data_name = 'none',
use_former = True, addi = '_knn', to_save = True):
"""
"""
save_knn_path = data_name + '%s.npy'%addi #np.save()
if use_former and os.path.isfile(save_knn_path) :
knn_dict = np.load(save_knn_path, allow_pickle=True).item()
else:
if params['n_neighbors'] > data.shape[1]:
print('Too many neighbors were required, set it to %d'%int(data.shape[1]/2))
params['n_neighbors'] = int(data.shape[1]/2)
nbrs = NearestNeighbors(n_neighbors=params['n_neighbors'],
algorithm=params['alg']).fit(data)
distances, indices = nbrs.kneighbors(data)
knn_dict = {'dist': distances, 'ind': indices}
if to_save:
np.save(save_knn_path, knn_dict)
return knn_dict
def createSparseMatFromInd(inds, M = 0, defVal = 1, is_mask = True ):
"""
This function find a 0-1 matrix where the non-zeros are located according to inds
Parameters
----------
inds : np.ndarray [sample index X number of neighbors]
indices of the neighbors
M : int, optional
DESCRIPTION. The default is 0.
defVal : number OR numpy.ndarray with the same shape of inds, optional
DESCRIPTION. The default is 1.
Returns
-------
mat : np.ndarray of size M X M of 0/1 values
"""
if M == 0 or M < np.max(inds):
M = np.max([np.max(inds)+1, inds.shape[0]])
print('M was changed in "createSparseMatFromInd"')
mat = np.zeros((M,M))
if not is_mask: mat += np.inf
rows = np.repeat(np.arange(inds.shape[0]).reshape((-1,1)),inds.shape[1], axis=1)
mat[rows,inds] = defVal
return mat
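# Minimal sketch of createSparseMatFromInd (our own illustration): row i gets
# ones at the columns listed in inds[i, :].
def _example_sparse_from_ind():
    inds = np.array([[1, 2],
                     [0, 2],
                     [0, 1]])              # each sample's neighbor indices
    mask = createSparseMatFromInd(inds)    # 3 X 3 matrix of 0/1 values
    return mask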
def gaussian_vals(mat, std = 1, mean = 0 , norm = False, dimensions = 1, mat2 = [], power = 2):
"""
check_again
Parameters
----------
mat : the matrix to consider
std : number, gaussian std
    mean : number, optional
mean gaussian value. The default is 0.
norm : boolean, optional
whether to divide values by sum (s.t. sum -> 1). The default is False.
Returns
-------
g : gaussian values of mat
"""
if dimensions == 1:
if not checkEmptyList(mat2): warnings.warn('Pay attention that the calculated Gaussian is 1D. Please change the input "dimensions" in "gaussian_vals" to 2 if you want to consider the 2nd mat as well')
g = np.exp(-((mat-mean)/std)**power)
if norm: return g/np.sum(g)
elif dimensions == 2:
#dim1_mat = np.abs(mat1.reshape((-1,1)) @ np.ones((1,len(mat1.flatten()))))
#dim2_mat = np.abs((mat2.reshape((-1,1)) @ np.ones((1,len(mat2.flatten())))).T)
#g= np.exp(-0.5 * (1/std)* (dim1_mat**power + (dim1_mat.T)**power))
g = gaussian_vals(mat, std , mean , norm , dimensions = 1, mat2 = [], power = power)
g1= g.reshape((1,-1))
g2 = np.exp(-0.5/np.max([int(len((mat2-1)/2)),1])) * mat2.reshape((-1,1))
g = g2 @ g1
g[int(g.shape[0]/2), int(g.shape[1]/2)] = 0
if norm:
g = g/np.sum(g)
else:
raise ValueError('Invalid "dimensions" input')
return g
#%% GD Updates
def update_GDiters(dict_old, A, data, step_s, params):
"""
    Take a step in the negative gradient of the basis: Minimizing the energy E = ||x-Da||_2^2 + lambda*||a||_1^2
    Parameters
    ----------
    dict_old : np.ndarray, T X p
        current temporal dictionary.
    A : np.ndarray, N X p
        spatial coefficients.
    data : np.ndarray, N X T
        neural recordings.
    step_s : float
        gradient step size.
    params : dict
        algorithm parameters.
    Returns
    -------
    dict_old : np.ndarray, the updated dictionary
    """
    for index2 in range(params.get('GD_iters')):
        # Update the basis matrix with one gradient step
        dict_old = dict_old + (step_s/A.shape[0])*((data.T - dict_old @ A.T) @ A)
        # For hyperspectral data, care needs to be taken to saturate at 0,
        # so that no negative reflectances are learned.
        if params.get('nonneg'):
            dict_old[dict_old < 0] = 0
        if np.sum(dict_old) == 0:
            raise ValueError('Dictionary collapsed to all zeros during the gradient update')
return dict_old
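# Minimal sketch of one gradient-descent dictionary update (our own
# illustration; the shapes and step size are arbitrary assumptions):
def _example_update_gditers():
    rng = np.random.default_rng(0)
    phi = rng.random((100, 4))                 # time X p dictionary
    A = rng.random((50, 4))                    # pixels X p coefficients
    data = A @ phi.T + 0.01 * rng.random((50, 100))
    pars = {**params_default, 'GD_iters': 1, 'nonneg': True}
    return update_GDiters(phi, A, data, step_s=0.5, params=pars)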
def update_GDwithForb(dict_old, A, data, step_s, l2, params):
"""
Take a step in the negative gradient of the basis:
    This time the Frobenius norm is used to reduce unused basis elements. The energy function being minimized is
E = ||x-Da||_2^2 + lambda*||a||_1^2 + lamForb||D||_F^2
Parameters
----------
dict_old : TYPE
A : TYPE
data : TYPE
step_s : TYPE
DESCRIPTION.
l2 : TYPE
DESCRIPTION.
params : TYPE
DESCRIPTION.
Returns - new dict
-------
"""
for index2 in range(params.get('GD_iters')):
# Update The basis matrix
dict_new = dict_old + (step_s)*((data.T - dict_old @ A.T) @ A -l2*dict_old) @ np.diag(1/(1+np.sum(A != 0, 0)));
# For some data sets, the basis needs to be non-neg as well
if params.get('nonneg'):
dict_new[dict_new < 0] = 0
return dict_new
def update_FullLS(dict_old, A, data, params):
"""
Minimizing the energy:
E = ||X-DA||_2^2 via D = X*pinv(A)
Parameters
----------
dict_old : TYPE
A : TYPE
data : TYPE
params : TYPE
Returns
------- dict_new
"""
    if params.get('nonneg'):
        dict_new = np.zeros(dict_old.shape)             # Initialize the dictionary
        n_times = dict_old.shape[0]
        for t in range(n_times):
            dict_new[t,:] = nnls(A, data[:,t])[0]       # Solve the least-squares via a nonnegative program on a per-dictionary level
    else:
        dict_new = data.T @ np.linalg.pinv(A.T)         # Solve the least-squares via an inverse
return dict_new
def update_LSwithCont(dict_old, A, data, l3, params):
"""
Minimizing the energy: E = ||X-DA||_2^2 + l3*||D_old - D||_F^2 via D = [X;D_old]*pinv([A;I])
Parameters
----------
dict_old : TYPE
A : TYPE
data : TYPE
l2 : TYPE
l3 : TYPE
params : TYPE
Returns
------- dict_new
"""
    p = A.shape[1]
    if params.get('nonneg'):
        dict_new = np.zeros(dict_old.shape)             # Initialize the dictionary
        n_times = dict_old.shape[0]
        for t in range(n_times):
            # Solve the least-squares via a nonnegative program on a per-dictionary level
            dict_new[t,:] = nnls(np.vstack([A, l3*np.eye(p)]),
                                 np.concatenate([data[:,t], l3*dict_old[t,:]]))[0]
    else:
        # Solve the least-squares via an inverse
        dict_new = (np.linalg.pinv(np.vstack([A, l3*np.eye(p)])) @ np.vstack([data, l3*dict_old.T])).T
return dict_new
def update_LSwithContForb(dict_old, A, data, l2, l3, params):
"""
Minimizing the energy:
E = ||data.T-DA.T||_2^2 + l3*||D_old - D||_F^2 + l2*||D||_F^2 , via phi = [data.T;phi_old]*pinv([A.T;I])
Parameters
----------
dict_old : TYPE
A : TYPE
data : TYPE
l2 : TYPE
l3 : TYPE
params : TYPE
Returns
-------
dict_new
"""
    p = A.shape[1]
    if params.get('nonneg'):
        dict_new = np.zeros(dict_old.shape)             # Initialize the dictionary
        n_times = dict_old.shape[0]
        for t in range(n_times):
            # Solve the least-squares via a nonnegative program on a per-dictionary level
            dict_new[t,:] = nnls(np.vstack([A, l3*np.eye(p), l2*np.eye(p)]),
                                 np.concatenate([data[:,t], l3*dict_old[t,:], np.zeros(p)]))[0]
    else:
        # Solve the least-squares via an inverse
        dict_new = (np.linalg.pinv(np.vstack([A, l3*np.eye(p), l2*np.eye(p)]))
                    @ np.vstack([data, l3*dict_old.T, np.zeros((p, data.shape[1]))])).T
return dict_new
def update_LSwithForb(dict_old, A, data, l2, params):
"""
Minimizing the energy:
E = ||X-DA||_2^2 + l2*||D||_F^2
via D = X*A^T*pinv(AA^T+lamForb*I)
Parameters
----------
dict_old : np.ndarray, T X p
temporal profiles dict
A : np.ndarray, N X p
neural nets
data : np.ndarray, N X T
neural recordings
l2: number (regularization)
params : options
Returns
-------
dict_new : np.ndarray, T X p
temporal profiles dict
"""
if params.get('nonneg'):
#future
warnings.warn('Regularized non-negative ls is not implemented yet! Solving without non-negative constraints...\n')
dict_new = data.T @ A @ np.linalg.pinv(A.T @ A + l2*np.eye(A.shape[1])); # Solve the least-squares via an inverse
return dict_new
def update_FullLsCor(dict_old, A, data, l2, l3, l4, params):
"""
E = ||X-DA||_2^2 + l4*||D.'D-diag(D.'D)||_sav + l2*||D||_F^2 + l3*||D-Dold||_F^2
Parameters
----------
dict_old : np.ndarray, T X p
temporal profiles dict
A : np.ndarray, N X p
neural nets
data : np.ndarray, N X T
neural recordings
l2, L3, L4 : numbers (regularization)
params : options
Returns
-------
dict_new : np.ndarray, T X p
temporal profiles dict
"""
if params.get('nonneg'): # if non-negative matrix factorization
dict_new = np.zeros(dict_old.shape); # Initialize the dictionary
n_nets = dict_old.shape[1]
n_times = dict_old.shape[0]
# Solve the least-squares via a nonnegative program on a per-dictionary level
for t in range(n_times): #efficiencyMarker
dict_new[t,:] = solve_qp(2*A.T @ A + l4 + (l3+l2-l4)*np.eye(n_nets),
( -2*A.T @ data[:,t] + l3*dict_old[t,:]).reshape((1,-1))
, solver = params['solver_qp'] )
else:
dict_new = data.T @ A @ np.linalg.pinv(A.T @ A + l4*(1-np.eye(A.shape[1]))) ; # Solve the least-squares via an inverse
return dict_new
def sparseDeconvDictEst(dict_old, A, data, l2, params):
"""
This function should return the solution to the optimiation
S = argmin[||A - (l2*S)data||_F^2 + ]
D_i = l2*S_i
Parameters
----------
dict_old : np.ndarray, T X p
temporal profiles dict
A : np.ndarray, N X p
neural nets
data : np.ndarray, N X T
neural recordings
l2 : number (regularization)
params : dict
Returns
-------
phi : np.ndarray, T X p
temporal profiles dict
"""
    raise NotImplementedError('Function currently not available. Please change the "GD_type" from "sparse_deconv"')
#%% Other pre-processing
def norm_mat(mat, type_norm = 'evals', to_norm = True):
"""
This function comes to norm matrices by the highest eigen-value
Inputs:
mat = the matrix to norm
type_norm = what type of normalization to apply. Can be 'evals', 'unit' or 'none'.
to_norm = whether to norm or not to.
Output:
the normalized matrix
"""
if to_norm and type_norm != 'none':
if type_norm == 'evals':
eigenvalues, _ = linalg.eig(mat)
mat = mat / np.max(np.abs(eigenvalues))
elif type_norm == 'unit':
mat = mat @ np.diag(1 / np.sqrt(np.sum(mat**2,0)))
return mat
def str2bool(str_to_change):
"""
Transform 'true' or 'yes' to True boolean variable
Example:
str2bool('true') - > True
"""
if isinstance(str_to_change, str):
str_to_change = (str_to_change.lower() == 'true') or (str_to_change.lower() == 'yes')
return str_to_change
#%% Plotting Functions
def visualize_images(to_use_array = True, to_norm = True,
                     folder_path = r'E:\CODES FROM GITHUB\GraFT-analysis\code\neurofinder.02.00\images' ):
    if to_use_array:
        array_images = from_folder_to_array(folder_path)
        if to_norm:
            # Normalize each frame by its own maximum value
            array_images = array_images / array_images.max(axis=(0,1)).reshape((1,1,-1))
        return array_images
def from_folder_to_array(path_images = r'E:\CODES FROM GITHUB\GraFT-analysis\code\neurofinder.02.00\images'
, max_images = 100):
    if isinstance(path_images,(np.ndarray, list)):
        return np.asarray(path_images)      # already loaded; pass it through
elif isinstance(path_images,str):
files = os.listdir(path_images)
files = np.sort(files)
return np.dstack([load_image_to_array(path_images = path_images, image_to_load = cur_file) for counter, cur_file in enumerate(files) if counter < max_images])
def load_image_to_array(path_images = r'E:\CODES FROM GITHUB\GraFT-analysis\code\neurofinder.02.00\images',
image_to_load = 'image07971.tiff'):
    im_path = os.path.join(path_images, image_to_load)
im = io.imread(im_path)
imarray = np.array(im)
return imarray
def slider_changed(event):
    # GUI callback: expects module-level `slider`, `ax` and `array_images` to exist
    val = slider.get()
    ax.imshow(array_images[:,:,int(val)])
/Enarksh-0.9.0.tar.gz/Enarksh-0.9.0/enarksh/controller/client/NodeActionClient.py
import zmq
import enarksh
from enarksh.controller.message.NodeActionMessage import NodeActionMessage
class NodeActionClient:
"""
A client for requesting the controller for a node action.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io):
"""
Object constructor.
:param enarksh.style.EnarkshStyle.EnarkshStyle io: The output decorator.
"""
self._zmq_context = None
"""
The ZMQ context.
:type: Context
"""
self._zmq_controller = None
"""
The socket for communicating with the controller.
:type: zmq.sugar.socket.Socket
"""
self._io = io
"""
The output decorator.
:type: enarksh.style.EnarkshStyle.EnarkshStyle
"""
# ------------------------------------------------------------------------------------------------------------------
def main(self, uri, act_id):
"""
The main function of node_action.
:param str uri: The URI of the (trigger) node that must be triggered.
:param int act_id: The ID of the requested action.
"""
# Initialize ZMQ.
self._zmq_init()
# Compose the message for the controller.
message = NodeActionMessage(uri, act_id)
# Send the message to the controller.
self._zmq_controller.send_pyobj(message)
# Await the response from the controller.
response = self._zmq_controller.recv_pyobj()
if response['ret'] == 0:
self._io.log_verbose(response['message'])
else:
self._io.error(response['message'])
return response['ret']
# ------------------------------------------------------------------------------------------------------------------
def _zmq_init(self):
"""
Initializes ZMQ.
"""
self._zmq_context = zmq.Context()
# Create socket for communicating with the controller.
self._zmq_controller = self._zmq_context.socket(zmq.REQ)
self._zmq_controller.connect(enarksh.CONTROLLER_LOCKSTEP_END_POINT)
    # ----------------------------------------------------------------------------------------------------------------------
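# Minimal usage sketch (illustrative only; the URI and act_id values below are
# assumptions, and the EnarkshStyle constructor arguments may differ):
#
#   from enarksh.style.EnarkshStyle import EnarkshStyle
#   client = NodeActionClient(EnarkshStyle())
#   ret = client.main('//my_schedule/my_node', act_id=1)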
/Gene_POCKET-0.0.4-py3-none-any.whl/pocket/eqtl.py
import os
import random
import subprocess
import numpy as np
import pandas as pd
import limix
class limix_gwas():
def __init__(self, geno_matrix, pheno_list, snp_info_df, kinship=None):
self.geno_matrix = geno_matrix
self.geno_matrix.index = snp_info_df.rsid
self.pheno_list = pheno_list.dropna()
self.kinship = kinship
self.SNPinfo = snp_info_df
def data_check(self):
if self.kinship is not None:
culs = set(self.geno_matrix.columns) & set(self.pheno_list.index) & set(self.kinship.index)
self.kinship = self.kinship.loc[culs,culs]
else:
culs = set(self.geno_matrix.columns) & set(self.pheno_list.index)
self.geno_matrix = self.geno_matrix.loc[:,culs]
self.pheno_list = self.pheno_list.loc[culs]
def maf_filter(self, thresh =0.05):
from limix.qc import compute_maf
print("Start SNP filtering....(MAF > %s)" %thresh)
maf_list = compute_maf(self.geno_matrix.T.values)
self.geno_matrix = self.geno_matrix[np.array(maf_list) > thresh]
self.SNPinfo = self.SNPinfo[np.array(maf_list) > thresh]
print('%s variations used to perform GWAS.' %self.SNPinfo.shape[0])
def mean_impute(self):
from limix.qc import mean_impute
print("Genotype imputation with mean value")
geno_matrix = mean_impute(self.geno_matrix.T.values)
self.geno_matrix = geno_matrix.T
def do_gwas(self, geno_mat =None):
from limix.qtl import scan
print("Start to perform GWAS......")
if geno_mat is None:
geno_mat = self.geno_matrix.T
else:
geno_mat = geno_mat
if self.kinship is not None:
res = scan(geno_mat, self.pheno_list.values, "normal", K= self.kinship.values ,verbose=False)
else:
res = scan(self.geno_matrix.T.values, self.pheno_list.values, "normal", K= None ,verbose=False)
res_p = res.stats
res_p.index = self.SNPinfo.rsid
res_p.loc[:,'rsid'] = self.SNPinfo.rsid
res_p.loc[:,'chrom'] = self.SNPinfo.chrom
res_p.loc[:,'position'] = self.SNPinfo.position
betas = np.array(res.effsizes['h2'].effsize[res.effsizes['h2'].effect_type=='candidate'])
se = np.array(res.effsizes['h2'].effsize_se[res.effsizes['h2'].effect_type=='candidate'])
res_p.loc[:,'beta'] = betas
res_p.loc[:,'se'] = se
res_p.loc[:,'z_score'] = betas/se
self.res_p = res_p
return res_p
    def chrom_gwas_paiallel(self, n_jobs=10):
        from joblib import Parallel, delayed
        geno_list = []
        for g,d in self.SNPinfo.groupby('chrom'):
            snps = d.rsid
            geno_mat = self.geno_matrix.loc[snps,:].T.values
            geno_list.append(geno_mat)
        res_list = Parallel(n_jobs=n_jobs)(delayed(self.do_gwas)(mat) for mat in geno_list)
        res_p = pd.concat(res_list, ignore_index=True)
        self.res_p = res_p
        return res_p
def lead_SNP_caculation(self, plink_bed_f, temp_path, r2_thresh=0.2, interval=5000, r2_N_filter=20, r2_N_threshold=0.4, r2_max_filter= 0.8, p_thresh=1e-5, topN = 1000, plink_path=None):
print("Identify leadSNP ......")
self.res_p = self.res_p.sort_values(['pv20'])
if os.path.isdir(temp_path):
pass
else:
os.mkdir(temp_path)
sig_list = []
df = self.res_p
df = df[df.pv20 <= p_thresh]
sorted_snps = list(df.rsid)
while len(sorted_snps) > 0:
rand_n = str(random.random()*1e7)[0:6]
leadSNP = sorted_snps[0]
if len(sig_list) > topN:
break
try:
subprocess.call("plink --bfile %s --r2 --ld-snp %s --allow-extra-chr --ld-window-kb %d --ld-window 100000 --ld-window-r2 0 --out %s/%s_ld_res" %(plink_bed_f, leadSNP, interval,temp_path, rand_n), shell=True)
except IOError:
rand_n = str(random.random()*1e7)[0:6]
subprocess.call("plink --bfile %s --r2 --ld-snp %s --allow-extra-chr --ld-window-kb %d --ld-window 100000 --ld-window-r2 0 --out %s/%s_ld_res" %(plink_bed_f, leadSNP, interval, temp_path, rand_n), shell=True)
ld_res = pd.read_table('%s/%s_ld_res.ld' %(temp_path, rand_n),sep='\s+')
ld_res.index = ld_res.loc[:,'SNP_B']
ld_res = ld_res[~ld_res.index.duplicated(keep='first')]
nlargest_r2 = ld_res.R2.nlargest(r2_N_filter+1)
if (nlargest_r2.iloc[1] < r2_max_filter) and (nlargest_r2.iloc[-1] < r2_N_threshold):
droped_SNPs = ld_res[ld_res.R2 >= r2_thresh].SNP_B
sorted_snps = [x for x in sorted_snps if x not in droped_SNPs]
continue
droped_SNPs = ld_res[ld_res.R2 >= r2_thresh].SNP_B
sorted_snps = [x for x in sorted_snps if x not in droped_SNPs]
sig_list.append(leadSNP)
leadSNP_df = df.loc[sig_list,:]
self.leadSNP_df = leadSNP_df
return leadSNP_df
def save_gwas(self, save_f,type='hdf5'):
print("Save GWAS results ....")
if type == 'hdf5':
try:
self.res_p.to_hdf(save_f, key= 'gwas_df', format='table', data_columns=['rsid','chrom','position','pv20'])
except NameError:
pass
except AttributeError:
pass
try:
self.leadSNP_df.to_hdf(save_f, key= 'leadSNP', format='table', data_columns=['rsid','chrom','position'])
except NameError:
pass
except AttributeError:
                pass
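# Minimal usage sketch of the limix_gwas workflow (illustrative only; `geno`,
# `pheno`, `snp_info` and `kin` are assumed to be pandas objects prepared by the
# caller, and the output file name is a placeholder):
def _example_limix_gwas(geno, pheno, snp_info, kin):
    gwas = limix_gwas(geno, pheno, snp_info, kinship=kin)
    gwas.data_check()              # align samples across genotype/phenotype/kinship
    gwas.maf_filter(thresh=0.05)   # drop rare variants
    res = gwas.do_gwas()           # per-variant association scan
    gwas.save_gwas('gwas_results.h5')
    return res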
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/issue_status_enum.py
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
from MergePythonSDK.shared.model_utils import MergeEnumType
class IssueStatusEnum(ModelNormal, MergeEnumType):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'ONGOING': "ONGOING",
'RESOLVED': "RESOLVED",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
        This must be a method because a model may have properties that are
        of type self; it must therefore run after the class is loaded.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
defined_types = {
'value': (str,),
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, value, *args, **kwargs): # noqa: E501
"""IssueStatusEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
return self
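    # Note the asymmetry with __init__ below: _from_openapi_data is the
    # deserialization path, so _spec_property_naming defaults to True
    # (incoming keys use the OpenAPI document's serialized names), while
    # __init__ defaults to False (pythonic snake_case names).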
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, value, *args, **kwargs): # noqa: E501
"""IssueStatusEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value | PypiClean |