the-stack_0_5904
"""Single slice vgg with normalised scale. """ import functools import lasagne as nn import numpy as np import theano import theano.tensor as T import data_loader import deep_learning_layers import image_transform import layers import preprocess import postprocess import objectives import theano_printer import updates import utils # Random params rng = np.random take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging) dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging) # Memory usage scheme caching = None # Save and validation frequency validate_every = 10 validate_train_set = True save_every = 10 restart_from_save = False # Training (schedule) parameters # - batch sizes batch_size = 32 sunny_batch_size = 4 batches_per_chunk = 16 AV_SLICE_PER_PAT = 1 num_epochs_train = 175 * AV_SLICE_PER_PAT # - learning rate and method base_lr = .0001 learning_rate_schedule = { 0: base_lr, num_epochs_train*9/10: base_lr/10, } momentum = 0.9 build_updates = updates.build_adam_updates # Preprocessing stuff cleaning_processes = [ preprocess.set_upside_up,] cleaning_processes_post = [ functools.partial(preprocess.normalize_contrast_zmuv, z=2)] augmentation_params = { "rotation": (-180, 180), "shear": (0, 0), "translation": (-8, 8), "flip_vert": (0, 1), "roll_time": (0, 0), "flip_time": (0, 0), } use_hough_roi = True # use roi to center patches preprocess_train = functools.partial( # normscale_resize_and_augment has a bug preprocess.preprocess_normscale, normscale_resize_and_augment_function=functools.partial( image_transform.normscale_resize_and_augment_2, normalised_patch_size=(64,64))) preprocess_validation = functools.partial(preprocess_train, augment=False) preprocess_test = preprocess_train sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation sunny_preprocess_validation = preprocess.sunny_preprocess_validation sunny_preprocess_test = preprocess.sunny_preprocess_validation # Data generators create_train_gen = data_loader.generate_train_batch create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation") create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train") create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"]) # Input sizes image_size = 64 data_sizes = { "sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high, "sliced:data:shape": (batch_size, 2,), "sliced:meta:PatientAge": (batch_size, 1), "sliced:meta:PatientSex": (batch_size, 1), "sunny": (sunny_batch_size, 1, image_size, image_size) # TBC with the metadata } # Objective l2_weight = 0.000 l2_weight_out = 0.000 def build_objective(interface_layers): # l2 regu on certain layers l2_penalty = nn.regularization.regularize_layer_params_weighted( 
interface_layers["regularizable"], nn.regularization.l2) # build objective return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty) # Testing postprocess = postprocess.postprocess test_time_augmentations = 200 # More augmentations since a we only use single slices tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x))) # Architecture def build_model(): #import here, such that our global variables are not overridden! from . import j6_2ch_128mm, j6_4ch meta_2ch = j6_2ch_128mm.build_model() meta_4ch = j6_4ch.build_model() l_age = nn.layers.InputLayer(data_sizes["sliced:meta:PatientAge"]) l_sex = nn.layers.InputLayer(data_sizes["sliced:meta:PatientSex"]) l_meta_2ch_systole = meta_2ch["meta_outputs"]["systole"] l_meta_2ch_diastole = meta_2ch["meta_outputs"]["diastole"] l_meta_4ch_systole = meta_4ch["meta_outputs"]["systole"] l_meta_4ch_diastole = meta_4ch["meta_outputs"]["diastole"] l_meta_systole = nn.layers.ConcatLayer([l_age, l_sex, l_meta_2ch_systole, l_meta_4ch_systole]) l_meta_diastole = nn.layers.ConcatLayer([l_age, l_sex, l_meta_2ch_diastole, l_meta_4ch_diastole]) ldsys1 = nn.layers.DenseLayer(l_meta_systole, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify) ldsys1drop = nn.layers.dropout(ldsys1, p=0.5) ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify) ldsys2drop = nn.layers.dropout(ldsys2, p=0.5) ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax) ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop) l_systole = layers.CumSumLayer(ldsys3dropnorm) lddia1 = nn.layers.DenseLayer(l_meta_diastole, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify) lddia1drop = nn.layers.dropout(lddia1, p=0.5) lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify) lddia2drop = nn.layers.dropout(lddia2, p=0.5) lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax) lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate lddia3dropnorm = layers.NormalisationLayer(lddia3drop) l_diastole = layers.CumSumLayer(lddia3dropnorm) submodels = [meta_2ch, meta_4ch] return { "inputs": dict({ "sliced:meta:PatientAge": l_age, "sliced:meta:PatientSex": l_sex, }, **{ k: v for d in [model["inputs"] for model in submodels] for k, v in list(d.items()) } ), "outputs": { "systole": l_systole, "diastole": l_diastole, }, "regularizable": dict({ }, **{ k: v for d in [model["regularizable"] for model in submodels if "regularizable" in model] for k, v in list(d.items()) } ), "pretrained":{ j6_2ch_128mm.__name__: meta_2ch["outputs"], j6_4ch.__name__: meta_4ch["outputs"], }, "cutoff_gradients": [ ] + [ v for d in [model["meta_outputs"] for model in submodels if "meta_outputs" in model] for v in list(d.values()) ] }
the-stack_0_5906
import re from queries import * from expresiones import * # ----------------------------------------------------------------------------- # Grupo 6 # # Universidad de San Carlos de Guatemala # Facultad de Ingenieria # Escuela de Ciencias y Sistemas # Organizacion de Lenguajes y Compiladores 2 # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # INICIA ANALIZADOR LEXICO # ----------------------------------------------------------------------------- #palabras reservadas del lenguaje reservadas = { # PALABRAS RESERVADAS POR SQL 'show' : 'SHOW', 'databases' : 'DATABASES', 'database' : 'DATABASE', 'tables' : 'TABLES', 'columns' : 'COLUMNS', 'from' : 'FROM', 'select' : 'SELECT', 'distinct' : 'DISTINCT', 'limit' : 'LIMIT', 'offset' : 'OFFSET', 'of':'OF', 'order' : 'ORDER', 'by' : 'BY', 'where' : 'WHERE', 'and' : 'AND', 'or' : 'OR', 'not' : 'NOT', 'in' : 'IN', 'concat' : 'CONCAT', 'only':'ONLY', 'as' : 'AS', 'sqrt' : 'SQRT', 'avg' : 'AVG', 'sum' : 'SUM', 'cont' :'CONT', 'desc' : 'DESC', 'asc' : 'ASC', 'like' : 'LIKE', 'min' : 'MIN', 'max' : 'MAX', 'abs' : 'ABS', 'on' : 'ON', 'union' : 'UNION', 'all' : 'ALL', 'insert' : 'INSERT', 'into' : 'INTO', 'values' : 'VALUES', 'update' : 'UPDATE', 'set' : 'SET', 'delete' : 'DELETE', 'create' : 'CREATE', 'primary' : 'PRIMARY', 'key' : 'KEY', 'null' : 'NULL', 'nulls':'NULLS', 'unique' : 'UNIQUE', 'check' : 'CHECK', 'cbrt' : 'CBRT', 'ceil' : 'CEIL', 'ceiling' : 'CEILING', 'degrees' : 'DEGREES', 'div':'DIV', 'exp':'EXP', 'factorial':'FACTORIAL', 'floor':'FLOOR', 'gcd':'GCD', 'lcm':'LCM', 'ln':'LN', 'log':'LOG', 'log10':'LOG10', #'current':'CURRENT', 'default' : 'DEFAULT', 'auto_increment' : 'AUTO_INCREMENT', 'alter' : 'ALTER', 'table' : 'TABLE', 'add' : 'ADD', 'drop' : 'DROP', 'column' : 'COLUMN', 'rename' : 'RENAME', 'to' : 'TO', 'replace' : 'REPLACE', 'type' : 'TYPE', 'enum' : 'ENUM', 'if' : 'IF', 'exists' : 'EXISTS', 'min_scale':'MIN_SCALE', 'mod':'MOD', 'pi':'PI', 'power':'POWER', 'radians':'RADIANS', 'round':'ROUND', 'scale':'SCALE', 'sign':'SIGN', 'mode' : 'MODE', 'owner' : 'OWNER', 'constraint' : 'CONSTRAINT', 'foreign' : 'FOREIGN', 'references' : 'REFERENCES', 'inherits' : 'INHERITS', 'group' : 'GROUP', 'having' : 'HAVING', 'inner' : 'INNER', 'outer' : 'OUTER', 'trim_scale':'TRIM_SCALE', 'trunc':'TRUNC', 'width_bucket':'WIDTH_BUCKET', 'random':'RANDOM', 'setseed':'SETSEED', 'acos':'ACOS', 'acosd':'ACOSD', 'asin':'ASIN', 'asind':'ASIND', 'atan':'ATAN', 'atan2':'ATAN2', 'cos':'COS', 'cosd':'COSD', 'cot':'COT', 'cotd':'COTD', 'sin':'SIN', 'sind':'SIND', 'tan':'TAN', 'tand':'TAND', 'atand':'ATAND', 'atan2d':'ATAN2D', 'sinh':'SINH', 'cosh':'COSH', 'tanh':'TANH', 'asinh':'ASINH', 'acosh':'ACOSH', 'atanh':'ATANH', 'length':'LENGTH', 'substring':'SUBSTRING', 'trim':'TRIM', 'get_byte':'GET_BYTE', 'md5':'MD5', 'set_byte':'SET_BYTE', 'sha256':'SHA256', 'substr':'SUBSTR', 'convert':'CONVERT', 'encode':'ENCODE', 'decode':'DECODE', 'escape':'ESCAPE', 'any':'ANY', 'some':'SOME', 'using':'USING', 'first':'FIRST', 'last':'LAST', 'current_user':'CURRENT_USER', 'session_user':'SESSION_USER', 'symmetric':'SYMMETRIC', 'left' : 'LEFT', 'right' : 'RIGHT', 'full' : 'FULL', 'join' : 'JOIN', 'natural' : 'NATURAL', 'case' : 'CASE', 'then' : 'THEN', 'begin' : 'BEGIN', 'end' : 'END', 'else' : 'ELSE', 'greatest' : 'GREATEST', 'least' : 'LEAST', 'intersect' : 'INTERSECT', 'except' : 'EXCEPT', # tipos de datos permitidos 'smallint' : 'SMALLINT', 'integer' : 'INTEGER', 'bigint' : 
'BIGINT', 'decimal' : 'DECIMAL', 'numeric' : 'NUMERIC', 'real' : 'REAL', 'double' : 'DOUBLE', 'precision' : 'PRECISION', 'money' : 'MONEY', 'varying' : 'VARYING', 'varchar' : 'VARCHAR', 'character' : 'CHARACTER', 'char' : 'CHAR', 'text' : 'TEXT', 'boolean' : 'BOOLEAN', 'timestamp':'TIMESTAMP', 'time':'TIME', 'date':'DATE', 'interval':'INTERVAL', 'year':'YEAR', 'month':'MONTH', 'day':'DAY', 'hour':'HOUR', 'minute':'MINUTE', 'second':'SECOND', 'to':'TO', 'true':'TRUE', 'false':'FALSE', 'declare' : 'DECLARE', 'function' : 'FUNCTION', 'returns' : 'RETURNS', 'returning':'RETURNING', 'exec':'EXEC', 'execute':'EXECUTE', 'between' : 'BETWEEN', 'ilike' : 'ILIKE', 'is':'IS', 'isnull':'ISNULL', 'notnull':'NOTNULL', #enums 'type':'TYPE', 'ENUM':'ENUM', #para trim 'leading':'LEADING', 'trailing':'TRAILING', 'both':'BOTH', 'for':'FOR', 'symmetric':'SYMMETRIC', 'use' : 'USE', 'now' : 'NOW', 'extract' : 'EXTRACT', 'date_part' : 'DATE_PART', 'current_date' : 'CURRENT_DATE', 'current_time' : 'CURRENT_TIME', # INDEX 'index':'INDEX', 'hash':'HASH', 'perform' : 'PERFORM', 'procedure' : 'PROCEDURE', 'out' : 'OUT', 'language' : 'LANGUAGE', 'plpgsql' : 'PLPGSQL', 'rowtype' : 'ROWTYPE', 'alias' : 'ALIAS' # revisar funciones de tiempo y fechas } # listado de tokens que manejara el lenguaje (solo la forma en la que los llamare en las producciones) tokens = [ 'PUNTOYCOMA', 'MAS', 'MENOS', 'POR', 'DIV', 'DOSPUNTOS', 'PUNTO', 'TYPECAST', 'CORCHETEIZQ', 'CORCHETEDER', 'POTENCIA', 'RESIDUO', 'MAYOR', 'MENOR', 'IGUAL', 'MAYORIGUAL', 'MENORIGUAL', 'DIFERENTE', 'IGUALIGUAL', 'PARENTESISIZQUIERDA', 'PARENTESISDERECHA', 'COMA', 'NOTEQUAL', 'SIMBOLOOR', 'SIMBOLOAND', 'SIMBOLOAND2', 'SIMBOLOOR2', 'NUMERAL', 'COLOCHO', 'DESPLAZAMIENTODERECHA', 'DESPLAZAMIENTOIZQUIERDA', 'DOLAR', #tokens que si devuelven valor 'DECIMALTOKEN', 'ENTERO', 'CADENA', 'ETIQUETA', 'ID' ] + list(reservadas.values()) # Tokens y la forma en la que se usaran en el lenguaje t_PUNTOYCOMA = r';' t_MAS = r'\+' t_MENOS = r'-' t_POR = r'\*' t_DIV = r'/' t_DOSPUNTOS = r':' t_PUNTO = r'\.' t_TYPECAST = r'::' t_CORCHETEDER = r']' t_CORCHETEIZQ = r'\[' t_POTENCIA = r'\^' t_RESIDUO = r'%' t_MAYOR = r'<' t_MENOR = r'>' t_IGUAL = r'=' t_MAYORIGUAL = r'>=' t_MENORIGUAL = r'<=' t_DIFERENTE = r'<>' t_IGUALIGUAL = r'==' t_PARENTESISIZQUIERDA = r'\(' t_PARENTESISDERECHA = r'\)' t_COMA = r',' t_NOTEQUAL = r'!=' t_SIMBOLOOR = r'\|\|' #esto va a concatenar cadenas t_SIMBOLOAND = r'&&' t_SIMBOLOAND2 = r'\&' t_SIMBOLOOR2 = r'\|' t_NUMERAL = r'\#' #REVISAR t_COLOCHO = r'~' #REVISAR t_DESPLAZAMIENTODERECHA = r'>>' t_DESPLAZAMIENTOIZQUIERDA = r'<<' t_DOLAR = r'\$' #definife la estructura de los decimales def t_DECIMAL(t): r'\d+\.\d+' try: t.value = float(t.value) except ValueError: print("El valor decimal es muy largo %d", t.value) t.value = 0 return t #definife la estructura de los enteros def t_ENTERO(t): r'\d+' try: t.value = int(t.value) except ValueError: print("El valor del entero es muy grande %d", t.value) t.value = 0 return t #definife la estructura de las cadenas def t_CADENA(t): r'[\'|\"].*?[\'|\"]' t.value = t.value[1:-1] # quito las comillas del inicio y final de la cadena return t #definife la estructura de las etiquetas, por el momento las tomo unicamente como letras y numeros def t_ETIQUETA(t): r'[a-zA-Z_]+[a-zA-Z0-9_]*' t.type = reservadas.get(t.value.lower(),'ID') # Check for reserved words print("ALV:",t) print("ALV:",t.type) return t # Comentario simple # ... 
def t_COMENTARIO_SIMPLE(t): r'--.*\n' t.lexer.lineno += 1 def t_COMENTARIO_MULTILINEA(t): r'/\*(.|\n|)*?\*/' t.lexer.lineno += t.value.count("\n") # ----------------------- Caracteres ignorados ----------------------- # caracter equivalente a un tab t_ignore = " \t" #caracter equivalente a salto de linea def t_newline(t): r'\n+' t.lexer.lineno += t.value.count("\n") def t_error(t): x=caden.splitlines() filas=len(x)-1 print("filas que no cambian: ",filas) if h.filapivote>0: fila=(t.lineno-1)-h.filapivote*filas else: fila=(t.lineno-1) h.filapivote+=1 print("Caracter lexico no permitido ==> '%s'" % t.value) h.errores+= "<tr><td>"+str(t.value[0])+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>LEXICO</td><td>token no pertenece al lenguaje</td></tr>\n" t.lexer.skip(1) # Construyendo el analizador léxico import ply.lex as lex lexer = lex.lex() # ----------------------------------------------------------------------------- # INICIA ANALIZADOR SINTACTICO # ----------------------------------------------------------------------------- # Asociación de operadores y precedencia precedence = ( ('left','TYPECAST'), ('right','UMINUS'), ('right','UNOT'), ('left','MAS','MENOS'), ('left','POTENCIA'), ('left','POR','DIV','RESIDUO'), ('left','AND','OR','SIMBOLOOR2','SIMBOLOOR','SIMBOLOAND2'), ('left','DESPLAZAMIENTOIZQUIERDA','DESPLAZAMIENTODERECHA'), ) #IMPORTACION DE CLASES ALTERNAS import reportes as h # estructura de mi gramatica #-----------------------------------------------------INICIO-------------------------------------------------------------------- def p_inicio_1(t) : 'inicio : queries' h.reporteGramatical1 +="inicio ::= queries \n" t[0]=t[1] p=t[0] h.insertarSimbolos(p) def p_queries_1(t) : 'queries : queries query' h.reporteGramatical1 +="queries ::= queries query\n" t[1].append(t[2]) t[0]=t[1] def p_queries_2(t) : 'queries : query' h.reporteGramatical1 +="queries ::= query\n" t[0]=[t[1]] #-----------------------------------------------------LISTA DE FUNCIONES-------------------------------------------------------------------- def p_query(t): '''query : mostrarBD | crearBD | alterBD | dropBD | useBD | operacion | insertinBD | updateinBD | deleteinBD | createTable | inheritsBD | dropTable | alterTable | variantesAt | contAdd | contDrop | contAlter | selectData PUNTOYCOMA | tipos | createIndex | combinacionSelects PUNTOYCOMA | execFunction ''' h.reporteGramatical1 +="query ::= opcion\n" h.reporteGramatical2 +="t[0]=t[1]\n" t[0]=t[1] # derivando cada produccion a cosas como el create, insert, select; funciones como avg, sum, substring irian como otra produccion #dentro del select (consulta) # empiezan las producciones de las operaciones finales #la englobacion de las operaciones #-----------------------------------------------------CREATE INDEX-------------------------------------------------------------------- def p_createIndex(t): 'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndex(t[3],t[5],t[7]) \n" t[0] = CreateIndex("INDEX",t[3],t[5],t[7]) def p_createIndex_1_1(t): 'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParams(t[3],t[5],t[7],t[8])\n" t[0] 
= CreateIndexParams("INDEX",t[3],t[5],t[7],t[8]) def p_createIndex_1_2(t): 'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[3],t[5],t[7],t[10])\n" t[0] = CreateIndexWhere("INDEX",t[3],t[5],t[7],t[10]) def p_createIndex_1_1_2(t): 'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[3],t[5],t[7],t[8],t[11]) \n" t[0] = CreateIndexParamsWhere("INDEX",t[3],t[5],t[7],t[8],t[11]) def p_createIndex_2(t): 'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = t[0] = CreateIndex(t[3],t[5],t[9]) \n" t[0] = CreateIndex("INDEX USING HASH",t[3],t[5],t[9]) def p_createIndex_2_1(t): 'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParams(t[3],t[5],t[9],t[10])\n" t[0] = CreateIndexParams("INDEX USING HASH",t[3],t[5],t[9],t[10]) def p_createIndex_2_2(t): 'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[3],t[5],t[9],t[12])\n" t[0] = CreateIndexWhere("INDEX USING HASH",t[3],t[5],t[9],t[12]) def p_createIndex_2_1_2(t): 'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[3],t[5],t[9],t[10],t[13])\n" t[0] = CreateIndexParamsWhere("INDEX USING HASH",t[3],t[5],t[9],t[10],t[13]) def p_createIndex_3(t): 'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = t[0] = CreateIndex(t[4],t[6],t[8]\n" t[0] = CreateIndex("UNIQUE",t[4],t[6],t[8]) def p_createIndex_3_1(t): 'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParams(t[4],t[6],t[8],t[9])\n" t[0] = CreateIndexParams("UNIQUE",t[4],t[6],t[8],t[9]) def p_createIndex_3_2(t): 'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' 
h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[4],t[6],t[8],t[11])\n" t[0] = CreateIndexWhere("UNIQUE",t[4],t[6],t[8],t[11]) def p_createIndex_3_1_2(t): 'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA' h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[4],t[6],t[8],t[9],t[12])\n" t[0] = CreateIndexParamsWhere("UNIQUE",t[4],t[6],t[8],t[9],t[12]) def p_indexParams(t): 'indexParams : sort' h.reporteGramatical1 +="indexParams ::= sort\n" h.reporteGramatical2 +="t[0] = t[1]\n" t[0] = t[1] def p_whereOptions_1(t): 'whereOptions : asignaciones' h.reporteGramatical1 +="whereOptions ::= asignaciones\n" h.reporteGramatical2 +="t[0] = t[1]\n" t[0] = t[1] def p_whereOptions_2(t): 'whereOptions : operacion' h.reporteGramatical1 +="whereOptions ::= operacion\n" h.reporteGramatical2 +="t[0] = t[1]\n" t[0] = t[1] def p_whereOptions_3(t): 'whereOptions : search_condition' h.reporteGramatical1 +="whereOptions ::= search_condition\n" h.reporteGramatical2 +="t[0] = t[1]\n" t[0] = t[1] def p_sortOptions_1(t): 'sort : NULLS FIRST' h.reporteGramatical1 +="sort ::= NULLS FIRST\n" h.reporteGramatical2 +="t[0] = t[2]\n" t[0] = t[2] def p_sortOptions_1_1(t): 'sort : DESC NULLS FIRST' h.reporteGramatical1 +="sort ::= DESC NULLS FIRST\n" h.reporteGramatical2 +="t[0] = t[3]\n" t[0] = SortOptions(t[1],t[3]) def p_sortOptions_1_2(t): 'sort : ASC NULLS FIRST' h.reporteGramatical1 +="sort ::= ASC NULLS FIRST\n" h.reporteGramatical2 +="t[0] = t[3]\n" t[0] = SortOptions(t[1],t[3]) def p_sortOptions_2(t): 'sort : NULLS LAST' h.reporteGramatical1 +="sort ::= NULLS LAST\n" h.reporteGramatical2 +="t[0] = t[2]\n" t[0] = t[2] def p_sortOptions_2_1(t): 'sort : DESC NULLS LAST' h.reporteGramatical1 +="sort ::= DESC NULLS LAST\n" h.reporteGramatical2 +="t[0] = t[3]\n" t[0] = SortOptions(t[1],t[3]) def p_sortOptions_2_2(t): 'sort : ASC NULLS LAST' h.reporteGramatical1 +="sort ::= ASC NULLS LAST\n" h.reporteGramatical2 +="t[0] = t[3]\n" t[0] = SortOptions(t[1],t[3]) #-----------------------------------------------------CREATE DB-------------------------------------------------------------------- def p_crearBaseDatos_1(t): 'crearBD : CREATE DATABASE ID PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabases(t[3])\n" t[0] = CreateDatabases(t[3]) def p_crearBaseDatos_2(t): 'crearBD : CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = Create_IF_Databases(t[3],t[6])\n" t[0] = Create_IF_Databases(t[3],t[6]) def p_crear_replace_BaseDatos_1(t): 'crearBD : CREATE OR REPLACE DATABASE ID PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabases(t[5])\n" t[0] = Create_Replace_Databases(t[3],t[5]) def p_crear_replace_BaseDatos_2(t): 'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabases(t[5])\n" t[0] = Create_Replace_IF_Databases(t[3],t[5],t[8]) def 
p_crear_param_BaseDatos_1(t): 'crearBD : CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[3],t[4])\n" t[0] = CreateDatabaseswithParameters(t[3],t[4]) def p_crear_param_BaseDatos_2(t): 'crearBD : CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[3],t[4])\n" t[0] = Create_Databases_IFwithParameters(t[3],t[6],t[7]) def p_crear_replace_param_BaseDatos_1(t): 'crearBD : CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[5],t[6])\n" t[0] = Create_Replace_DatabaseswithParameters(t[3],t[5],t[6]) def p_crear_replace_param_BaseDatos_2(t): 'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA' h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[5],t[6])\n" t[0] = Create_Replace_Databases_IFwithParameters(t[3],t[5],t[8],t[9]) def p_parametrosCrearBD_1(t): 'parametrosCrearBD : parametrosCrearBD parametroCrearBD' h.reporteGramatical1 +="parametrosCrearBD ::= parametrosCrearBD parametroCrearBD\n" h.reporteGramatical2 +="t[1].append(t[2])\n t[0]=t[1]\n" t[1].append(t[2]) t[0]=t[1] def p_parametrosCrearBD_2(t): 'parametrosCrearBD : parametroCrearBD' h.reporteGramatical1 +="parametrosCrearBD ::= parametroCrearBD\n" h.reporteGramatical2 +="t[0]=[t[1]]\n" t[0]=[t[1]] def p_parametroCrearBD(t): '''parametroCrearBD : OWNER IGUAL final | MODE IGUAL final ''' h.reporteGramatical1 +="parametroCrearBD ::= "+str(t[1])+" IGUAL "+str(t[3])+"\n" if t[1] == "OWNER": h.reporteGramatical2 +="t[0]=ExpresionOwner(t[1],t[3])\n" t[0]=ExpresionOwner(t[1],t[3]) elif t[1] == "MODE": h.reporteGramatical2 +="t[0]=ExpresionMode(t[1],t[3])\n" t[0]=ExpresionMode(t[1],t[3]) #-----------------------------------------------------SHOW DB-------------------------------------------------------------------- def p_mostrarBD(t): 'mostrarBD : SHOW DATABASES PUNTOYCOMA' h.reporteGramatical1 +="mostrarBD ::= SHOW DATABASES PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=ShowDatabases(1)\n" t[0]=ShowDatabases(1) def p_usarBaseDatos(t): 'useBD : USE ID PUNTOYCOMA' h.reporteGramatical1 +="useBD ::= USE ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=UseDatabases(t[2])\n" t[0]=UseDatabases(t[2]) print("t[0]:",t[0]) #-----------------------------------------------------ALTER BD-------------------------------------------------------------------- def p_alterBD_1(t): 'alterBD : ALTER DATABASE ID RENAME TO ID PUNTOYCOMA' h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" RENAME TO "+str(t[6])+" PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = AlterDB(t[3],t[6])\n" t[0] = AlterDB(t[3],t[6]) def p_alterBD_2(t): 'alterBD : ALTER DATABASE ID OWNER TO parametroAlterUser PUNTOYCOMA' h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" OWNER TO "+str(t[6])+" PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = AlterOwner(t[3],t[4],t[6])\n" t[0] = AlterOwner(t[3],t[4],t[6]) def p_parametroAlterUser(t): '''parametroAlterUser : CURRENT_USER | SESSION_USER | final ''' h.reporteGramatical1 +="parametroAlterUser ::= "+str(t[1])+" \n" 
h.reporteGramatical2 +="t[0] = t[1]\n" t[0] = t[1] #-----------------------------------------------------DROP TABLE----------------------------------------------------------------- def p_dropTable(t) : 'dropTable : DROP TABLE ID PUNTOYCOMA' h.reporteGramatical1 +="dropTable ::= DROP TABLE ID PUNTOYCOMA\n" t[0]=DropTable(t[3]) #-----------------------------------------------------ALTER TABLE----------------------------------------------------------------- def p_alterTable(t): ''' alterTable : ALTER TABLE ID variantesAt PUNTOYCOMA ''' h.reporteGramatical1 +="alterTable ::= ALTER TABLE ID variantesAt PUNTOYCOMA\n" h.reporteGramatical2 +="t[0] = AlterTable(t[3],t[4])" t[0] = AlterTable(t[3],t[4]) #---------------------------------------------------TIPOS------------------------------------------------------------------------ def p_variantesAt(t): ''' variantesAt : ADD contAdd | ALTER contAlter | DROP contDrop ''' if t[1].upper()=="ADD": h.reporteGramatical1 +="variantesAt ::= ADD contAdd\n" h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])" t[0]=VariantesAt(t[1],t[2]) elif t[1].upper()=="ALTER": h.reporteGramatical1 +="variantesAt ::= ALTER listaContAlter\n" h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])" t[0]=VariantesAt(t[1],t[2]) elif t[1].upper()=="DROP": h.reporteGramatical1 +="variantesAt ::= DROP contDrop\n" h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])" t[0]=VariantesAt(t[1],t[2]) # SE SEPARO LA LISTA PARA PODER MANIPULAR DATOS def p_listaContAlter(t): ''' listaContAlter : listaContAlter COMA contAlter ''' h.reporteGramatical1 +="listaContAlter ::= listaContAlter COMA contAlter\n" def p_listaContAlter_2(t): ''' listaContAlter : contAlter ''' h.reporteGramatical1 +="listaContAlter ::= contAlter\n" def p_contAlter(t): ''' contAlter : COLUMN ID SET NOT NULL | COLUMN ID TYPE tipo ''' if t[3].upper()=="SET": h.reporteGramatical1 +="contAlter ::= COLUMN ID SET NOT NULL\n" h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])" t[0]=contAlter(t[2],t[3],t[4]) elif t[3].upper()=="TYPE": h.reporteGramatical1 +="contAlter ::= COLUMN ID TYPE tipo\n" h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])" t[0]=contAlter(t[2],t[3],t[4]) def p_contAdd(t): ''' contAdd : COLUMN ID tipo | CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA | FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID | PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA | CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA | CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA | CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA ''' if t[1].upper()=="COLUMN": h.reporteGramatical1 +="contAdd ::= COLUMN ID tipo\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],None,None,None,None)" t[0]=contAdd(t[1],t[3],t[2],None,None,None,None) elif t[1].upper()=="CHECK": h.reporteGramatical1 +="contAdd ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],None,None,None,None,None,t[3])" t[0]=contAdd(t[1],None,None,None,None,None,t[3]) elif t[1].upper()=="FOREIGN": h.reporteGramatical1 +="contAdd ::= FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[4],t[7],None,None,None)" t[0]=contAdd(t[1],None,t[4],t[7],None,None,None) elif t[1].upper()=="PRIMARY": h.reporteGramatical1 +="contAdd ::= PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n" h.reporteGramatical2 
+="t[0]=contAdd(t[1],None,t[4],None,None,None,None)" t[0]=contAdd(t[1],None,t[4],None,None,None,None) elif t[1].upper()=="CONSTRAINT": if t[3].upper()=="PRIMARY": h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None)" t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None) elif t[3].upper()=="FOREIGN": h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None)" t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None) else: h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[2],None,None,None,t[5])" t[0]=contAdd(t[1],t[3],t[2],None,None,None,t[5]) def p_contDrop(t): ''' contDrop : COLUMN ID | CONSTRAINT ID | PRIMARY KEY ''' if t[1].upper()=="COLUMN": h.reporteGramatical1 +="contDrop ::= COLUMN ID \n" h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])" t[0]=contDrop(t[1],t[2]) elif t[1].upper()=="CONSTRAINT": h.reporteGramatical1 +="contDrop ::= CONSTRAINT ID\n" h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])" t[0]=contDrop(t[1],t[2]) elif t[1].upper()=="PRIMARY": h.reporteGramatical1 +="contDrop ::= PRIMARY KEY\n" h.reporteGramatical2 +="t[0]=contDrop(t[1],None)" t[0]=contDrop(t[1],None) # SE SEPARO LA LISTA PARA PODER MANIPULAR DATOS def p_listaID(t): ''' listaid : listaid COMA final ''' h.reporteGramatical1 +="listaid ::= listaid COMA ID\n" h.reporteGramatical2 +="t[1].append(t[3])\nt[0]=t[1]\n" t[1].append(t[3]) t[0]=t[1] def p_listaID_2(t): ''' listaid : final ''' h.reporteGramatical1 +="listaid ::= ID\n" h.reporteGramatical2 +="t[0]=[t[1]]" t[0]=[t[1]] #-----------------------------------------------------DROP BD-------------------------------------------------------------------- def p_dropBD_1(t): 'dropBD : DROP DATABASE ID PUNTOYCOMA' h.reporteGramatical1 +="dropBD ::= DROP DATABASE "+str(t[3])+" PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]= DropDB(t[3])\n" t[0]= DropDB(t[3]) def p_dropBD_2(t): 'dropBD : DROP DATABASE IF EXISTS ID PUNTOYCOMA' h.reporteGramatical1 +="dropBD ::= DROP DATABASE IF EXISTS "+str(t[5])+" PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]= DropDBIF(t[3],t[5])\n" t[0]= DropDBIF(t[3],t[5]) #-----------------------------------------------------OPERACIONES Y EXPRESIONES-------------------------------------------------------------------- def p_operacion(t): '''operacion : operacion MAS operacion | operacion MENOS operacion | operacion POR operacion | operacion DIV operacion | operacion RESIDUO operacion | operacion POTENCIA operacion | operacion AND operacion | operacion OR operacion | operacion SIMBOLOOR2 operacion | operacion SIMBOLOOR operacion | operacion SIMBOLOAND2 operacion | operacion DESPLAZAMIENTOIZQUIERDA operacion | operacion DESPLAZAMIENTODERECHA operacion | operacion IGUAL operacion | operacion IGUALIGUAL operacion | operacion NOTEQUAL operacion | operacion MAYORIGUAL operacion | operacion MENORIGUAL operacion | operacion MAYOR operacion | operacion MENOR operacion | operacion DIFERENTE operacion | PARENTESISIZQUIERDA operacion PARENTESISDERECHA | PARENTESISIZQUIERDA listaid PARENTESISDERECHA ''' # -------------------------------------------------------------------------------------------------------------- if t[2]=='+': h.reporteGramatical1 +="operacion ::= operacion MAS 
operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='-': h.reporteGramatical1 +="operacion ::= operacion MENOS operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='*': h.reporteGramatical1 +="operacion ::= operacion POR operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='/': h.reporteGramatical1 +="operacion ::= operacion DIV operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='%': h.reporteGramatical1 +="operacion ::= operacion RESIDUO operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='^': print("entra a la potencia") h.reporteGramatical1 +="operacion ::= operacion POTENCIA operacion\n" h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA)\n" t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA) # -------------------------------------------------------------------------------------------------------------- elif t[2]=="AND": h.reporteGramatical1 +="operacion ::= operacion AND operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n" t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND) # -------------------------------------------------------------------------------------------------------------- elif t[2]=="OR": h.reporteGramatical1 +="operacion ::= operacion OR operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n" t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='|': h.reporteGramatical1 +="operacion ::= operacion | operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n" t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='||': h.reporteGramatical1 +="operacion ::= operacion || operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n" t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='&': h.reporteGramatical1 +="operacion ::= operacion & operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n" 
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='<<': print(t[2]) h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTOIZQUIERDA operacion\n" h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA)\n" t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='>>': h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTODERECHA operacion\n" h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA)\n" t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='=': t[0]=ExpresionIgualdad(t[1],t[3]) #t[0]=operacionDelete(t[1],t[3],t[2]) h.reporteGramatical1 +="operacion ::= operacion IGUAL operacion\n" # -------------------------------------------------------------------------------------------------------------- elif t[2]=='==': h.reporteGramatical1 +="operacion ::= operacion IGUALIGUAL operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='!=': h.reporteGramatical1 +="operacion ::= operacion NOTEQUAL operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='>=': h.reporteGramatical1 +="operacion ::= operacion MAYORIGUAL operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='<=': h.reporteGramatical1 +="operacion ::= operacion MENORIGUAL operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='>': h.reporteGramatical1 +="operacion ::= operacion MAYOR operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='<': h.reporteGramatical1 +="operacion ::= operacion MENOR operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR)\n" t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR) # -------------------------------------------------------------------------------------------------------------- elif t[2]=='<>': h.reporteGramatical1 +="operacion ::= operacion DIFERENTE operacion\n" h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)\n" 
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE) # -------------------------------------------------------------------------------------------------------------- else: h.reporteGramatical1 +="operacion ::= PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" t[0]=t[2] # -------------------------------------------------------------------------------------------------------------- def p_operacion_menos_unario(t): '''operacion : MENOS ENTERO %prec UMINUS | MENOS DECIMAL %prec UMINUS ''' h.reporteGramatical1 +="operacion ::= MENOS operacion %prec UMINUS\n" h.reporteGramatical2 +="t[0]=ExpresionNegativo(t[2])\n" t[0]=ExpresionNegativo(t[2]) # -------------------------------------------------------------------------------------------------------------- def p_operacion_not_unario(t): 'operacion : NOT operacion %prec UNOT' h.reporteGramatical1 +="operacion ::= NOT operacion %prec UNOT\n" h.reporteGramatical2 +="t[0]=ExpresionNOT(t[2])\n" t[0]=ExpresionNOT(t[2]) # -------------------------------------------------------------------------------------------------------------- def p_operacion_funcion(t): 'operacion : funcionBasica' h.reporteGramatical1 +="operacion ::= funcionBasica\n" h.reporteGramatical2 +="t[0]=t[1]\n" t[0]=t[1] # -------------------------------------------------------------------------------------------------------------- def p_operacion_final(t): 'operacion : final' t[0] = t[1] h.reporteGramatical1 +="operacion ::= final\n" h.reporteGramatical2 +="t[0]=t[1]\n" t[0]=t[1] #-----------------------------------------------------FUNCIONES MATEMATICAS-------------------------------------------------------------------- def p_funcion_basica(t): '''funcionBasica : ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA | CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA | CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA | CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA | DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA | DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA | FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA | FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA | GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | LCM PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA | MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | PI PARENTESISIZQUIERDA PARENTESISDERECHA | POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TRIM_SCALE PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA | WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA | RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA | ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | 
ATAN2D PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA | COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA | COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA | COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA | LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA | NOW PARENTESISIZQUIERDA PARENTESISDERECHA | COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA | TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA | GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA | SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA | CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA | ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA | AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA | SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA | EXTRACT PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA | ID PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA | CURRENT_TIME | CURRENT_DATE ''' if t[1].upper()=="ABS": h.reporteGramatical1 +="funcionBasica ::= ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionABS(t[3])\n" t[0]=ExpresionABS(t[3]) elif t[1].upper()=="CBRT": h.reporteGramatical1 +="funcionBasica ::= CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCBRT(t[3])\n" t[0]=ExpresionCBRT(t[3]) elif t[1].upper()=="CEIL": h.reporteGramatical1 +="funcionBasica ::= CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCEIL(t[3])\n" t[0]=ExpresionCEIL(t[3]) elif t[1].upper()=="CEILING": h.reporteGramatical1 +="funcionBasica ::= CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCEILING(t[3])\n" t[0]=ExpresionCEILING(t[3]) elif t[1].upper()=="DEGREES": t[0]=ExpresionDEGREES(t[3]) h.reporteGramatical1 +="funcionBasica ::= DEGREES PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionDEGREES(t[3])\n" elif t[1].upper()=="DIV": print("entra a DIV++++++++++++") t[0]=ExpresionDIV(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= DIV PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionDIV(t[3],t[5])\n" elif t[1].upper()=="EXP": t[0]=ExpresionEXP(t[3]) h.reporteGramatical1 +="funcionBasica ::= EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionEXP(t[3])\n" elif t[1].upper()=="FACTORIAL": 
t[0]=ExpresionFACTORIAL(t[3]) h.reporteGramatical1 +="funcionBasica ::= FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionFACTORIAL(t[3])\n" elif t[1].upper()=="FLOOR": t[0]=ExpresionFLOOR(t[3]) h.reporteGramatical1 +="funcionBasica ::= FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionFLOOR(t[3])\n" elif t[1].upper()=="GCD": t[0]=ExpresionGCD(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionGCD(t[3],t[5])\n" elif t[1].upper()=="LN": t[0]=ExpresionLN(t[3]) h.reporteGramatical1 +="funcionBasica ::= LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionLN(t[3])\n" elif t[1].upper()=="LOG": t[0]=ExpresionLOG(t[3]) h.reporteGramatical1 +="funcionBasica ::= LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionLOG(t[3])\n" elif t[1].upper()=="MOD": t[0]=ExpresionMOD(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionMOD(t[3],t[5])\n" elif t[1].upper()=="PI": t[0]=ExpresionPI(1) h.reporteGramatical1 +="funcionBasica ::= PI PARENTESISIZQUIERDA PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionPI(1)\n" elif t[1].upper()=="POWER": t[0]=ExpresionPOWER(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionPOWER(t[3],t[5])\n" elif t[1].upper()=="RADIANS": t[0]=ExpresionRADIANS(t[3]) h.reporteGramatical1 +="funcionBasica ::= RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionRADIANS(t[3])\n" elif t[1].upper()=="ROUND": t[0]=ExpresionROUND(t[3]) h.reporteGramatical1 +="funcionBasica ::= ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionROUND(t[3])\n" elif t[1].upper()=="SIGN": t[0]=ExpresionSIGN(t[3]) h.reporteGramatical1 +="funcionBasica ::= SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSIGN(t[3])\n" elif t[1].upper()=="SQRT": t[0]=ExpresionSQRT(t[3]) h.reporteGramatical1 +="funcionBasica ::= SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSQRT(t[3])\n" elif t[1].upper()=="TRUNC": t[0]=ExpresionTRUNC(t[3]) h.reporteGramatical1 +="funcionBasica ::= TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="[0]=ExpresionTRUNC(t[3])\n" elif t[1].upper()=="WIDTH_BUCKET": t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9]) h.reporteGramatical1 +="funcionBasica ::= WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9])\n" elif t[1].upper()=="RANDOM": t[0]=ExpresionRANDOM(1) h.reporteGramatical1 +="funcionBasica ::= RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionRANDOM(1)\n" elif t[1].upper()=="ACOS": t[0]=ExpresionACOS(t[3]) h.reporteGramatical1 +="funcionBasica ::= ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionACOS(t[3])\n" elif t[1].upper()=="ACOSD": t[0]=ExpresionACOSD(t[3]) h.reporteGramatical1 +="funcionBasica ::= ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 
+="t[0]=ExpresionACOSD(t[3])\n" elif t[1].upper()=="ASIN": t[0]=ExpresionASIN(t[3]) h.reporteGramatical1 +="funcionBasica ::= ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="tt[0]=ExpresionASIN(t[3])\n" elif t[1].upper()=="ASIND": t[0]=ExpresionASIND(t[3]) h.reporteGramatical1 +="funcionBasica ::= ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionASIND(t[3])\n" elif t[1].upper()=="ATAN": t[0]=ExpresionATAN(t[3]) h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionATAN(t[3])\n" elif t[1].upper()=="ATAND": t[0]=ExpresionATAND(t[3]) h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionATAND(t[3])\n" elif t[1].upper()=="ATAN2": t[0]=ExpresionATAN2(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= ATAN2 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionATAN2(t[3],t[5])\n" elif t[1].upper()=="ATAN2D": t[0]=ExpresionATAN2D(t[3],t[5]) h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionATAN2D(t[3],t[5])\n" elif t[1].upper()=="COS": t[0]=ExpresionCOS(t[3]) h.reporteGramatical1 +="funcionBasica ::= COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCOS(t[3])\n" elif t[1].upper()=="COSD": t[0]=ExpresionCOSD(t[3]) h.reporteGramatical1 +="funcionBasica ::= COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCOSD(t[3])\n" elif t[1].upper()=="COT": t[0]=ExpresionCOT(t[3]) h.reporteGramatical1 +="funcionBasica ::= COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCOT(t[3])\n" elif t[1].upper()=="COTD": t[0]=ExpresionCOTD(t[3]) h.reporteGramatical1 +="funcionBasica ::= COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCOTD(t[3])\n" elif t[1].upper()=="SIN": t[0]=ExpresionSIN(t[3]) h.reporteGramatical1 +="funcionBasica ::= SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSIN(t[3])\n" elif t[1].upper()=="SIND": t[0]=ExpresionSIND(t[3]) h.reporteGramatical1 +="funcionBasica ::= SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSIND(t[3])\n" elif t[1].upper()=="TAN": t[0]=ExpresionTAN(t[3]) h.reporteGramatical1 +="funcionBasica ::= TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionTAN(t[3])\n" elif t[1].upper()=="TAND": t[0]=ExpresionTAND(t[3]) h.reporteGramatical1 +="funcionBasica ::= TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionTAND(t[3])\n" elif t[1].upper()=="SINH": t[0]=ExpresionSINH(t[3]) h.reporteGramatical1 +="funcionBasica ::= SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSINH(t[3])\n" elif t[1].upper()=="COSH": t[0]=ExpresionCOSH(t[3]) h.reporteGramatical1 +="funcionBasica ::= COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionCOSH(t[3])\n" elif t[1].upper()=="TANH": t[0]=ExpresionTANH(t[3]) h.reporteGramatical1 +="funcionBasica ::= TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionTANH(t[3])\n" elif t[1].upper()=="ASINH": t[0]=ExpresionASINH(t[3]) 
h.reporteGramatical1 +="funcionBasica ::= ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionASINH(t[3])\n" elif t[1].upper()=="ACOSH": t[0]=ExpresionACOSH(t[3]) h.reporteGramatical1 +="funcionBasica ::= ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionACOSH(t[3])\n" elif t[1].upper()=="ATANH": t[0]=ExpresionATANH(t[3]) h.reporteGramatical1 +="funcionBasica ::= ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionATANH(t[3])\n" elif t[1].upper()=="GREATEST": t[0]=ExpresionGREATEST(t[3]) h.reporteGramatical1 +="funcionBasica ::= GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionGREATEST(t[3])\n" elif t[1].upper()=="LEAST": t[0]=ExpresionLEAST(t[3]) h.reporteGramatical1 +="funcionBasica ::= LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionLEAST(t[3])\n" elif t[1].upper()=="NOW": t[0]=ExpresionNOW(1) h.reporteGramatical1 +="funcionBasica ::= NOW PARENTESISIZQUIERDA PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionNOW(1)\n" elif t[1].upper()=="LENGTH": h.reporteGramatical1 +="funcionBasica ::= LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionLENGTH(t[3])\n" t[0]=ExpresionLENGTH(t[3]) elif t[1].upper()=="TRIM": h.reporteGramatical1 +="funcionBasica ::= TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionTRIM(t[3],t[4],t[6])\n" t[0]=ExpresionTRIM(t[3],t[4],t[6]) elif t[1].upper()=="GET_BYTE": h.reporteGramatical1 +="funcionBasica ::= GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" elif t[1].upper()=="MD5": h.reporteGramatical1 +="funcionBasica ::= MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionMD5(t[3])\n" t[0]=ExpresionMD5(t[3]) elif t[1].upper()=="SET_BYTE": h.reporteGramatical1 +="funcionBasica ::= SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n" elif t[1].upper()=="SHA256": h.reporteGramatical1 +="funcionBasica ::= SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSHA256(t[3])\n" t[0]=ExpresionSHA256(t[3]) elif t[1].upper()=="SUBSTR": h.reporteGramatical1 +="funcionBasica ::= SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSUBSTR(t[3],t[5],t[7])\n" t[0]=ExpresionSUBSTR(t[3],t[5],t[7]) elif t[1].upper()=="CONVERT": h.reporteGramatical1 +="funcionBasica ::= CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n" elif t[1].upper()=="ENCODE": h.reporteGramatical1 +="funcionBasica ::= ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" elif t[1].upper()=="DECODE": h.reporteGramatical1 +="funcionBasica ::= DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n" elif t[1].upper()=="AVG": h.reporteGramatical1 +="funcionBasica ::= AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" elif t[1].upper()=="SUM": h.reporteGramatical1 +="funcionBasica ::= SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" elif t[1].upper()=="EXTRACT": h.reporteGramatical1 +="funcionBasica ::= EXTRACT PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionEXTRACT(t[3],t[6])\n" 
t[0]=ExpresionEXTRACT(t[3],t[6]) elif t[1].upper()=="DATE_PART": h.reporteGramatical1 +="funcionBasica ::= DATE_PART PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA\n" elif t[1].upper()=="CURRENT_DATE": h.reporteGramatical1 +="funcionBasica ::= CURRENT_DATE \n" h.reporteGramatical2 +="t[0]=ExpresionCurrentDate(1)\n" t[0]=ExpresionCurrentDate(1) elif t[1].upper()=="CURRENT_TIME": h.reporteGramatical1 +="funcionBasica ::= CURRENT_TIME\n" h.reporteGramatical2 +="t[0]=ExpresionCurrentTime(1)\n" t[0]=ExpresionCurrentTime(1) else: print("no entra a ninguna en funcionBasica") def p_funcion_basica_1(t): 'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA' h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGA(t[3],t[5],t[7])\n" t[0]=ExpresionSUBSTRINGA(t[3],t[5],t[7]) def p_funcion_basica_2(t): 'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA' h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGB(t[3],t[5])\n" t[0]=ExpresionSUBSTRINGB(t[3],t[5]) def p_funcion_basica_3(t): 'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA' h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGC(t[3],t[5])\n" t[0]=ExpresionSUBSTRINGC(t[3],t[5]) def p_opcionTrim(t): ''' opcionTrim : LEADING | TRAILING | BOTH ''' h.reporteGramatical1 +="opcionTrim ::= "+str(t[1])+"\n" # falta mandar a las funciones de fechas y dates y todo eso if t[1].upper()=="LEADING": h.reporteGramatical1 +="funcioopcionTrimnBasica ::= LEADING\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(1)\n" t[0]=ExpresionCadenas("1") elif t[1].upper()=="TRAILING": h.reporteGramatical1 +="opcionTrim ::= TRAILING\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(2)\n" t[0]=ExpresionCadenas("2") elif t[1].upper()=="BOTH": h.reporteGramatical1 +="opcionTrim ::= BOTH\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(3)\n" t[0]=ExpresionCadenas("3") def p_opcionTiempo(t): '''opcionTiempo : YEAR | MONTH | DAY | HOUR | MINUTE | SECOND ''' if t[1].upper()=="YEAR": h.reporteGramatical1 +="opcionTiempo ::= YEAR\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(1)\n" t[0]=ExpresionCadenas("1") elif t[1].upper()=="MONTH": h.reporteGramatical1 +="opcionTiempo ::= MONTH\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(2)\n" t[0]=ExpresionCadenas("2") elif t[1].upper()=="DAY": h.reporteGramatical1 +="opcionTiempo ::= DAY\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(3)\n" t[0]=ExpresionCadenas("3") elif t[1].upper()=="HOUR": h.reporteGramatical1 +="opcionTiempo ::= HOUR\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(4)\n" t[0]=ExpresionCadenas("4") elif t[1].upper()=="MINUTE": h.reporteGramatical1 +="opcionTiempo ::= MINUTE\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(5)\n" t[0]=ExpresionCadenas("5") elif t[1].upper()=="SECOND": h.reporteGramatical1 +="opcionTiempo ::= SECOND\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(6)\n" t[0]=ExpresionCadenas("6") #-----------------------------------------------------PRODUCCIONES TERMINALES-------------------------------------------------------------------- def p_final(t): '''final : DECIMAL | ENTERO''' h.reporteGramatical1 
+="final ::= Numero("+str(t[1])+")\n" h.reporteGramatical2 +="t[0]=ExpresionNumero(t[1])\n" t[0]=ExpresionNumero(t[1]) def p_final_id(t): 'final : ID' t[0] = t[1] h.reporteGramatical1 +="final ::= ID("+str(t[1])+")\n" h.reporteGramatical2 +="t[0]=ExpresionIdentificador(t[1])\n" t[0]=ExpresionIdentificador(t[1]) def p_final_invocacion(t): 'final : ID PUNTO ID' h.reporteGramatical1 +="final ::= ID("+str(t[1])+") . ID("+str(t[3])+")\n" h.reporteGramatical2 +="t[0] = ExpresionInvocacion(t[1],t[3])\n" t[0] = ExpresionLlamame(t[1],t[3]) def p_final_invocacion_2(t): 'final : ID PUNTO POR' h.reporteGramatical1 +="final ::= ID("+str(t[1])+") . ID("+str(t[3])+")\n" h.reporteGramatical2 +="t[0] = ExpresionInvocacion(t[1],t[3])\n" t[0] = ExpresionLlamame(t[1],t[3]) def p_final_cadena(t): 'final : CADENA' t[0] = t[1] h.reporteGramatical1 +="final ::= CADENA ("+t[1]+")\n" h.reporteGramatical2 +="t[0]=ExpresionCadenas(t[1])\n" t[0]=ExpresionCadenas(t[1]) #-----------------------------------------------------INSERT BD-------------------------------------------------------------------- def p_insertBD_1(t): 'insertinBD : INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA' #print(t[3],t[6]) t[0] = InsertinDataBases(t[3],None,t[6]) h.reporteGramatical1 +="insertinBD ::= INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 += "InsertinDabaBases(t[3],t[6])\n" def p_insertBD_2(t): 'insertinBD : INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA' print(t[9]) t[0] = InsertinDataBases(t[3],t[5],t[9]) h.reporteGramatical1 +="insertinBD ::= INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 += "t[0] = InsertinDataBases(t[3],t[5],t[9])\n" # SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS def p_listaParam(t): '''listaParam : listaParam COMA listaP ''' t[1].append(t[3]) t[0] = t[1] h.reporteGramatical1 +="insertinBD ::= listaParam COMA operacion\n" h.reporteGramatical2 +="t[0]=t[1]\n" def p_listaParam_2(t): '''listaParam : listaP ''' t[0] = [t[1]] h.reporteGramatical1 +="listaParam ::= operacion\n" h.reporteGramatical2 +="t[0]=[t[1]]\n" def p_listaP_1(t): 'listaP : operacion' print("---------------",t[1]) t[0] = t[1] def p_listaP_2(t): 'listaP : ID operacion' t[0] = t[1] print(t[0]) def p_listaP_3(t): 'listaP : ID PARENTESISIZQUIERDA PARENTESISDERECHA' t[0] = t[1]+"()" print(t[0]) #-----------------------------------------------------UPDATE BD-------------------------------------------------------------------- def p_updateBD(t): 'updateinBD : UPDATE ID SET asignaciones WHERE operacion PUNTOYCOMA' t[0]= UpdateinDataBase(t[2],t[4],t[6]) h.reporteGramatical1 +="updateinBD ::= UPDATE ID SET asignacion WHERE operacion PUNTOYCOMA\n" h.reporteGramatical1 +="t[0]=UpdateinDabaBase(t[2].t[4],t[6])\n" # SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS def p_asignaciones(t): '''asignaciones : asignaciones COMA operacion ''' t[1].append(t[3]) t[0] = t[1] h.reporteGramatical1 +="asignaciones ::= asignaciones COMA operacion\n" h.reporteGramatical2 +="t[0]=t[1]\n" def p_asignaciones_2(t): '''asignaciones : operacion ''' t[0] = [t[1]] h.reporteGramatical1 +="asignaciones ::= asigna\n" h.reporteGramatical2 +="t[0]=[t[1]]\n" #-----------------------------------------------------DELETE IN BD-------------------------------------------------------------------- def 
p_deleteinBD_1(t): 'deleteinBD : DELETE FROM ID PUNTOYCOMA' t[0] = t[3] h.reporteGramatical1 +="deleteinBD ::= DELETE FROM ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=t[3]\n" def p_deleteinBD_2(t): 'deleteinBD : DELETE FROM ID WHERE operacion PUNTOYCOMA' t[0] = DeleteinDataBases(t[3],t[5]) h.reporteGramatical1 +="deleteinBD ::= DELETE FROM ID WHERE operacion PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=DeleteinDataBases(t[3],t[5])\n" #-----------------------------------------------------CREATE TABLE CON INHERITS------------------------------------------------------- def p_inheritsBD(t): 'inheritsBD : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA' t[0]=InheritsBD(t[3],t[9],t[5]) h.reporteGramatical1 +="inheritsBD ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=InheritsBD(t[3],t[9],t[5])\n" #-----------------------------------------------------CREATE TABLE-------------------------------------------------------------------- def p_createTable(t): 'createTable : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA' t[0]= CreateTable(t[3],t[5]) h.reporteGramatical1 +="createTable ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 += "t[0]= CreateTable(t[3],t[5])\n" # -------------------------------------------------------------------------------------------------------------- # SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS def p_creaColumna(t): '''creaColumnas : creaColumnas COMA Columna ''' t[1].append(t[3]) t[0] = t[1] #print(t[0]) h.reporteGramatical1 +="creaColumnas ::= creaColumnas COMA Columna\n" h.reporteGramatical2 +="t[1]\n" def p_creaColumna_2(t): '''creaColumnas : Columna ''' t[0]=[t[1]] h.reporteGramatical1 +="createTable ::= Columna\n" h.reporteGramatical2 +="[t[1]]\n" # -------------------------------------------------------------------------------------------------------------- #INICIA LAS PRODUCCIONES DE COLUMNAS def p_columna_1(t): 'Columna : ID tipo' t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],None),OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION) h.reporteGramatical1 +="Columna ::= ID tipo\n" h.reporteGramatical2 +="t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],None),OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION)" def p_columna_2(t): 'Columna : ID tipo paramOpcional' t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],t[3]),OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION) h.reporteGramatical1 +="Columna ::= ID tipo paramOpcional" h.reporteGramatical2 +="t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],t[3]),OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION)\n" def p_columna_3(t): 'Columna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA' t[0]=TipoAtributoTable(RestriccionUnique(t[3]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO) h.reporteGramatical1 +="Columna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=TipoAtributoTable(RestriccionUnique(t[3]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO)\n" def p_columna_4(t): '''Columna : constraintcheck ''' t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT) h.reporteGramatical1 +="Columna ::= constraintcheck\n" h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n" def p_columna_5(t): 'Columna : checkinColumn' 
t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE) h.reporteGramatical1 +="Columna ::= checkinColumn\n" h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n" def p_columna_6(t): 'Columna : primaryKey' t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY) h.reporteGramatical1 +="Columna ::= primaryKey\n" h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n" def p_columna_7(t): 'Columna : foreignKey' t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY) h.reporteGramatical1 +="Columna ::= foreingKey\n" h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY)\n" # -------------------------------------------------------------------------------------------------------------- #INICIA LA LISTA DE RESTRICCIONES OPCIONALES EN LAS COLUMNAS def p_paramOpcional(t): '''paramOpcional : paramOpcional paramopc ''' t[1].append(t[2]) t[0] = t[1] h.reporteGramatical1 +="paramOpcional ::= paramOpcional paramopc\n" h.reporteGramatical2 +="t[0]=t[1]\n" def p_paramOpcional_1(t): '''paramOpcional : paramopc ''' t[0] = [t[1]] h.reporteGramatical1 +="paramOpcional ::= paramopc\n" h.reporteGramatical2 +="t[0]=[t[1]]\n" # -------------------------------------------------------------------------------------------------------------- #INICIA LAS RESTRICCIONES EN LAS COLUMNAS def p_paramopc_1(t): '''paramopc : DEFAULT final | NULL | NOT NULL | UNIQUE | PRIMARY KEY ''' if t[1].upper() == "DEFAULT": t[0] = TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT) h.reporteGramatical1 +="paramopc ::= DEFAULT final\n" h.reporteGramatical2 +="TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT)\n" elif t[1].upper() == "NULL": t[0] = TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL) h.reporteGramatical1 +="paramopc ::= NULL\n" h.reporteGramatical2 +="TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL)\n" elif t[1].upper() == "NOT": t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL) h.reporteGramatical1 +="paramopc ::= NOT NULL\n" h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL)\n" elif t[1].upper() == "UNIQUE": t[0] = TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA) h.reporteGramatical1 +="paramopc ::= UNIQUE\n" h.reporteGramatical2 +="TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA)\n" elif t[1].upper() == "PRIMARY" and t[2].upper()=="KEY": t[0] = TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY) h.reporteGramatical1 +="paramopc ::= PRIMARY KEY\n" h.reporteGramatical2 +="TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n" else: print("FFFFF") # -------------------------------------------------------------------------------------------------------------- #LLAMADA A LAS RESTRICCION CHECK def p_paramopc_2(t): 'paramopc : constraintcheck' t[0] = TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT) h.reporteGramatical1 +="paramopc ::= constraintcheck\n" h.reporteGramatical2 +="t[0] = TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n" def p_paramopc_3(t): 'paramopc : checkinColumn' t[0]=TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE) h.reporteGramatical1 
+="paramopc ::= checkinColumn\n" h.reporteGramatical2 +="t[0]=TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n" # -------------------------------------------------------------------------------------------------------------- #RESTRICCION UNIQUE def p_paramopc_4(t): 'paramopc : CONSTRAINT ID UNIQUE' t[0] = TipoRestriccion(RestriccionConstraintUnique(t[2]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT) h.reporteGramatical1 +="paramopc ::= CONSTRAINT ID UNIQUE\n" h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionConstraintUnique(t[2]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT)\n" # -------------------------------------------------------------------------------------------------------------- #RESTRICION CHECK def p_checkcolumna(t): 'checkinColumn : CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA' t[0]=RestriccionCheck(t[3]) h.reporteGramatical1 +="checkinColumn ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=RestriccionCheck(t[3])\n" def p_constraintcheck(t): 'constraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA' t[0]=RestriccionConstraintCheck(t[2],t[5]) h.reporteGramatical1 +="constraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=RestriccionConstraintCheck(t[2],t[5])\n" def p_primaryKey(t): 'primaryKey : PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA' t[0]=RestriccionPrimaryKey(t[4]) h.reporteGramatical1 +="primaryKey ::= PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=RestriccionPrimaryKey(t[4])\n" def p_foreingkey(t): 'foreignKey : FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA' t[0]=RestriccionForeingkey(t[7],t[4],t[9]) h.reporteGramatical1 +="foreignKey ::= FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=RestriccionForeingkey(t[7],t[4],t[9])\n" #-----------------------------------------------------TIPOS DE DATOS-------------------------------------------------------------------- def p_tipo(t): '''tipo : SMALLINT | INTEGER | BIGINT | NUMERIC | REAL | DOUBLE PRECISION | MONEY | VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA | CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA | CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA | CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA | TEXT | BOOLEAN | TIMESTAMP | TIME | INTERVAL | DATE | YEAR | MONTH | DAY | HOUR | MINUTE | SECOND ''' # -------------------------------------------------------------------------------------------------------------- if t[1].upper()=="SMALLINT": t[0] = TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="INTEGER": t[0] = TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="BIGINT": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # 
-------------------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="NUMERIC": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="REAL": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="DOUBLE": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="MONEY": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="CHARACTER" and t[2].upper()=="VARYING": t[0]=TipoDatoColumna(t[2],t[4]) h.reporteGramatical1 +="tipo ::= CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[4])\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="VARCHAR": t[0]=TipoDatoColumna(t[1],t[3]) h.reporteGramatical1 +="tipo ::= VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="CHARACTER": t[0]=TipoDatoColumna(t[1],t[3]) h.reporteGramatical1 +="tipo ::= CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="CHAR": t[0]=TipoDatoColumna(t[1],t[3]) h.reporteGramatical1 +="tipo ::= CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="TEXT": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="BOOLEAN": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="TIMESTAMP": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif 
t[1].upper()=="TIME": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="INTERVAL": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="DATE": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="YEAR": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="MONT": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="HOUR": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="MINUT": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- elif t[1].upper()=="SECOND": t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- def p_tipo_2(t): 'tipo : DECIMAL' t[0]=TipoDatoColumna(t[1],None) h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n" # -------------------------------------------------------------------------------------------------------------- def p_tipo_3(t): 'tipo : DECIMAL PARENTESISIZQUIERDA ENTERO COMA ENTERO PARENTESISDERECHA ' val = str(t[3])+","+str(t[5]) t[0]=TipoDatoColumna(t[1],val) h.reporteGramatical1 +"tipo ::= "+str(t[1])+"("+str(t[3])+","+str(t[5])+")\n" h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],val)" #--------------------------------------------------- SENTENCIA SELECT -------------------------------------------------------------- def p_select(t): '''selectData : SELECT select_list FROM select_list WHERE search_condition opcionesSelect | SELECT POR FROM select_list WHERE search_condition opcionesSelect ''' if t[2]=='*': h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition opcionesSelect \n" print("/////////////////// SELECT CON ASTERISCO ////////////////////////") print("Columnas: ",t[2]) print("Tablas: ",t[4]) print("Where: ",QueryWhere(t[6])) print("Extras: ",t[7]) t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7]) else: h.reporteGramatical1 +="selectData ::= SELECT select_list FROM 
select_list WHERE search_condition opcionesSelect \n" print("/////////////////// SELECT SIN ASTERISCO ////////////////////////") print("Columnas: ",t[2]) print("Tablas: ",t[4]) print("Where: ",QueryWhere(t[6])) print("Extras: ",t[7]) t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7]) def p_select_1(t): '''selectData : SELECT select_list FROM select_list WHERE search_condition | SELECT POR FROM select_list WHERE search_condition ''' if t[2]=='*': h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition \n" h.reporteGramatical2 +="t[0]=Select3(t[4],QueryWhere(t[6]))\n" print("entra al select con where y asterisco/////////////////") t[0]=Select3(t[4],QueryWhere(t[6])) print("el objeto que sube") print(t[0]) else: h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list WHERE search_condition \n" h.reporteGramatical2 +=" t[0]=Select4(t[2],t[4],QueryWhere(t[6]))\n" print("entra al select con where y campos /////////////////") print(t[2]) print(t[4]) print(t[6]) t[0]=Select4(t[2],t[4],QueryWhere(t[6])) print(t[0]) # esta full def p_select_2(t): '''selectData : SELECT select_list FROM select_list | SELECT POR FROM select_list ''' if t[2]=='*': h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list \n" h.reporteGramatical2 +=" t[0]=Select(1,t[4])\n" print("entra a select_2 A") #se le agrega como segundo parametro el 2 que significa que venia asterirsco o todas las tablas t[0]=Select(1,2,t[4]) else: # select tipo 4 h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list \n" h.reporteGramatical2 +=" t[0]=Select2(2,t[2],t[4])\n" print("entra a select_2 B") print(t[2]) print(t[4]) t[0]=Select2(2,t[2],t[4]) # esta full def p_select_3(t): '''selectData : SELECT select_list ''' h.reporteGramatical1 +="selectData ::= SELECT select_list \n" h.reporteGramatical2 +=" t[0]=Select(1,t[2])\n" #se le agrega el 2do 1 si solo vienen datos y no tablas t[0]=Select(1,1,t[2]) def p_opcionesSelect_1(t): '''opcionesSelect : opcionesSelect opcionSelect ''' h.reporteGramatical1 +="opcionesSelect ::= opcionesSelect opcionSelect\n" print(t[1]) t[1].append(t[2]) t[0]=t[1] def p_opcionesSelect_2(t): '''opcionesSelect : opcionSelect ''' h.reporteGramatical1 +="opcionesSelect ::= opcionSelect\n" print(t[1]) t[0]=[t[1]] def p_opcionesSelect_3(t): '''opcionSelect : LIMIT operacion | GROUP BY select_list | HAVING select_list | ORDER BY select_list ''' if t[1].upper()=="LIMIT": h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLimit(t[2])\n" t[0]=ExpresionLimit(t[2]) elif t[1].upper()=="GROUP": h.reporteGramatical1 +="opcionSelect ::= GROUP BY select_list\n" h.reporteGramatical2 +="t[0]=ExpresionGroup(t[3])\n" t[0]=ExpresionGroup(t[3]) elif t[1].upper()=="HAVING": h.reporteGramatical1 +="opcionSelect ::= HAVING select_list\n" h.reporteGramatical2 +="t[0]=ExpresionHaving(t[2])\n" t[0]=ExpresionHaving(t[2]) elif t[1].upper()=="ORDER": h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list\n" h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],'ASC')\n" t[0]=ExpresionOrder(t[3],'ASC') def p_opcionesSelect_4(t): '''opcionSelect : LIMIT operacion OFFSET operacion | ORDER BY select_list ordenamiento ''' if t[1].upper()=="LIMIT": h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion OFFSET operacion\n" h.reporteGramatical2 +="t[0]=ExpresionLimitOffset(t[2],t[4])\n" t[0]=ExpresionLimitOffset(t[2],t[4]) elif t[1].upper()=="ORDER": h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list ordenamiento\n" 
h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],t[4])\n" t[0]=ExpresionOrder(t[3],t[4]) def p_ordenamiento(t): '''ordenamiento : ASC | DESC ''' h.reporteGramatical1 +="ordenamiento ::= "+str(t[1])+"\n" h.reporteGramatical2 +=" t[0]=str(t[1])\n" t[0]=str(t[1]) def p_search_condition_2(t): 'search_condition : final NOT IN PARENTESISIZQUIERDA selectData PARENTESISDERECHA' h.reporteGramatical1 +="search_condition ::= NOT search_condition\n" print("esta condicion es del not con operacion******************") print(t[1]) print(t[5]) t[0]=ExpresionNotIn(t[1],t[5]) #agregar eeste al arbol y 3D def p_search_condition_5(t): 'search_condition : NOT EXISTS PARENTESISIZQUIERDA selectData PARENTESISDERECHA' h.reporteGramatical1 +="search_condition ::= NOT search_condition\n" print("esta condicion es del not con operacion******************") print(t[4]) t[0]=ExpresionNotExists(t[4]) #agregar eeste al arbol y 3D def p_search_condition_6(t): 'search_condition : EXISTS PARENTESISIZQUIERDA selectData PARENTESISDERECHA' h.reporteGramatical1 +="search_condition ::= NOT search_condition\n" print("esta condicion es del not con operacion******************") print(t[3]) t[0]=ExpresionExists(t[3]) #agregar eeste al arbol y 3D def p_search_condition_7(t): 'search_condition : final IN PARENTESISIZQUIERDA selectData PARENTESISDERECHA' h.reporteGramatical1 +="search_condition ::= NOT search_condition\n" print("esta condicion es del not con operacion******************") print(t[1]) print(t[4]) t[0]=ExpresionIn(t[1],t[4]) # PARA ABAJO YA ESTA def p_search_condition_3(t): 'search_condition : operacion' h.reporteGramatical1 +="search_condition ::= operacion\n" h.reporteGramatical2 +=" t[0]=t[1]\n" print("entra a la operacion del seach_condition++++++++++++++++++++++++++++++++++++++++") print(t[1]) t[0]=t[1] def p_search_condition_4(t): 'search_condition : PARENTESISIZQUIERDA search_condition PARENTESISDERECHA' h.reporteGramatical1 +="search_condition ::= PARENTESISIZQUIERDA search_condition PARENTESISDERECHA\n" h.reporteGramatical2 +=" t[0]=t[2]\n" print("entra a la condicion con el parentesis") print(t[2]) t[0]=t[2] def p_select_list_1(t): ' select_list : select_list COMA operacion' h.reporteGramatical1 +="select_list ::= select_list COMA operacion\n" h.reporteGramatical2 +=" t[1].append(t[3])\nt[0]=t[1]\n" print("Entra a select list COMA operacion****************************************") t[1].append(t[3]) print(t[1]) t[0]=t[1] def p_select_list_6(t): ' select_list : select_list COMA asignacion' h.reporteGramatical1 +="select_list ::= select_list COMA asignacion\n" h.reporteGramatical2 +=" t[0]=Asignacion(t[1],t[3])\n" print(" entra al select_list COMA operacion-------------") t[1].append(t[3]) t[0]=t[1] print(t[0]) def p_select_list_7(t): ' select_list : asignacion' h.reporteGramatical1 +="select_list ::= asignacion\n" h.reporteGramatical2 +=" t[0]=t[1]\n" print(" entra al select_list: asignacion-------------") print(t[1]) t[0]=[t[1]] def p_select_list_2(t): 'select_list : operacion' h.reporteGramatical1 +="select_list ::= operacion\n" h.reporteGramatical2 +=" t[0]=[ExpresionFuncionBasica(t[1])]\n" print("select_list+++++++++++++++++++++++++") print(t[1]) t[0]=[ExpresionFuncionBasica(t[1])] def p_asignacion_1(t): ' asignacion : operacion AS operacion' h.reporteGramatical1 +="select_list ::= select_list AS operacion\n" h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[3])]\n" print("entra a asignacion: operacion AS operacion") t[0]=Asignacion(t[1],t[3]) def p_asignacion_2(t): ' asignacion : final final' 
h.reporteGramatical1 +="select_list ::= final final\n" h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[2])]\n" print(" entra al select_list de 2 finales-------------") t[0]=Asignacion(t[1],t[2]) print(t[0]) def p_funcion_basica_4(t): 'funcionBasica : operacion BETWEEN operacion ' h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN operacion AND operacion\n" h.reporteGramatical2 +="t[0]=ExpresionBetween(t[1],t[3])\n" print("entra al between con sus operaciones") print(t[1]) print(t[3]) t[0]=ExpresionBetween(t[1],t[3]) def p_funcion_basica_7(t): 'funcionBasica : operacion NOT BETWEEN operacion' h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN operacion AND operacion\n" h.reporteGramatical2 +="t[0]=ExpresionNotBetween(t[1],t[4])\n" print("entra al NOT between con sus operaciones") print(t[1]) print(t[3]) t[0]=ExpresionNotBetween(t[1],t[4]) def p_funcion_basica_8(t): 'funcionBasica : operacion BETWEEN SYMMETRIC operacion ' h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN SYMMETRIC operacion AND operacion\n" h.reporteGramatical2 +="t[0]=ExpresionBetweenSymmetric(t[1],t[4])\n" t[0]=ExpresionBetweenSymmetric(t[1],t[4]) def p_funcion_basica_9(t): 'funcionBasica : operacion NOT BETWEEN SYMMETRIC operacion ' h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN SYMMETRIC operacion AND operacion\n" h.reporteGramatical2 +="t[0]=ExpresionNotBetweenSymmetric(t[1],t[5])\n" t[0]=ExpresionNotBetweenSymmetric(t[1],t[5]) def p_funcion_basica_10(t): '''funcionBasica : operacion IS DISTINCT FROM operacion ''' h.reporteGramatical1 +="funcionBasica ::= operacion IS DISTINCT FROM operacion\n" h.reporteGramatical2 +="t[0]=ExpresionIsDistinct(t[1],t[5])\n" print("entra al IS DISTINCT ++++++++++++++++++") t[0]=ExpresionIsDistinct(t[1],t[5]) def p_funcion_basica_11(t): '''funcionBasica : operacion IS NOT DISTINCT FROM operacion''' h.reporteGramatical1 +="funcionBasica ::= operacion IS NOT DISTINCT FROM operacion\n" h.reporteGramatical2 +="t[0]=ExpresionIsNotDistinct(t[1],t[6])\n" print("entra al IS NOT DISTINCT ++++++++++++++++++") t[0]=ExpresionIsNotDistinct(t[1],t[6]) def p_tipos(t): '''tipos : CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA''' print("entra al enum++++++++++++++++++++++++++++++++") h.reporteGramatical1 +="tipos ::= CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=Tipo(t[3],t[7])\n" print(t[3]) print(t[7]) t[0]=Tipo(t[3],t[7]) #debo agregar estos al arbol y a la 3D #-------------------------------------------------------------------------------------------------------------------- # AGREGACION DEL UNION def p_combinacionSelects(t): '''combinacionSelects : selectData UNION selectData | selectData INTERSECT selectData | selectData EXCEPT selectData ''' print("*************************Entra a procesar el UNION********************") if t[2].upper()=="UNION": t[0]=QueryUnion(t[1],t[3]) elif t[2].upper()=="INTERSECT": t[0]=QueryIntersect(t[1],t[3]) elif t[2].upper()=="EXCEPT": t[0]=QueryExcept(t[1],t[3]) def p_select_4(t): '''selectData : SELECT select_list FROM tipoJoin | SELECT POR FROM tipoJoin ''' if t[2]=='*': print("entro al select * tipo join ++++++++++++++++++++++++++++++") print(t[2]) t[0]=Select6(t[2],t[4]) else: print("entro al select lista tipo join ++++++++++++++++++++++++++++++") print(t[2]) t[0]=Select6(t[2],t[4]) def p_tipoJoin_1(t): '''tipoJoin : select_list INNER JOIN select_list ON operacion | select_list NATURAL INNER JOIN select_list 
''' if t[2].upper()=="INNER": print("entro al tipoJoin1 INNER----------------------------------------------------") print(t[1]) print(t[2]) print(t[4]) print(t[6]) t[0]=ExpresionJoinA(t[1],t[2],t[4],t[6]) elif t[2].upper()=="NATURAL": print("entro al NATURAL ----------------------------------------------------") print(t[1]) print(t[2]) print(t[3]) print(t[5]) t[0]=ExpresionJoinB(t[1],t[2],t[3],t[5]) def p_tipoJoin_2(t): '''tipoJoin : select_list otroTipoJoin OUTER JOIN select_list ON operacion | select_list NATURAL otroTipoJoin OUTER JOIN select_list ''' if t[2].upper()=="NATURAL": print("entro al tipoJoin2 NATURAL ----------------------------------------------------") print(t[1]) print(t[2]) print(t[3]) print(t[4]) print(t[6]) t[0]=ExpresionJoinC(t[1],t[2],t[3],t[4],t[6]) else: print("entro al tipoJoin2 ELSE ----------------------------------------------------") print(t[1]) print(t[2]) print(t[3]) print(t[5]) print(t[7]) t[0]=ExpresionJoinD(t[1],t[2],t[3],t[5],t[7]) def p_otroTipoJoin(t): ''' otroTipoJoin : LEFT | RIGHT | FULL ''' print("entra al otro tipo de join para su condicion") t[0]=t[1] def p_execFunction(t): 'execFunction : execOption ID PUNTOYCOMA' h.reporteGramatical1 +="execFunction ::= execOption ID PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=execFunction(t[2])\n" t[0]=execFunction(t[2]) def p_execFunction_1(t): 'execFunction : execOption ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="execFunction ::= execOption ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=execFunctionParams(t[2],t[4])\n" t[0]=execFunctionParams(t[2],t[4]) def p_execFunction_2(t): 'execFunction : execOption ID PARENTESISIZQUIERDA PARENTESISDERECHA PUNTOYCOMA' h.reporteGramatical1 +="execFunction ::= execOption ID PARENTESISIZQUIERDA PARENTESISDERECHA PUNTOYCOMA\n" h.reporteGramatical2 +="t[0]=execFunction(t[2])\n" t[0]=execFunction(t[2]) def p_execOption_1(t): 'execOption : EXEC' t[0] = t[1] def p_execOption_2(t): 'execOption : EXECUTE' t[0] = t[1] #para manejar los errores sintacticos #def p_error(t): #en modo panico :v # print("token error: ",t) # print("Error sintáctico en '%s'" % t.value[0]) # print("Error sintáctico en '%s'" % t.value[1]) #def p_error(t): #en modo panico :v # while True: # tok=parser.token() # if not tok or tok.type==';':break # parser.errok() # return tok def find_column(input, token): line_start = input.rfind('\n', 0, token.lexpos) + 1 print((token.lexpos - line_start) +1 ) return (token.lexpos - line_start) def p_error(t): print("token: '%s'" %t) print("Error sintáctico en '%s' " % t.value) #h.filapivote+=1 x=caden.splitlines() filas=len(x)-1 print("filas que no cambian: ",filas) if h.filapivote>0: fila=(t.lineno-1)-h.filapivote*filas else: fila=(t.lineno-1) h.filapivote+=1 h.errores+= "<tr><td>"+str(t.value)+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>SINTACTICO</td><td>el token no va aqui</td></tr>\n" print("Error sintáctico fila '%s'" % fila) print("Error sintáctico col '%s'" % find_column(caden,t)) if not t: print("End of File!") return # Read ahead looking for a closing '}' while True: tok = parser.token() # Get the next token if not tok or tok.type == 'PUNTOYCOMA': break parser.restart() import ply.yacc as yacc parser = yacc.yacc() def parse(input) : global caden caden="" caden=input return parser.parse(input)
the-stack_0_5909
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import random import torch import torchvision from torchvision.transforms import functional as F from .rf_transforms import ( RandomHorizontalFlip3D, RandomVerticalFlip3D, Pad3D, CalibrateMWPose, GenerateHMS, ) from .pc_transforms import ( SplitSourceRef, Resampler, FixedResampler, RandomJitter, RandomCrop, RandomTransformSE3, RandomTransformSE3_euler, RandomRotatorZ, ShufflePoints, SetDeterministic, ) class Compose(object): def __init__(self, transforms): self.transforms = transforms def __call__(self, image, target): for t in self.transforms: image, target = t(image, target) return image, target def __repr__(self): format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" format_string += " {0}".format(t) format_string += "\n)" return format_string class Resize(object): def __init__(self, min_size, max_size): if not isinstance(min_size, (list, tuple)): min_size = (min_size,) self.min_size = min_size self.max_size = max_size # modified from torchvision to add support for max size def get_size(self, image_size): w, h = image_size size = random.choice(self.min_size) max_size = self.max_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def __call__(self, image, target=None): size = self.get_size(image.size) image = F.resize(image, size) if target is None: return image target = target.resize(image.size) return image, target class RandomHorizontalFlip(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: image = F.hflip(image) target = target.transpose(0) return image, target class RandomVerticalFlip(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: image = F.vflip(image) target = target.transpose(1) return image, target class ColorJitter(object): def __init__(self, brightness=None, contrast=None, saturation=None, hue=None, ): self.color_jitter = torchvision.transforms.ColorJitter( brightness=brightness, contrast=contrast, saturation=saturation, hue=hue,) def __call__(self, image, target): image = self.color_jitter(image) return image, target class ToTensor(object): def __call__(self, image, target): return F.to_tensor(image), target class Normalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, image, target=None): image = F.normalize(image, mean=self.mean, std=self.std) if target is None: return image return image, target
the-stack_0_5910
# Forked from https://github.com/psobot/keynote-parser/blob/master/keynote_parser/codec.py import struct import snappy from functools import partial from numbers_parser.mapping import ID_NAME_MAP from numbers_parser.exceptions import NotImplementedError from google.protobuf.internal.decoder import _DecodeVarint32 from google.protobuf.json_format import MessageToDict from numbers_parser.generated.TSPArchiveMessages_pb2 import ArchiveInfo class IWAFile(object): def __init__(self, chunks, filename=None): self.chunks = chunks self.filename = filename @classmethod def from_buffer(cls, data, filename=None): try: chunks = [] while data: chunk, data = IWACompressedChunk.from_buffer(data, filename) chunks.append(chunk) return cls(chunks, filename) except Exception as e: # pragma: no cover if filename: raise ValueError("Failed to deserialize " + filename) from e else: raise def to_dict(self): try: return {"chunks": [chunk.to_dict() for chunk in self.chunks]} except Exception as e: # pragma: no cover if self.filename: raise ValueError("Failed to serialize " + self.filename) from e else: raise class IWACompressedChunk(object): def __init__(self, archives): self.archives = archives def __eq__(self, other): return self.archives == other.archives # pragma: no cover @classmethod def _decompress_all(cls, data): while data: header = data[:4] first_byte = header[0] if not isinstance(first_byte, int): first_byte = ord(first_byte) if first_byte != 0x00: raise ValueError( # pragma: no cover "IWA chunk does not start with 0x00! (found %x)" % first_byte ) unpacked = struct.unpack_from("<I", bytes(header[1:]) + b"\x00") length = unpacked[0] chunk = data[4 : 4 + length] data = data[4 + length :] try: yield snappy.uncompress(chunk) except Exception: # pragma: no cover # Try to see if this data isn't compressed in the first place. # If this data is still compressed, parsing it as Protobuf # will almost definitely fail anyways. yield chunk @classmethod def from_buffer(cls, data, filename=None): data = b"".join(cls._decompress_all(data)) archives = [] while data: archive, data = IWAArchiveSegment.from_buffer(data, filename) archives.append(archive) return cls(archives), None def to_dict(self): return {"archives": [archive.to_dict() for archive in self.archives]} class ProtobufPatch(object): def __init__(self, data): self.data = data def __eq__(self, other): return self.data == other.data # pragma: no cover def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.data) # pragma: no cover def to_dict(self): return message_to_dict(self.data) @classmethod def FromString(cls, message_info, proto_klass, data): # Recent numbers does not apear to store date this way assert len(message_info.diff_field_path.path) != 1 return cls(proto_klass.FromString(data)) class IWAArchiveSegment(object): def __init__(self, header, objects): self.header = header self.objects = objects def __eq__(self, other): return ( self.header == other.header and self.objects == other.objects ) # pragma: no cover def __repr__(self): return "<%s identifier=%s objects=%s>" % ( # pragma: no cover self.__class__.__name__, self.header.identifier, repr(self.objects).replace("\n", " ").replace(" ", " "), ) @classmethod def from_buffer(cls, buf, filename=None): archive_info, payload = get_archive_info_and_remainder(buf) if not repr(archive_info): raise ValueError( "Segment doesn't seem to start with an ArchiveInfo!" 
) # pragma: no cover payloads = [] n = 0 for message_info in archive_info.message_infos: try: if message_info.type == 0 and archive_info.should_merge and payloads: base_message = archive_info.message_infos[ message_info.base_message_index ] klass = partial( ProtobufPatch.FromString, message_info, ID_NAME_MAP[base_message.type], ) else: klass = ID_NAME_MAP[message_info.type] except KeyError: # pragma: no cover raise NotImplementedError( "Don't know how to parse Protobuf message type " + str(message_info.type) ) try: message_payload = payload[n : n + message_info.length] if hasattr(klass, "FromString"): output = klass.FromString(message_payload) else: output = klass(message_payload) except Exception as e: # pragma: no cover raise ValueError( "Failed to deserialize %s payload of length %d: %s" % (klass, message_info.length, e) ) payloads.append(output) n += message_info.length return cls(archive_info, payloads), payload[n:] def to_dict(self): return { "header": header_to_dict(self.header), "objects": [message_to_dict(message) for message in self.objects], } def message_to_dict(message): if hasattr(message, "to_dict"): return message.to_dict() output = MessageToDict(message) output["_pbtype"] = type(message).DESCRIPTOR.full_name return output def header_to_dict(message): output = message_to_dict(message) for message_info in output["messageInfos"]: del message_info["length"] return output def get_archive_info_and_remainder(buf): msg_len, new_pos = _DecodeVarint32(buf, 0) n = new_pos msg_buf = buf[n : n + msg_len] n += msg_len return ArchiveInfo.FromString(msg_buf), buf[n:]
the-stack_0_5911
from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time

import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist

from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger


# Argument parser: these are the options that can follow the command when
# running tools/train.py from the terminal; the argparse module makes it easy
# to write user-friendly command-line interfaces.
def parse_args():
    parser = argparse.ArgumentParser(description='Train a detector')  # step 1: create a parser
    parser.add_argument('config', help='train config file path')  # step 2: add arguments
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--validate',
        action='store_true',
        help='whether to evaluate the checkpoint during training')
    parser.add_argument(
        '--gpus',
        type=int,
        default=1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()  # step 3: parse the arguments
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    return args


def main():
    args = parse_args()

    # See mmcv/utils/config.py: when a config file is passed on the command
    # line, the model's configuration parameters are loaded into cfg.
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:  # if no save path is given, the default path is used
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed

    # Initialize the model from the parameters passed in via the config file.
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]  # build the training dataset
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # This step hands the model, the data and the config over to training;
    # trace into mmdet/apis/train.py for what happens next.
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
the-stack_0_5913
from __future__ import absolute_import, print_function

"""
Command for starting up an authenticating reverse proxy for use in development.

Please, don't use me in production!
"""

import six.moves.BaseHTTPServer
from django.conf import settings
import getpass
import socket

from nsot.util.commands import NsotCommand, CommandError


class Command(NsotCommand):
    help = "Start an authenticating reverse proxy for use in development."

    def add_arguments(self, parser):
        parser.add_argument(
            "username",
            nargs="?",
            default=getpass.getuser(),
            help="Username used for authentication.",
        )
        parser.add_argument(
            "-a",
            "--address",
            type=str,
            default=settings.NSOT_HOST,
            help="Address to listen on.",
        )
        parser.add_argument(
            "-d",
            "--domain",
            type=str,
            default="localhost",
            help="Domain for user account.",
        )
        parser.add_argument(
            "-H",
            "--auth-header",
            type=str,
            default=settings.USER_AUTH_HEADER,
            help="HTTP user auth header name.",
        )
        parser.add_argument(
            "-P",
            "--backend-port",
            type=int,
            default=settings.NSOT_PORT,
            help="Port to proxy to.",
        )
        parser.add_argument(
            "-p",
            "--listen-port",
            type=int,
            default=settings.NSOT_PORT + 1,
            help="Port to listen on.",
        )

    def handle(self, **options):
        username = options.get("username")
        try:
            from mrproxy import UserProxyHandler
        except ImportError:
            raise SystemExit(
                "mrproxy is required for the user proxy. Please see "
                "README.rst."
            )

        class ServerArgs(object):
            """Argument container for http service."""

            def __init__(self, backend_port, username, auth_header):
                self.backend_port = backend_port
                self.header = ["%s: %s" % (auth_header, username)]

        username = "%s@%s" % (username, options.get("domain"))
        address = options.get("address")
        auth_header = options.get("auth_header")
        backend_port = options.get("backend_port")
        listen_port = options.get("listen_port")

        # Try to start the server
        try:
            server = six.moves.BaseHTTPServer.HTTPServer(
                (address, listen_port), UserProxyHandler
            )
        except socket.error as err:
            raise CommandError(err)
        else:
            server.args = ServerArgs(backend_port, username, auth_header)

        # Run until we hit ctrl-C
        try:
            print(
                "Starting proxy on %s %s => %s, auth '%s: %s'"
                % (address, backend_port, listen_port, auth_header, username)
            )
            server.serve_forever()
        except KeyboardInterrupt:
            print("Bye!")
the-stack_0_5914
"""Builder for websites.""" import os import shutil from regolith.builders.basebuilder import BuilderBase from regolith.dates import get_dates from regolith.fsclient import _id_key from regolith.sorters import ene_date_key, position_key from regolith.tools import ( all_docs_from_collection, filter_publications, filter_projects, make_bibtex_file, document_by_value, dereference_institution, ) class HtmlBuilder(BuilderBase): """Build HTML files for website""" btype = "html" def __init__(self, rc): super().__init__(rc) # TODO: get this from the RC self.cmds = [ "root_index", "people", "projects", "blog", "jobs", "abstracts", "nojekyll", "cname", "finish", ] def construct_global_ctx(self): """Constructs the global context""" super().construct_global_ctx() gtx = self.gtx rc = self.rc gtx["jobs"] = list(all_docs_from_collection(rc.client, "jobs")) gtx["people"] = sorted( all_docs_from_collection(rc.client, "people"), key=position_key, reverse=True, ) gtx["abstracts"] = list( all_docs_from_collection(rc.client, "abstracts") ) gtx["group"] = document_by_value( all_docs_from_collection(rc.client, "groups"), "name", rc.groupname ) gtx["all_docs_from_collection"] = all_docs_from_collection gtx["institutions"] = sorted( all_docs_from_collection(rc.client, "institutions"), key=_id_key ) def finish(self): """Move files over to their destination and remove them from the source""" # static stsrc = os.path.join( getattr(self.rc, "static_source", "templates"), "static" ) stdst = os.path.join(self.bldir, "static") if os.path.isdir(stdst): shutil.rmtree(stdst) if os.path.isdir(stsrc): shutil.copytree(stsrc, stdst) def root_index(self): """Render root index""" self.render("root_index.html", "index.html", title="Home") make_bibtex_file(list(all_docs_from_collection(self.rc.client, "citations")), pid='group', person_dir=self.bldir, ) def people(self): """Render people, former members, and each person""" rc = self.rc peeps_dir = os.path.join(self.bldir, "people") former_peeps_dir = os.path.join(self.bldir, "former") os.makedirs(peeps_dir, exist_ok=True) os.makedirs(former_peeps_dir, exist_ok=True) peeps = self.gtx["people"] for p in peeps: names = frozenset(p.get("aka", []) + [p["name"]]) pubs = filter_publications( all_docs_from_collection(rc.client, "citations"), names, reverse=True, bold=False, ) bibfile = make_bibtex_file( pubs, pid=p["_id"], person_dir=peeps_dir ) ene = p.get("employment", []) + p.get("education", []) ene.sort(key=ene_date_key, reverse=True) for e in ene: dereference_institution(e, all_docs_from_collection( rc.client, "institutions")) projs = filter_projects( all_docs_from_collection(rc.client, "projects"), names ) for serve in p.get("service", []): serve_dates = get_dates(serve) date = serve_dates.get("date") if not date: date = serve_dates.get("end_date") if not date: date = serve_dates.get("begin_date") serve["year"] = date.year serve["month"] = date.month sns = p.get("service", []) sns.sort(key=ene_date_key, reverse=True) p["service"] = sns self.render( "person.html", os.path.join("people", p["_id"] + ".html"), p=p, title=p.get("name", ""), pubs=pubs, names=names, bibfile=bibfile, education_and_employment=ene, projects=projs, ) self.render( "people.html", os.path.join("people", "index.html"), title="People" ) self.render( "former.html", os.path.join("former", "index.html"), title="Former Members", ) def projects(self): """Render projects""" rc = self.rc projs = all_docs_from_collection(rc.client, "projects") self.render( "projects.html", "projects.html", title="Projects", projects=projs ) 
def blog(self): """Render the blog and rss""" rc = self.rc blog_dir = os.path.join(self.bldir, "blog") os.makedirs(blog_dir, exist_ok=True) posts = list(all_docs_from_collection(rc.client, "blog")) posts.sort(key=ene_date_key, reverse=True) for post in posts: self.render( "blog_post.html", os.path.join("blog", post["_id"] + ".html"), post=post, title=post["title"], ) self.render( "blog_index.html", os.path.join("blog", "index.html"), title="Blog", posts=posts, ) self.render("rss.xml", os.path.join("blog", "rss.xml"), items=posts) def jobs(self): """Render the jobs and each job""" jobs_dir = os.path.join(self.bldir, "jobs") os.makedirs(jobs_dir, exist_ok=True) for job in self.gtx["jobs"]: self.render( "job.html", os.path.join("jobs", job["_id"] + ".html"), job=job, title="{0} ({1})".format(job["title"], job["_id"]), ) self.render( "jobs.html", os.path.join("jobs", "index.html"), title="Jobs" ) def abstracts(self): """Render each abstract""" abs_dir = os.path.join(self.bldir, "abstracts") os.makedirs(abs_dir, exist_ok=True) for ab in self.gtx["abstracts"]: self.render( "abstract.html", os.path.join("abstracts", ab["_id"] + ".html"), abstract=ab, title="{0} {1} - {2}".format( ab["firstname"], ab["lastname"], ab["title"] ), ) def nojekyll(self): """Touches a nojekyll file in the build dir""" with open(os.path.join(self.bldir, ".nojekyll"), "a+"): pass def cname(self): """Add CNAME""" rc = self.rc if not hasattr(rc, "cname"): return with open( os.path.join(self.bldir, "CNAME"), "w", encoding="utf-8" ) as f: f.write(rc.cname)
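# Hedged usage sketch (not part of the original module). In regolith, builders
# like this one are normally driven by the CLI (e.g. `regolith build html`),
# which constructs a run-control object and hands it to the builder. Assuming
# `rc` is such an object (with client, groupname, builddir, and static_source
# configured) and that BuilderBase exposes a build() entry point that runs
# construct_global_ctx() and then each command named in self.cmds, the flow
# looks roughly like:
#
#     builder = HtmlBuilder(rc)
#     builder.build()   # root_index, people, projects, blog, jobs, ... then finish()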
the-stack_0_5915
""" # Definition for a Node. class Node(object): def __init__(self, val, children): self.val = val self.children = children """ class Solution(object): def maxDepth(self, root): """ :type root: Node :rtype: int """ if not root: return 0 res = 1 for child in root.children: if not child: return 1 res = max(res, 1 + self.maxDepth(child)) return res
the-stack_0_5916
# Copyright (c) 2009 Aldo Cortesi # Copyright (c) 2011 Florian Mounier # Copyright (c) 2011 Anshuman Bhaduri # Copyright (c) 2012 Tycho Andersen # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import pytest from multiprocessing import Value import libqtile.log_utils import libqtile.core import libqtile.utils import libqtile.hook import logging from .conftest import BareConfig # TODO: more tests required. # 1. Check all hooks that can be fired class Call: def __init__(self, val): self.val = val def __call__(self, val): self.val = val @pytest.yield_fixture def hook_fixture(): class Dummy: pass dummy = Dummy() libqtile.log_utils.init_log(logging.CRITICAL, log_path=None, log_color=False) libqtile.hook.init(dummy) yield libqtile.hook.clear() def test_cannot_fire_unknown_event(): with pytest.raises(libqtile.utils.QtileError): libqtile.hook.fire("unknown") @pytest.mark.usefixtures("hook_fixture") def test_hook_calls_subscriber(): test = Call(0) libqtile.core.manager.hook.subscribe.group_window_add(test) libqtile.core.manager.hook.fire("group_window_add", 8) assert test.val == 8 @pytest.mark.usefixtures("hook_fixture") def test_subscribers_can_be_added_removed(): test = Call(0) libqtile.core.manager.hook.subscribe.group_window_add(test) assert libqtile.core.manager.hook.subscriptions libqtile.core.manager.hook.clear() assert not libqtile.core.manager.hook.subscriptions @pytest.mark.usefixtures("hook_fixture") def test_can_unsubscribe_from_hook(): test = Call(0) libqtile.core.manager.hook.subscribe.group_window_add(test) libqtile.core.manager.hook.fire("group_window_add", 3) assert test.val == 3 libqtile.core.manager.hook.unsubscribe.group_window_add(test) libqtile.core.manager.hook.fire("group_window_add", 4) assert test.val == 3 def test_can_subscribe_to_startup_hooks(qtile_nospawn): config = BareConfig self = qtile_nospawn self.startup_once_calls = Value('i', 0) self.startup_calls = Value('i', 0) self.startup_complete_calls = Value('i', 0) def inc_startup_once_calls(): self.startup_once_calls.value += 1 def inc_startup_calls(): self.startup_calls.value += 1 def inc_startup_complete_calls(): self.startup_complete_calls.value += 1 libqtile.core.manager.hook.subscribe.startup_once(inc_startup_once_calls) libqtile.core.manager.hook.subscribe.startup(inc_startup_calls) libqtile.core.manager.hook.subscribe.startup_complete(inc_startup_complete_calls) self.start(config) self.start_qtile = True assert self.startup_once_calls.value == 1 assert self.startup_calls.value == 1 assert self.startup_complete_calls.value 
== 1 # TODO Restart and check that startup_once doesn't fire again @pytest.mark.usefixtures('hook_fixture') def test_can_update_by_selection_change(qtile): test = Call(0) libqtile.core.manager.hook.subscribe.selection_change(test) libqtile.core.manager.hook.fire('selection_change', 'hello') assert test.val == 'hello' @pytest.mark.usefixtures('hook_fixture') def test_can_call_by_selection_notify(qtile): test = Call(0) libqtile.core.manager.hook.subscribe.selection_notify(test) libqtile.core.manager.hook.fire('selection_notify', 'hello') assert test.val == 'hello'
the-stack_0_5917
# -*- coding: utf-8 -*- import json import os.path import sys import yaml from lemoncheesecake.project import Project class MyProject(Project): def build_report_title(self): with open(os.path.join(os.path.dirname(__file__), "docker-compose.yml")) as compose_file: compose = yaml.load(compose_file, Loader=yaml.FullLoader) echo_image_name = compose["services"]["echo"]["image"] echo_image_version = echo_image_name.replace("echoprotocol/echo:", "") return "ECHO tests (ECHO v. {})".format(echo_image_version) project_dir = os.path.dirname(__file__) sys.path.append(project_dir) project = MyProject(project_dir) project.metadata_policy.add_property_rule("main", "type", on_suite=True, required=False) project.metadata_policy.add_property_rule("positive", "type", on_suite=True, required=False) project.metadata_policy.add_property_rule("negative", "type", on_suite=True, required=False) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), "resources") genesis_path = "genesis.json" if "GENESIS_FILE" not in os.environ else os.environ["GENESIS_FILE"] GENESIS = json.load(open(os.path.join(os.path.dirname(__file__), genesis_path))) if "ROPSTEN" in os.environ and os.environ["ROPSTEN"].lower() != "false": ROPSTEN = True else: ROPSTEN = False if "DEBUG" in os.environ and os.environ["DEBUG"].lower() != "false": DEBUG = True else: DEBUG = False if "BASE_URL" not in os.environ: BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"] else: BASE_URL = os.environ["BASE_URL"] if "WALLET_URL" not in os.environ: WALLET_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["WALLET_URL"] else: WALLET_URL = os.environ["WALLET_URL"] if "ETHEREUM_URL" not in os.environ: ETHEREUM_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["ETHEREUM_URL"] else: ETHEREUM_URL = os.environ["ETHEREUM_URL"] if "BITCOIN_URL" not in os.environ: BITCOIN_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BITCOIN_URL"] else: BITCOIN_URL = os.environ["BITCOIN_URL"] if "ETHRPC_URL" not in os.environ: ETHRPC_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["ETHRPC_URL"] else: ETHRPC_URL = os.environ["ETHRPC_URL"] if "TESTRPC_URL" not in os.environ: TESTRPC_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["TESTRPC_URL"] else: TESTRPC_URL = os.environ["TESTRPC_URL"] if "NATHAN_PK" not in os.environ: NATHAN_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["NATHAN_PK"] else: NATHAN_PK = os.environ["NATHAN_PK"] if "INIT0_PK" not in os.environ: INIT0_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT0_PK"] else: INIT0_PK = os.environ["INIT0_PK"] if "INIT1_PK" not in os.environ: INIT1_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT1_PK"] else: INIT1_PK = os.environ["INIT1_PK"] if "INIT2_PK" not in os.environ: INIT2_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT2_PK"] else: INIT2_PK = os.environ["INIT2_PK"] if "INIT3_PK" not in os.environ: INIT3_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT3_PK"] else: INIT3_PK = os.environ["INIT3_PK"] if "INIT4_PK" not in os.environ: INIT4_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT4_PK"] else: INIT4_PK = os.environ["INIT4_PK"] ECHO_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "echo_operations.json"))) ECHO_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "echo_contracts.json"))) WALLETS = os.path.join(RESOURCES_DIR, "wallets.json") UTILS = 
os.path.join(RESOURCES_DIR, "utils.json") ECHO_INITIAL_BALANCE = int(GENESIS["initial_balances"][0]["amount"]) ECHO_ASSET_SYMBOL = GENESIS["initial_balances"][0]["asset_symbol"] INITIAL_ACCOUNTS = GENESIS["initial_accounts"] INITIAL_COMMITTEE_CANDIDATES = GENESIS["initial_committee_candidates"] INITIAL_ACCOUNTS_COUNT = len(INITIAL_ACCOUNTS) INITIAL_ACCOUNTS_NAMES = [] for i in range(INITIAL_ACCOUNTS_COUNT): INITIAL_ACCOUNTS_NAMES.append(INITIAL_ACCOUNTS[i]["name"]) INITIAL_COMMITTEE_ETH_ADDRESSES = [] for i, initial_committee_candidate in enumerate(INITIAL_COMMITTEE_CANDIDATES): if initial_committee_candidate["owner_name"] == INITIAL_ACCOUNTS_NAMES[i]: INITIAL_COMMITTEE_ETH_ADDRESSES.append(initial_committee_candidate["eth_address"]) ACCOUNT_PREFIX = "account" DEFAULT_ACCOUNTS_COUNT = 1000 MAIN_TEST_ACCOUNT_COUNT = 1 BLOCK_RELEASE_INTERVAL = 5 BLOCKS_NUM_TO_WAIT = 10 REQUIRED_DEPOSIT_AMOUNT = GENESIS["initial_parameters"]["committee_frozen_balance_to_activate"] UNFREEZE_DURATION_SECONDS = GENESIS["initial_parameters"]["committee_balance_unfreeze_duration_seconds"] BASE_ASSET_SYMBOL, ETH_ASSET_SYMBOL = "ECHO", "EETH" ETH_ASSET_ID = GENESIS["initial_parameters"]["sidechain_config"]["ETH_asset_id"] BTC_ASSET_ID = GENESIS["initial_parameters"]["sidechain_config"]["BTC_asset_id"] ETH_CONTRACT_ADDRESS = "0x" + GENESIS["initial_parameters"]["sidechain_config"]["eth_contract_address"] UNPAID_FEE_METHOD = "0x19c4518a" COMMITTEE = "0x130f679d" SATOSHI_PRECISION = 100000000 GAS_PRICE = GENESIS["initial_parameters"]["sidechain_config"]["gas_price"] MIN_ETH_WITHDRAW_FEE = GENESIS["initial_parameters"]["sidechain_config"]["eth_withdrawal_fee"] MIN_ETH_WITHDRAW = GENESIS["initial_parameters"]["sidechain_config"]["eth_withdrawal_min"] SATOSHI_PER_BYTE = GENESIS["initial_parameters"]["sidechain_config"]["satoshis_per_byte"] BTC_FEE = GENESIS["initial_parameters"]["sidechain_config"]["btc_deposit_withdrawal_fee"] BTC_WITHDRAWAL_MIN = GENESIS["initial_parameters"]["sidechain_config"]["btc_deposit_withdrawal_min"] ETHEREUM_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_transactions.json"))) ETHEREUM_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_contracts.json"))) with open(".env") as env_file: GANACHE_PK = (env_file.readline().split('RPC_ACCOUNT=')[1]).split(",")[0] with open(".env") as env_file: ROPSTEN_PK = env_file.readlines()[-1].split('ROPSTEN_PRIVATE_KEY=')[1]
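# Hedged sketch (not in the original project): every block above follows the
# same "environment variable overrides a urls.json / private_keys.json default"
# pattern. The hypothetical helper below expresses that convention directly;
# it is shown only to document the pattern, not to change the module's behaviour.
def _from_env_or_json(var_name, json_name, key=None):
    """Return os.environ[var_name] if set, else the value from a resource JSON file."""
    if var_name in os.environ:
        return os.environ[var_name]
    with open(os.path.join(RESOURCES_DIR, json_name)) as resource_file:
        data = json.load(resource_file)
    return data[key if key is not None else var_name]

# Example: BASE_URL = _from_env_or_json("BASE_URL", "urls.json")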
the-stack_0_5918
"Script to add SimPizza to Haldis" from app import db from models import Location, Product pizzas = [ "Bolognese de luxe", "Hawaï", "Popeye", "Pepperoni", "Seafood", "Hot pizzaaah!!!", "Salmon delight", "Full option", "Pitza kebab", "Multi cheese", "4 Seasons", "Mega fish", "Creamy multi cheese", "Green fiësta", "Chicken bbq", "Funky chicken", "Veggie", "Meat lovers", "Scampi mampi", "Tabasco", "Chicken time", "Meatballs", "Tuna", "Anchovy", "Calzone", "Bbq meatballs", "Creamy chicken", "Hot bolognese", ] def add() -> None: "Add Simpizza to the database" simpizza = Location() simpizza.configure( "Sim-pizza", "De Pintelaan 252 9000 Gent", "tel: 09/321.02.00", "http://simpizza.be", ) db.session.add(simpizza) for pizza in pizzas: entry = Product() entry.configure(simpizza, pizza, 1195) db.session.add(entry)
the-stack_0_5919
""" Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import time import ctypes from builtins import range import s1ap_types import s1ap_wrapper class TestMultipleEnbPartialReset(unittest.TestCase): def setUp(self): self._s1ap_wrapper = s1ap_wrapper.TestWrapper() def tearDown(self): self._s1ap_wrapper.cleanup() def test_multiple_enb_partial_reset(self): """ Multi eNB + attach 1 UE + s1ap partial reset + detach """ """ Note: Before execution of this test case, make sure that following steps are correct 1. Configure same plmn and tac in both MME and s1ap tester 2. How to configure plmn and tac in MME: a. Set mcc and mnc in gateway.mconfig for mme service b. Set tac in gateway.mconfig for mme service c. Restart MME service 3. How to configure plmn and tac in s1ap tester, a. For multi-eNB test case, configure plmn and tac from test case. In each multi-eNB test case, set plmn, plmn length and tac in enb_list b. For single eNB test case, configure plmn and tac in nbAppCfg.txt """ # column is an enb parameter, row is number of enbs """ Cell Id, Tac, EnbType, PLMN Id, PLMN length """ enb_list = [[1, 1, 1, "00101", 5], [2, 1, 1, "00101", 5], [3, 1, 1, "00101", 5], [4, 1, 1, "00101", 5], [5, 1, 1, "00101", 5]] self._s1ap_wrapper.multiEnbConfig(len(enb_list), enb_list) time.sleep(2) ue_ids = [] num_ues = 1 self._s1ap_wrapper.configUEDevice(num_ues) for _ in range(num_ues): req = self._s1ap_wrapper.ue_req print("************************* Calling attach for UE id ", req.ue_id) self._s1ap_wrapper.s1_util.attach( req.ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST, s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND, s1ap_types.ueAttachAccept_t, ) ue_ids.append(req.ue_id) # Wait on EMM Information from MME self._s1ap_wrapper._s1_util.receive_emm_info() # Trigger eNB Reset # Add delay to ensure S1APTester sends attach partial before sending # eNB Reset Request time.sleep(0.5) print("************************* Sending eNB Partial Reset Request") reset_req = s1ap_types.ResetReq() reset_req.rstType = s1ap_types.resetType.PARTIAL_RESET.value reset_req.cause = s1ap_types.ResetCause() reset_req.cause.causeType = \ s1ap_types.NasNonDelCauseType.TFW_CAUSE_MISC.value # Set the cause to MISC.hardware-failure reset_req.cause.causeVal = 3 reset_req.r = s1ap_types.R() reset_req.r.partialRst = s1ap_types.PartialReset() reset_req.r.partialRst.numOfConn = num_ues reset_req.r.partialRst.ueIdLst = ( ctypes.c_ubyte * reset_req.r.partialRst.numOfConn )() for indx in range(reset_req.r.partialRst.numOfConn): reset_req.r.partialRst.ueIdLst[indx] = ue_ids[indx] print( "Reset_req.r.partialRst.ueIdLst[indx]", reset_req.r.partialRst.ueIdLst[indx], indx, ) print("ue_ids", ue_ids) self._s1ap_wrapper.s1_util.issue_cmd( s1ap_types.tfwCmd.RESET_REQ, reset_req) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual(response.msg_type, s1ap_types.tfwCmd.RESET_ACK.value) # Trigger detach request for ue in ue_ids: print("************************* Calling detach for UE id ", ue) # self._s1ap_wrapper.s1_util.detach( # ue, detach_type, wait_for_s1) self._s1ap_wrapper.s1_util.detach( ue, 
s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value, True ) if __name__ == "__main__": unittest.main()
the-stack_0_5921
from toontown.toonbase.ToonBaseGlobal import * from panda3d.core import * from panda3d.toontown import * from toontown.toonbase.ToontownGlobals import * import random from direct.distributed import DistributedObject from direct.directnotify import DirectNotifyGlobal from direct.actor import Actor import ToonInteriorColors from toontown.hood import ZoneUtil class DistributedPetshopInterior(DistributedObject.DistributedObject): def __init__(self, cr): DistributedObject.DistributedObject.__init__(self, cr) self.dnaStore = cr.playGame.dnaStore def generate(self): DistributedObject.DistributedObject.generate(self) def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) self.setup() def randomDNAItem(self, category, findFunc): codeCount = self.dnaStore.getNumCatalogCodes(category) index = self.randomGenerator.randint(0, codeCount - 1) code = self.dnaStore.getCatalogCode(category, index) return findFunc(code) def replaceRandomInModel(self, model): baseTag = 'random_' npc = model.findAllMatches('**/' + baseTag + '???_*') for i in xrange(npc.getNumPaths()): np = npc.getPath(i) name = np.getName() b = len(baseTag) category = name[b + 4:] key1 = name[b] key2 = name[b + 1] if key1 == 'm': model = self.randomDNAItem(category, self.dnaStore.findNode) newNP = model.copyTo(np) if key2 == 'r': self.replaceRandomInModel(newNP) elif key1 == 't': texture = self.randomDNAItem(category, self.dnaStore.findTexture) np.setTexture(texture, 100) newNP = np if key2 == 'c': if category == 'TI_wallpaper' or category == 'TI_wallpaper_border': self.randomGenerator.seed(self.zoneId) newNP.setColorScale(self.randomGenerator.choice(self.colors[category])) else: newNP.setColorScale(self.randomGenerator.choice(self.colors[category])) def setZoneIdAndBlock(self, zoneId, block): self.zoneId = zoneId self.block = block def chooseDoor(self): doorModelName = 'door_double_round_ul' if doorModelName[-1:] == 'r': doorModelName = doorModelName[:-1] + 'l' else: doorModelName = doorModelName[:-1] + 'r' door = self.dnaStore.findNode(doorModelName) return door def setup(self): self.dnaStore = base.cr.playGame.dnaStore self.randomGenerator = random.Random() self.randomGenerator.seed(self.zoneId) self.interior = loader.loadModel('phase_4/models/modules/PetShopInterior') self.interior.reparentTo(render) self.fish = Actor.Actor('phase_4/models/props/interiorfish-zero', {'swim': 'phase_4/models/props/interiorfish-swim'}) self.fish.reparentTo(self.interior) self.fish.setColorScale(0.8, 0.9, 1, 0.8) self.fish.setScale(0.8) self.fish.setPos(0, 6, -4) self.fish.setPlayRate(0.7, 'swim') self.fish.loop('swim') hoodId = ZoneUtil.getCanonicalHoodId(self.zoneId) self.colors = ToonInteriorColors.colors[hoodId] self.replaceRandomInModel(self.interior) door = self.chooseDoor() doorOrigin = render.find('**/door_origin;+s') doorNP = door.copyTo(doorOrigin) doorOrigin.setScale(0.8, 0.8, 0.8) doorOrigin.setPos(doorOrigin, 0, -0.25, 0) doorColor = self.randomGenerator.choice(self.colors['TI_door']) DNADoor.setupDoor(doorNP, self.interior, doorOrigin, self.dnaStore, str(self.block), doorColor) doorFrame = doorNP.find('door_*_flat') doorFrame.wrtReparentTo(self.interior) doorFrame.setColor(doorColor) del self.colors del self.dnaStore del self.randomGenerator self.interior.flattenMedium() def disable(self): self.fish.stop() self.fish.cleanup() del self.fish self.interior.removeNode() del self.interior DistributedObject.DistributedObject.disable(self)
the-stack_0_5924
# -*- coding: utf-8 -*- """Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell <[email protected]> Copyright (C) 2020 Yoel Rene Cortes-Pena <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This module contains lookup functions enthalpies and standard entropies of formation. Lookup functions are availa for the liquid, solid, and gas states. A compound may be in more than one lookup function. For reporting bugs, adding feature requests, or submitting pull requests, please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_. .. contents:: :local: Solid Heat of Formation ----------------------- .. autofunction:: chemicals.reaction.Hfs .. autofunction:: chemicals.reaction.Hfs_methods .. autodata:: chemicals.reaction.Hfs_all_methods Liquid Heat of Formation ------------------------ .. autofunction:: chemicals.reaction.Hfl .. autofunction:: chemicals.reaction.Hfl_methods .. autodata:: chemicals.reaction.Hfl_all_methods Gas Heat of Formation --------------------- .. autofunction:: chemicals.reaction.Hfg .. autofunction:: chemicals.reaction.Hfg_methods .. autodata:: chemicals.reaction.Hfg_all_methods Solid Absolute Entropy ---------------------- .. autofunction:: chemicals.reaction.S0s .. autofunction:: chemicals.reaction.S0s_methods .. autodata:: chemicals.reaction.S0s_all_methods Liquid Absolute Entropy ----------------------- .. autofunction:: chemicals.reaction.S0l .. autofunction:: chemicals.reaction.S0l_methods .. autodata:: chemicals.reaction.S0l_all_methods Gas Absolute Entropy -------------------- .. autofunction:: chemicals.reaction.S0g .. autofunction:: chemicals.reaction.S0g_methods .. autodata:: chemicals.reaction.S0g_all_methods Utility Functions ----------------- .. autofunction:: chemicals.reaction.Gibbs_formation .. autofunction:: chemicals.reaction.entropy_formation .. autofunction:: chemicals.reaction.Hf_basis_converter Chemical Reactions ------------------ .. autofunction:: chemicals.reaction.balance_stoichiometry .. 
autofunction:: chemicals.reaction.stoichiometric_matrix """ __all__ = ['Hfg', 'Hfl', 'Hfs', 'S0g', 'S0l', 'S0s', 'Hfl_methods', 'Hfg_methods', 'Hfs_methods', 'S0l_methods', 'S0g_methods', 'S0s_methods', 'Hfl_all_methods', 'Hfg_all_methods', 'Hfs_all_methods', 'S0l_all_methods', 'S0g_all_methods', 'S0s_all_methods', 'Gibbs_formation', 'entropy_formation', 'Hf_basis_converter', 'balance_stoichiometry', 'stoichiometric_matrix'] from chemicals.utils import ceil, log10, PY37, source_path, os_path_join, can_load_data from chemicals import heat_capacity from chemicals.data_reader import (register_df_source, data_source, retrieve_from_df_dict, retrieve_any_from_df_dict, list_available_methods_from_df_dict) # %% Register data sources and lazy load them CRC = 'CRC' YAWS = 'YAWS' API_TDB_G = 'API_TDB_G' ATCT_L = 'ATCT_L' ATCT_G = 'ATCT_G' TRC = 'TRC' folder = os_path_join(source_path, 'Reactions') register_df_source(folder, 'API TDB Albahri Hf (g).tsv') register_df_source(folder, 'ATcT 1.112 (g).tsv') register_df_source(folder, 'ATcT 1.112 (l).tsv') register_df_source(folder, 'Yaws Hf S0 (g).tsv') _reaction_data_loaded = False def _load_reaction_data(): global Hfg_API_TDB_data, Hfg_ATcT_data, Hfl_ATcT_data, Hfg_S0g_YAWS_data global Hfg_sources, Hfl_sources, Hfs_sources global S0g_sources, S0l_sources, S0s_sources global _reaction_data_loaded Hfg_API_TDB_data = data_source('API TDB Albahri Hf (g).tsv') Hfg_ATcT_data = data_source('ATcT 1.112 (g).tsv') Hfl_ATcT_data = data_source('ATcT 1.112 (l).tsv') Hfg_S0g_YAWS_data = data_source('Yaws Hf S0 (g).tsv') _reaction_data_loaded = True S0g_sources = { CRC: heat_capacity.CRC_standard_data, YAWS: Hfg_S0g_YAWS_data, } S0l_sources = { CRC: heat_capacity.CRC_standard_data, } S0s_sources = { CRC: heat_capacity.CRC_standard_data, } Hfg_sources = { ATCT_G: Hfg_ATcT_data, CRC: heat_capacity.CRC_standard_data, API_TDB_G: Hfg_API_TDB_data, TRC: heat_capacity.TRC_gas_data, YAWS: Hfg_S0g_YAWS_data, } Hfl_sources = { ATCT_L: Hfl_ATcT_data, CRC: heat_capacity.CRC_standard_data, } Hfs_sources = { CRC: heat_capacity.CRC_standard_data, } if PY37: def __getattr__(name): if name in ('Hfg_API_TDB_data', 'Hfg_ATcT_data', 'Hfl_ATcT_data', 'Hfg_S0g_YAWS_data', 'Hfg_sources', 'Hfl_sources', 'Hfs_sources', 'S0g_sources', 'S0l_sources', 'S0s_sources'): _load_reaction_data() return globals()[name] raise AttributeError("module %s has no attribute %s" %(__name__, name)) else: if can_load_data: _load_reaction_data() # %% Lookup functions # TODO: more data from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3692305/ # has dippr standard heats of formation, about 55% of the database Hfs_all_methods = (CRC,) '''Tuple of method name keys. See the `Hfs` for the actual references''' def Hfs_methods(CASRN): """Return all methods available to obtain the Hfs for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the Hfs with the given inputs. See Also -------- Hfs """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(Hfs_sources, CASRN, 'Hfs') def Hfs(CASRN, method=None): r'''This function handles the retrieval of a chemical's solid/crystaline standard phase heat of formation. The lookup is based on CASRNs. Will automatically select a data source to use if no method is provided; returns None if the data is not available. 
Parameters ---------- CASRN : str CASRN [-] Returns ------- Hfs : float Solid standard-state heat of formation, [J/mol] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined by constants in Hfs_methods Notes ----- Sources are: * 'CRC', from the CRC handbook (1360 values) Examples -------- >>> Hfs('101-81-5') # Diphenylmethane 71500.0 See Also -------- Hfs_methods References ---------- .. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F. Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st Century." Journal of Physics: Conference Series 16, no. 1 (January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078. ''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(Hfs_sources, CASRN, 'Hfs', method) else: return retrieve_any_from_df_dict(Hfs_sources, CASRN, 'Hfs') Hfl_all_methods = (ATCT_L, CRC) '''Tuple of method name keys. See the `Hfl` for the actual references''' def Hfl_methods(CASRN): """Return all methods available to obtain the Hfl for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the Hfl with the given inputs. See Also -------- Hfl """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(Hfl_sources, CASRN, 'Hfl') def Hfl(CASRN, method=None): r'''This function handles the retrieval of a chemical's liquid standard phase heat of formation. The lookup is based on CASRNs. Will automatically select a data source to use if no method is provided; returns None if the data is not available. Parameters ---------- CASRN : str CASRN [-] Returns ------- Hfl : float Liquid standard-state heat of formation, [J/mol] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined in the variable, `Hfl_all_methods`. Notes ----- Sources are: * 'ATCT_L', the Active Thermochemical Tables version 1.112. * 'CRC', from the CRC handbook (1360 values) Examples -------- >>> Hfl('67-56-1') -238400.0 See Also -------- Hfl_methods References ---------- .. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F. Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st Century." Journal of Physics: Conference Series 16, no. 1 (January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078. .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014. ''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(Hfl_sources, CASRN, 'Hfl', method) else: return retrieve_any_from_df_dict(Hfl_sources, CASRN, 'Hfl') Hfg_all_methods = (ATCT_G, TRC, CRC, YAWS) '''Tuple of method name keys. See the `Hfg` for the actual references''' def Hfg_methods(CASRN): """Return all methods available to obtain the Hfg for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the Hfg with the given inputs. See Also -------- Hfg """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(Hfg_sources, CASRN, 'Hfg') def Hfg(CASRN, method=None): r'''This function handles the retrieval of a chemical's gas heat of formation. Lookup is based on CASRNs. 
Will automatically select a data source to use if no method is provided; returns None if the data is not available. Parameters ---------- CASRN : str CASRN [-] Returns ------- Hfg : float Ideal gas phase heat of formation, [J/mol] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined by constants in Hfg_methods Notes ----- Function has data for approximately 8700 chemicals. Sources are: * 'ATCT_G', the Active Thermochemical Tables version 1.112 (600 values) * 'TRC', from a 1994 compilation (1750 values) * 'CRC', from the CRC handbook (1360 values) * 'YAWS', a large compillation of values, mostly estimated (5000 values) 'TRC' data may have come from computational procedures, for example petane is off by 30%. Examples -------- >>> Hfg('67-56-1') -200700.0 >>> Hfg('67-56-1', method='YAWS') -200900.0 >>> Hfg('67-56-1', method='CRC') -201000.0 >>> Hfg('67-56-1', method='TRC') -190100.0 See Also -------- Hfg_methods References ---------- .. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F. Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st Century." Journal of Physics: Conference Series 16, no. 1 (January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078. .. [2] Frenkelʹ, M. L, Texas Engineering Experiment Station, and Thermodynamics Research Center. Thermodynamics of Organic Compounds in the Gas State. College Station, Tex.: Thermodynamics Research Center, 1994. .. [3] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014. .. [4] Yaws, Carl L. Thermophysical Properties of Chemicals and Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional Publishing, 2014. ''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(Hfg_sources, CASRN, 'Hfg', method) else: return retrieve_any_from_df_dict(Hfg_sources, CASRN, 'Hfg') S0s_all_methods = (CRC,) '''Tuple of method name keys. See the `S0s` for the actual references''' def S0s_methods(CASRN): """Return all methods available to obtain the S0s for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the S0s with the given inputs. See Also -------- S0s """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(S0s_sources, CASRN, 'S0s') def S0s(CASRN, method=None): r'''This function handles the retrieval of a chemical's absolute entropy at a reference temperature of 298.15 K and pressure of 1 bar, in the solid state. Lookup is based on CASRNs. Will automatically select a data source to use if no method is provided; returns None if the data is not available. Parameters ---------- CASRN : str CASRN [-] Returns ------- S0s : float Ideal gas standard absolute entropy of compound, [J/mol/K] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined by constants in `S0s_all_methods`. Notes ----- Sources are: * 'CRC', from the CRC handbook (1360 values) Examples -------- >>> S0s('7439-93-2') # Lithium 29.1 See Also -------- S0s_methods ''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(S0s_sources, CASRN, 'S0s', method) else: return retrieve_any_from_df_dict(S0s_sources, CASRN, 'S0s') S0l_all_methods = (CRC,) '''Tuple of method name keys. 
See the `S0l` for the actual references''' def S0l_methods(CASRN): """Return all methods available to obtain the S0l for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the S0l with the given inputs. See Also -------- S0l """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(S0l_sources, CASRN, 'S0l') def S0l(CASRN, method=None): r'''This function handles the retrieval of a chemical's absolute entropy at a reference temperature of 298.15 K and pressure of 1 bar, in the liquid state. Lookup is based on CASRNs. Will automatically select a data source to use if no method is provided; returns None if the data is not available. Parameters ---------- CASRN : str CASRN [-] Returns ------- S0l : float Ideal gas standard absolute entropy of compound, [J/mol/K] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined in the variable, `S0l_all_methods`. Notes ----- Sources are: * 'CRC', from the CRC handbook Examples -------- >>> S0l('7439-97-6') # Mercury 75.9 See Also -------- S0l_methods References ---------- .. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014. ''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(S0l_sources, CASRN, 'S0l', method) else: return retrieve_any_from_df_dict(S0l_sources, CASRN, 'S0l') S0g_all_methods = (CRC, YAWS) '''Tuple of method name keys. See the `S0g` for the actual references''' def S0g_methods(CASRN): """Return all methods available to obtain the S0g for the desired chemical. Parameters ---------- CASRN : str CASRN, [-] Returns ------- methods : list[str] Methods which can be used to obtain the S0g with the given inputs. See Also -------- S0g """ if not _reaction_data_loaded: _load_reaction_data() return list_available_methods_from_df_dict(S0g_sources, CASRN, 'S0g') def S0g(CASRN, method=None): r'''This function handles the retrieval of a chemical's absolute entropy at a reference temperature of 298.15 K and pressure of 1 bar, in the ideal gas state. Lookup is based on CASRNs. Will automatically select a data source to use if no method is provided; returns None if the data is not available. Parameters ---------- CASRN : str CASRN [-] Returns ------- S0g : float Ideal gas standard absolute entropy of compound, [J/mol/K] Other Parameters ---------------- method : string, optional A string for the method name to use, as defined in the variable, `S0g_all_methods` Notes ----- Function has data for approximately 5400 chemicals. Sources are: * 'CRC', from the CRC handbook (520 values) * 'YAWS', a large compillation of values, mostly estimated (4890 values) Examples -------- >>> S0g('67-56-1') 239.9 >>> S0g('67-56-1', method='YAWS') 239.88 See Also -------- S0g_methods References ---------- .. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014. .. [2] Yaws, Carl L. Thermophysical Properties of Chemicals and Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional Publishing, 2014. 
''' if not _reaction_data_loaded: _load_reaction_data() if method: return retrieve_from_df_dict(S0g_sources, CASRN, 'S0g', method) else: return retrieve_any_from_df_dict(S0g_sources, CASRN, 'S0g') # %% Converter functions def Hf_basis_converter(Hvapm, Hf_liq=None, Hf_gas=None): r'''This function converts a liquid or gas enthalpy of formation to the other. This is useful, as thermodynamic packages often work with ideal- gas as the reference state and require ideal-gas enthalpies of formation. Parameters ---------- Hvapm : float Molar enthalpy of vaporization of compound at 298.15 K or (unlikely) the reference temperature, [J/mol] Hf_liq : float, optional Enthalpy of formation of the compound in its liquid state, [J/mol] Hf_gas : float, optional Enthalpy of formation of the compound in its ideal-gas state, [J/mol] Returns ------- Hf_calc : float, optional Enthalpy of formation of the compound in the other state to the one provided, [J/mol] Notes ----- Examples -------- Calculate the ideal-gas enthalpy of formation for water, from its standard- state (liquid) value: >>> Hf_basis_converter(44018, Hf_liq=-285830) -241812 Calculate the standard-state (liquid) enthalpy of formation for water, from its ideal-gas value: >>> Hf_basis_converter(44018, Hf_gas=-241812) -285830 ''' if Hf_liq is None and Hf_gas is None: raise ValueError("Provide either a liquid or a gas enthalpy of formation") if Hvapm is None or Hvapm < 0.0: raise ValueError("Enthalpy of formation unknown or zero") if Hf_liq is None: return Hf_gas - Hvapm else: return Hf_liq + Hvapm def Gibbs_formation(dHf, S0_abs, dHfs_std, S0_abs_elements, coeffs_elements, T_ref=298.15): r'''This function calculates the Gibbs free energy of formation of a compound, from its constituent elements. The calculated value will be for a "standard-state" value if `dHf` and `S0_abs` are provided in the standard state; or it will be in an "ideal gas" basis if they are both for an ideal gas. For compounds which are gases at STP, the two values are the same. Parameters ---------- dHf : float Molar enthalpy of formation of the created compound, [J/mol] S0_abs : float Absolute molar entropy of the created compound at the reference temperature, [J/mol/K] dHfs_std : list[float] List of standard molar enthalpies of formation of all elements used in the formation of the created compound, [J/mol] S0_abs_elements : list[float] List of standard absolute molar entropies at the reference temperature of all elements used in the formation of the created compound, [J/mol/K] coeffs_elements : list[float] List of coefficients for each compound (i.e. 1 for C, 2 for H2 if the target is methane), in the same order as `dHfs_std` and `S0_abs_elements`, [-] T_ref : float, optional The standard state temperature, default 298.15 K; few values are tabulated at other temperatures, [-] Returns ------- dGf : float Gibbs free energy of formation for the created compound, [J/mol] Notes ----- Be careful for elements like Bromine - is the tabulated value for Br2 or Br? 
Examples -------- Calculate the standard-state Gibbs free energy of formation for water, using water's standard state heat of formation and absolute entropy at 298.15 K: >>> Gibbs_formation(-285830, 69.91, [0, 0], [130.571, 205.147], [1, .5]) -237161.633825 Calculate the ideal-gas state Gibbs free energy of formation for water, using water's ideal-gas state heat of formation and absolute entropy at 298.15 K as a gas: >>> Gibbs_formation(-241818, 188.825, [0, 0], [130.571, 205.147], [1, .5]) -228604.141075 Calculate the Gibbs free energy of formation for CBrF3 (it is a gas at STP, so its standard-state and ideal-gas state values are the same) at 298.15 K: >>> Gibbs_formation(-648980, 297.713, [0, 0, 0], [5.74, 152.206, 202.789], [1, .5, 1.5]) -622649.329975 Note in the above calculation that the Bromine's `S0` and `Hf` are for Br2; and that the value for Bromine as a liquid, which is its standard state, is used. References ---------- .. [1] "Standard Gibbs Free Energy of Formation Calculations Chemistry Tutorial." Accessed March, 2019. https://www.ausetute.com.au/gibbsform.html. ''' N = len(coeffs_elements) dH = dHf dS = S0_abs for i in range(N): dH -= dHfs_std[i]*coeffs_elements[i] dS -= S0_abs_elements[i]*coeffs_elements[i] return dH - T_ref*dS def entropy_formation(Hf, Gf, T_ref=298.15): r'''This function calculates the entropy of formation of a compound, from its constituent elements. The calculated value will be for a "standard-state" value if `Hf` and `Gf` are provided in the standard state; or it will be in an "ideal gas" basis if they are both for an ideal gas. For compounds which are gases at STP, the two values are the same. Parameters ---------- Hf : float Molar enthalpy of formation of the compound, [J/mol] Gf : float Molar Gibbs free energy of formation of the compound, [J/mol] T_ref : float, optional The standard state temperature, default 298.15 K; few values are tabulated at other temperatures, [-] Returns ------- S0 : float Entropy of formation of the compound, [J/mol/K] Notes ----- Examples -------- Entropy of formation of methane: >>> entropy_formation(Hf=-74520, Gf=-50490) -80.59701492537314 Entropy of formation of water in ideal gas state: >>> entropy_formation(Hf=-241818, Gf=-228572) -44.427301693778304 ''' return (Hf - Gf)/T_ref # %% Stoichiometry functions def stoichiometric_matrix(atomss, reactants): r'''This function calculates a stoichiometric matrix of reactants and stoichiometric matrix, as required by a solver to compute the reation coefficients. Parameters ---------- atomss : list[dict[(str, float)]] A list of dictionaties of (element, element_count) pairs for each chemical, [-] reactants : list[bool] List of booleans indicating whether each chemical is a reactant (True) or a product (False), [-] Returns ------- matrix : list[list[float]] Chemical reaction matrix for further processing; rows contain element counts of each compound, and the columns represent each chemical, [-] Notes ----- The rows of the matrix contain the element counts of each compound, and the columns represent each chemical. 
Examples -------- MgO2 -> Mg + 1/2 O2 (k=1) >>> stoichiometric_matrix([{'Mg': 1, 'O': 1}, {'Mg': 1}, {'O': 2}], [True, False, False]) [[1, -1, 0], [1, 0, -2]] Cl2 + propylene -> allyl chloride + HCl >>> stoichiometric_matrix([{'Cl': 2}, {'C': 3, 'H': 6}, {'C': 3, 'Cl': 1, 'H': 5}, {'Cl': 1, 'H': 1}], [True, True, False, False, False]) [[0, 3, -3, 0], [2, 0, -1, -1], [0, 6, -5, -1]] Al + 4HNO3 -> Al(NO3)3 + NO + 2H2O (k=1) >>> stoichiometric_matrix([{'Al': 1}, {'H': 1, 'N': 1, 'O': 3}, {'Al': 1, 'N': 3, 'O': 9}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False, False]) [[1, 0, -1, 0, 0], [0, 1, 0, 0, -2], [0, 1, -3, -1, 0], [0, 3, -9, -1, -1]] 4Fe + 3O2 -> 2(Fe2O3) (k=2) >>> stoichiometric_matrix([{'Fe': 1}, {'O': 2}, {'Fe':2, 'O': 3}], [True, True, False]) [[1, 0, -2], [0, 2, -3]] 4NH3 + 5O2 -> 4NO + 6(H2O) (k=4) >>> stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False]) [[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]] No unique solution: C2H5NO2 + C3H7NO3 + 2C6H14N4O2 + 3C5H9NO2 + 2C9H11NO2 -> 8H2O + C50H73N15O11 >>> stoichiometric_matrix([{'C': 2, 'H': 5, 'N': 1, 'O': 2}, {'C': 3, 'H': 7, 'N': 1, 'O': 3}, {'C': 6, 'H': 14, 'N': 4, 'O': 2}, {'C': 5, 'H': 9, 'N': 1, 'O': 2}, {'C': 9, 'H': 11, 'N': 1, 'O': 2}, {'H': 2, 'O': 1}, {'C': 50, 'H': 73, 'N': 15, 'O': 11}], [True, True, True, True, True, False, False]) [[2, 3, 6, 5, 9, 0, -50], [5, 7, 14, 9, 11, -2, -73], [1, 1, 4, 1, 1, 0, -15], [2, 3, 2, 2, 2, -1, -11]] References ---------- .. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. "Chemical Equation Balancing: An Integer Programming Approach." Mathematical and Computer Modelling 44, no. 7 (October 1, 2006): 678-91. https://doi.org/10.1016/j.mcm.2006.02.004. .. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22. ''' n_compounds = len(atomss) elements = set() for atoms in atomss: elements.update(atoms.keys()) elements = sorted(list(elements)) # Ensure reproducibility n_elements = len(elements) matrix = [[0]*n_compounds for _ in range(n_elements)] for i, atoms in enumerate(atomss): for k, v in atoms.items(): if not reactants[i]: v = -v matrix[elements.index(k)][i] = v return matrix def balance_stoichiometry(matrix, rounding=9, allow_fractional=False): r'''This function balances a chemical reaction. Parameters ---------- matrix : list[list[float]] Chemical reaction matrix for further processing; rows contain element counts of each compound, and the columns represent each chemical, [-] Returns ------- coefficients : list[float] Balanced coefficients; all numbers are positive, [-] Notes ----- Balance the reaction 4 NH3 + 5 O2 = 4 NO + 6 H2O, without knowing the coefficients: >>> matrix = stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False]) >>> matrix [[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]] >>> balance_stoichiometry(matrix) [4.0, 5.0, 4.0, 6.0] >>> balance_stoichiometry(matrix, allow_fractional=True) [1.0, 1.25, 1.0, 1.5] This algorithm relies on `scipy`. The behavior of this function for inputs which do not have a unique solution is undefined. This algorithm may suffer from floating point issues. If you believe there is an error in the result, please report your reaction to the developers. References ---------- .. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. 
"Chemical Equation Balancing: An Integer Programming Approach." Mathematical and Computer Modelling 44, no. 7 (October 1, 2006): 678-91. https://doi.org/10.1016/j.mcm.2006.02.004. .. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22. ''' import scipy.linalg done = scipy.linalg.null_space(matrix) if len(done[0]) > 1: raise ValueError("No solution") d = done[:, 0].tolist() min_value_inv = 1.0/min(d) d = [i*min_value_inv for i in d] if not allow_fractional: from fractions import Fraction max_denominator = 10**rounding fs = [Fraction(x).limit_denominator(max_denominator=max_denominator) for x in d] all_denominators = set([i.denominator for i in fs]) if 1 in all_denominators: all_denominators.remove(1) for den in sorted(list(all_denominators), reverse=True): fs = [num*den for num in fs] if all(i.denominator == 1 for i in fs): break # May have gone too far return [float(i) for i in fs] # done = False # for i in range(100): # for c in d: # ratio = c.as_integer_ratio()[1] # if ratio != 1: # d = [di*ratio for di in d] # break # done = True # if done: # break # # d_as_int = [int(i) for i in d] # for i, j in zip(d, d_as_int): # if i != j: # raise ValueError("Could not find integer coefficients (%s, %s)" %(i, j)) # return d_as_int else: d = [round(i, rounding + int(ceil(log10(abs(i))))) for i in d] return d
the-stack_0_5926
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Ai the coins developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the RPC HTTP basics.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import http.client import urllib.parse class HTTPBasicsTest (BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False def setup_network(self): self.setup_nodes() def run_test(self): ################################################# # lowlevel check for http persistent connection # ################################################# url = urllib.parse.urlparse(self.nodes[0].url) authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock!=None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message assert(conn.sock!=None) #according to http/1.1 connection must still be open! conn.close() #same should be if we add keep-alive because this should be the std. behaviour headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock!=None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message assert(conn.sock!=None) #according to http/1.1 connection must still be open! 
conn.close()

        #now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock==None) #now the connection must be closed after the response

        #node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)

        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because bitcoind uses keep-alive by default

        # Check excessive request size
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)


if __name__ == '__main__':
    HTTPBasicsTest ().main ()
the-stack_0_5927
"""SciUnit tests live in this module.""" import inspect import traceback from sciunit import settings from sciunit.base import SciUnit from .capabilities import ProducesNumber from .models import Model from .scores import Score, BooleanScore, NoneScore, ErrorScore, TBDScore,\ NAScore from .validators import ObservationValidator, ParametersValidator from .errors import Error, CapabilityError, ObservationError,\ InvalidScoreError, ParametersError class Test(SciUnit): """Abstract base class for tests.""" def __init__(self, observation, name=None, **params): """ Args: observation (dict): A dictionary of observed values to parameterize the test. name (str, optional): Name of the test instance. """ self.name = name if name else self.__class__.__name__ assert isinstance(self.name, str), "Test name must be a string" if self.description is None: self.description = self.__class__.__doc__ self.params = params if params else {} self.verbose = self.params.pop('verbose', 1) #self.params.update(params) self.validate_params(self.params) self.observation = observation if settings['PREVALIDATE']: self.validate_observation(self.observation) if self.score_type is None or not issubclass(self.score_type, Score): raise Error(("The score type '%s' specified for Test '%s' " "is not valid.") % (self.score_type, self.name)) super(Test, self).__init__() name = None """The name of the test. Defaults to the test class name.""" description = None """A description of the test. Defaults to the docstring for the class.""" observation = None """The empirical observation that the test is using.""" params = None """A dictionary containing the parameters to the test.""" score_type = BooleanScore """A score type for this test's `judge` method to return.""" converter = None """A conversion to be done on the score after it is computed.""" observation_schema = None """A schema that the observation must adhere to (validated by cerberus). Can also be a list of schemas, one of which the observation must match. If it is a list, each schema in the list can optionally be named by putting (name, schema) tuples in that list.""" params_schema = None """A schema that the params must adhere to (validated by cerberus). Can also be a list of schemas, one of which the params must match.""" def validate_observation(self, observation): """Validate the observation provided to the constructor. Raises an ObservationError if invalid. 
""" if not observation: raise ObservationError("Observation is missing.") if not isinstance(observation, dict): raise ObservationError("Observation is not a dictionary.") if "mean" in observation and observation["mean"] is None: raise ObservationError("Observation mean cannot be 'None'.") if self.observation_schema: if isinstance(self.observation_schema, list): schemas = [x[1] if isinstance(x, tuple) else x for x in self.observation_schema] schema = {'oneof_schema': schemas, 'type': 'dict'} else: schema = {'schema': self.observation_schema, 'type': 'dict'} schema = {'observation': schema} v = ObservationValidator(schema, test=self) if not v.validate({'observation': observation}): raise ObservationError(v.errors) return observation @classmethod def observation_schema_names(cls): """Return a list of names of observation schema, if they are set.""" names = [] if cls.observation_schema: if isinstance(cls.observation_schema, list): names = [x[0] if isinstance(x, tuple) else 'Schema %d' % (i+1) for i, x in enumerate(cls.observation_schema)] return names def validate_params(self, params): """Validate the params provided to the constructor. Raises an ParametersError if invalid. """ if params is None: raise ParametersError("Parameters cannot be `None`.") if not isinstance(params, dict): raise ParametersError("Parameters are not a dictionary.") if self.params_schema: if isinstance(self.params_schema, list): schema = {'oneof_schema': self.params_schema, 'type': 'dict'} else: schema = {'schema': self.params_schema, 'type': 'dict'} schema = {'params': schema} v = ParametersValidator(schema, test=self) if not v.validate({'params': params}): raise ParametersError(v.errors) return params required_capabilities = () """A sequence of capabilities that a model must have in order for the test to be run. Defaults to empty.""" def check_capabilities(self, model, skip_incapable=False, require_extra=False): """Check that test's required capabilities are implemented by `model`. Raises an Error if model is not a Model. Raises a CapabilityError if model does not have a capability. """ if not isinstance(model, Model): raise Error("Model %s is not a sciunit.Model." % str(model)) capable = all([self.check_capability(model, c, skip_incapable, require_extra) for c in self.required_capabilities]) return capable def check_capability(self, model, c, skip_incapable=False, require_extra=False): """Check if `model` has capability `c`. Optionally (default:True) raise a `CapabilityError` if it does not. """ capable = c.check(model, require_extra=require_extra) if not capable and not skip_incapable: raise CapabilityError(model, c) return capable def condition_model(self, model): """Update the model in any way needed before generating the prediction. This could include updating parameters such as simulation durations that do not define the model but do define experiments performed on the model. No default implementation. """ pass def generate_prediction(self, model): """Generate a prediction from a model using the required capabilities. No default implementation. """ raise NotImplementedError(("Test %s does not implement " "generate_prediction.") % str()) def check_prediction(self, prediction): """Check the prediction for acceptable values. No default implementation. """ pass def compute_score(self, observation, prediction): """Generates a score given the observations provided in the constructor and the prediction generated by generate_prediction. Must generate a score of score_type. No default implementation. 
""" if not hasattr(self, 'score_type') or \ not hasattr(self.score_type, 'compute'): raise NotImplementedError(("Test %s either implements no " "compute_score method or provides no " "score_type with a compute method.") % self.name) # After some processing of the observation and the prediction. score = self.score_type.compute(observation, prediction) return score def _bind_score(self, score, model, observation, prediction): """Bind some useful attributes to the score.""" score.model = model score.test = self score.prediction = prediction score.observation = observation # Don't let scores share related_data. score.related_data = score.related_data.copy() self.bind_score(score, model, observation, prediction) def bind_score(self, score, model, observation, prediction): """For the user to bind additional features to the score.""" pass def check_score_type(self, score): """Check that the score is the correct type for this test.""" if not isinstance(score, (self.score_type, NoneScore, ErrorScore)): msg = (("Score for test '%s' is not of correct type. " "The test requires type %s but %s was provided.") % (self.name, self.score_type.__name__, score.__class__.__name__)) raise InvalidScoreError(msg) def _judge(self, model, skip_incapable=True): """Generate a score for the model (internal API use only).""" # 1. self.check_capabilities(model, skip_incapable=skip_incapable) # 2. prediction = self.generate_prediction(model) self.check_prediction(prediction) self.last_model = model # 3. Validate observation and compute score validated = self.validate_observation(self.observation) if validated is not None: self.observation = validated score = self.compute_score(self.observation, prediction) if self.converter: score = self.converter.convert(score) # 4. self.check_score_type(score) # 5. self._bind_score(score, model, self.observation, prediction) return score def judge(self, model, skip_incapable=False, stop_on_error=True, deep_error=False): """Generate a score for the provided model (public method). Operates as follows: 1. Checks if the model has all the required capabilities. If it does not, and skip_incapable=False, then a `CapabilityError` is raised. 2. Calls generate_prediction to generate a prediction. 3. Calls score_prediction to generate a score. 4. Checks that the score is of score_type, raising an InvalidScoreError. 5. Equips the score with metadata: a) A reference to the model, in attribute model. b) A reference to the test, in attribute test. c) A reference to the prediction, in attribute prediction. d) A reference to the observation, in attribute observation. 6. Returns the score. If stop_on_error is true (default), exceptions propagate upward. If false, an ErrorScore is generated containing the exception. If deep_error is true (not default), the traceback will contain the actual code execution error, instead of the content of an ErrorScore. """ if isinstance(model, (list, tuple, set)): # If a collection of models is provided from .suites import TestSuite suite = TestSuite([self], name=self.name) # then test them using a one-test suite. 
return suite.judge(model, skip_incapable=skip_incapable, stop_on_error=stop_on_error, deep_error=deep_error) if deep_error: score = self._judge(model, skip_incapable=skip_incapable) else: try: score = self._judge(model, skip_incapable=skip_incapable) except CapabilityError as e: score = NAScore(str(e)) score.model = model score.test = self except Exception as e: e.stack = traceback.format_exc() score = ErrorScore(e) score.model = model score.test = self if isinstance(score, ErrorScore) and stop_on_error: raise score.score # An exception. return score def check(self, model, skip_incapable=True, stop_on_error=True, require_extra=False): """Check to see if the test can run this model. Like judge, but without actually running the test. Just returns a Score indicating whether the model can take the test or not. """ try: if self.check_capabilities(model, skip_incapable=skip_incapable, require_extra=require_extra): score = TBDScore(None) else: score = NAScore(None) except Exception as e: score = ErrorScore(e) if stop_on_error: raise e return score def optimize(self, model): """Optimize the parameters of the model to get the best score.""" raise NotImplementedError(("Optimization not implemented " "for Test '%s'" % self)) def describe(self): """Describe the test in words.""" result = "No description available" if self.description: result = "%s" % self.description else: if self.__doc__: s = [] s += [self.__doc__.strip().replace('\n', ''). replace(' ', '')] if self.converter: s += [self.converter.description] result = '\n'.join(s) return result @property def state(self): """Get the frozen (pickled) model state.""" return self._state(exclude=['last_model']) @classmethod def is_test_class(cls, other_cls): """Return whether `other_cls` is a subclass of this test class.""" return inspect.isclass(other_cls) and issubclass(other_cls, cls) def __str__(self): """Return the string representation of the test's name.""" return '%s' % self.name class TestM2M(Test): """Abstract class for handling tests involving multiple models. Enables comparison of model to model predictions, and also against experimental reference data (optional). Note: 'TestM2M' would typically be used when handling mutliple (>2) models, with/without experimental reference data. For single model tests, you can use the 'Test' class. """ def __init__(self, observation=None, name=None, **params): super(TestM2M, self).__init__(observation, name=name, **params) def validate_observation(self, observation): """Validate the observation provided to the constructor. Note: TestM2M does not compulsorily require an observation (i.e. None allowed). """ pass def compute_score(self, prediction1, prediction2): """Generate a score given the observations provided in the constructor and/or the prediction(s) generated by generate_prediction. Must generate a score of score_type. No default implementation. """ try: # After some processing of the observation and/or the prediction(s) score = self.score_type.compute(prediction1, prediction2) return score except Exception: raise NotImplementedError(("Test %s either implements no " "compute_score method or provides no " "score_type with a compute method.") % self.name) def _bind_score(self, score, prediction1, prediction2, model1, model2): """Bind some useful attributes to the score.""" score.model1 = model1 score.model2 = model2 score.test = self score.prediction1 = prediction1 score.prediction2 = prediction2 # Don't let scores share related_data. 
score.related_data = score.related_data.copy() self.bind_score(score, prediction1, prediction2, model1, model2) def bind_score(self, score, prediction1, prediction2, model1, model2): """For the user to bind additional features to the score.""" pass def _judge(self, prediction1, prediction2, model1, model2=None): # TODO: Not sure if below statement is required # self.last_model = model # 6. score = self.compute_score(prediction1, prediction2) if self.converter: score = self.converter.convert(score) # 7. if not isinstance(score, (self.score_type, NoneScore, ErrorScore)): raise InvalidScoreError(("Score for test '%s' is not of correct " "type. The test requires type %s but %s " "was provided.") % (self.name, self.score_type.__name__, score.__class__.__name__)) # 8. self._bind_score(score, prediction1, prediction2, model1, model2) return score def judge(self, models, skip_incapable=False, stop_on_error=True, deep_error=False): """Generate a score matrix for the provided model(s). Operates as follows: 1. Check if models have been specified as a list/tuple/set. If not, raise exception. 2. Create a list of predictions. If a test observation is provided, add it to predictions. 3. Checks if all models have all the required capabilities. If a model does not, then a CapabilityError is raised. 4. Calls generate_prediction to generate predictions for each model, and these are appeneded to the predictions list. 5. Generate a 2D list as a placeholder for all the scores. 6. Calls score_prediction to generate scores for each comparison. 7. Checks that the score is of score_type, raising an InvalidScoreError. 8. Equips the score with metadata: a) Reference(s) to the model(s), in attribute model1 (and model2). b) A reference to the test, in attribute test. c) A reference to the predictions, in attributes prediction1 and prediction2. 9. Returns the score as a Pandas DataFrame. If stop_on_error is true (default), exceptions propagate upward. If false, an ErrorScore is generated containing the exception. If deep_error is true (not default), the traceback will contain the actual code execution error, instead of the content of an ErrorScore. """ # 1. if not isinstance(models, (list, tuple, set)): raise TypeError(("Models must be specified as a list, tuple or " "set. For single model tests, use 'Test' class.")) else: models = list(models) # 2. predictions = [] # If observation exists, store it as first element in predictions[] if self.observation: predictions.append(self.observation) for model in models: if not isinstance(model, Model): raise TypeError(("TestM2M's judge method received a non-Model." "Invalid model name: '%s'" % model)) else: try: # 3. self.check_capabilities(model, skip_incapable=skip_incapable) # 4. prediction = self.generate_prediction(model) self.check_prediction(prediction) predictions.append(prediction) except CapabilityError as e: raise CapabilityError(model, e.capability, ("TestM2M's judge method resulted in" " error for '%s'. Error: '%s'" % (model, str(e)))) except Exception as e: raise Exception(("TestM2M's judge method resulted in error" "for '%s'. Error: '%s'" % (model, str(e)))) # 5. 
2D list for scores; num(rows) = num(cols) = num(predictions) scores = [[NoneScore for x in range(len(predictions))] for y in range(len(predictions))] for i in range(len(predictions)): for j in range(len(predictions)): if not self.observation: model1 = models[i] model2 = models[j] elif i == 0 and j == 0: model1 = None model2 = None elif i == 0: model1 = models[j-1] model2 = None elif j == 0: model1 = models[i-1] model2 = None else: model1 = models[i-1] model2 = models[j-1] scores[i][j] = self._judge(predictions[i], predictions[j], model1, model2) if isinstance(scores[i][j], ErrorScore) and stop_on_error: raise scores[i][j].score # An exception. # 9. from sciunit.scores.collections_m2m import ScoreMatrixM2M sm = ScoreMatrixM2M(self, models, scores=scores) return sm """ # TODO: see if this needs to be updated and provided: def optimize(self, model): raise NotImplementedError(("Optimization not implemented " "for Test '%s'" % self)) """ class RangeTest(Test): """Test if the model generates a number with a certain sign""" def __init__(self, observation, name=None): super(RangeTest, self).__init__(observation, name=name) required_capabilities = (ProducesNumber,) score_type = BooleanScore def validate_observation(self, observation): assert type(observation) in (tuple, list, set) assert len(observation) == 2 assert observation[1] > observation[0] def generate_prediction(self, model): return model.produce_number() def compute_score(self, observation, prediction): low = observation[0] high = observation[1] return self.score_type(low < prediction < high)
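

# --- Usage sketch (editor's addition, not part of the original module) ---
# Minimal illustration of how a Test subclass from this module can be exercised.
# `ConstModel` is a hypothetical toy model defined here only for the example;
# RangeTest, Model and ProducesNumber come from this module's imports. Because
# the module uses relative imports, run it as `python -m <package>.tests`.
if __name__ == "__main__":

    class ConstModel(Model, ProducesNumber):
        """A toy model that always produces the same number."""

        def __init__(self, constant, name=None):
            self.constant = constant
            super(ConstModel, self).__init__(name=name, constant=constant)

        def produce_number(self):
            return self.constant

    # The observation for RangeTest is a (low, high) pair.
    range_test = RangeTest(observation=(2, 5), name="Range of 2-5")
    score = range_test.judge(ConstModel(3, name="Const(3)"))
    print(score)  # Expected to be a passing BooleanScore.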
the-stack_0_5930
#FLM: Adjust Anchors __copyright__ = __license__ = """ Copyright (c) 2010-2012 Adobe Systems Incorporated. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __doc__ = """ Adjust Anchors v1.2 - Jul 12 2012 This script provides a UI for adjusting the position of anchors interactively. FontLab's own UI for ajusting anchors is too poor. Opening FontLab's Preview window and selecting the Anchors pane before running this script, will allow you to preview the adjustments even better. ================================================== Versions: v1.0 - Apr 29 2010 - Initial version. v1.1 - Jun 15 2012 - UI improvements. v1.2 - Jul 12 2012 - Fixed issue that affected single master fonts. """ listGlyphsSelected = [] def getgselectedglyphs(font, glyph, gindex): listGlyphsSelected.append(gindex) fl.ForSelected(getgselectedglyphs) def getMasterNames(masters, axes): global matrix masterNames = [] if masters > 1: for m in range(masters): mtx = matrix[m] masterName = '' for i in range(len(axes)): masterName += ' ' + axes[i][1] + str(mtx[i]) masterNames.append(masterName) return masterNames matrix = [ (0,0,0,0),(1,0,0,0),(0,1,0,0),(1,1,0,0),(0,0,1,0),(1,0,1,0),(0,1,1,0),(1,1,1,0), (0,0,0,1),(1,0,0,1),(0,1,0,1),(1,1,0,1),(0,0,1,1),(1,0,1,1),(0,1,1,1),(1,1,1,1) ] STYLE_RADIO = STYLE_CHECKBOX + cTO_CENTER def run(gIndex): masters = f[0].layers_number axes = f.axis masterNames = getMasterNames(masters, axes) increment = 0 if len(axes) == 3: increment = 90 elif len(axes) > 3: fl.Message("This macro does not support 4-axis fonts") return fl.EditGlyph(gIndex) # opens Glyph Window in case it's not open yet glyphBkupDict = {} # this will store a copy of the edited glyphs and will be used in case 'Cancel' is pressed class DialogClass: def __init__(self): self.d = Dialog(self) self.d.size = Point(660, 110 + 48*4 + increment) self.d.Center() self.d.title = 'Adjust Anchors' self.anchorList = [] self.anchorList_index = 0 self.anchorList_selected = 0 self.selectedAnchor = None self.glyph = f[gIndex] self.gIndex = gIndex self.gName = self.glyph.name self.gHasAnchors = 0 self.glyphList = [] self.glyphList_index = 0 self.glyphList_selected = 0 self.selectedglyph = None self.k_BIG_SHIFT = 20 self.k_MEDIUM_SHIFT = 5 self.k_SMALL_SHIFT = 1 self.Xshift = 0 self.Yshift = 0 self.Xorig = 0 self.Yorig = 0 self.Xfinal = 0 self.Yfinal = 0 self.RBmasterIndex = 0 if fl.layer == 0: self.RBmaster0 = 1 else: self.RBmaster0 = 0 if fl.layer == 1: self.RBmaster1 = 1 else: self.RBmaster1 = 0 if fl.layer == 2: self.RBmaster2 = 1 else: self.RBmaster2 = 0 if fl.layer == 
3: self.RBmaster3 = 1 else: self.RBmaster3 = 0 if fl.layer == 4: self.RBmaster4 = 1 else: self.RBmaster4 = 0 if fl.layer == 5: self.RBmaster5 = 1 else: self.RBmaster5 = 0 if fl.layer == 6: self.RBmaster6 = 1 else: self.RBmaster6 = 0 if fl.layer == 7: self.RBmaster7 = 1 else: self.RBmaster7 = 0 # Fill in the Anchor list for anchor in self.glyph.anchors: self.anchorList.append(anchor.name) # Fill in the Glyph list for g in f.glyphs: if len(g.anchors) > 0: self.glyphList.append(g.name) # Checks if the initially selected glyph has anchors if self.gName in self.glyphList: self.gHasAnchors = 1 posy = 10 + 48*0 # (xTop , yTop , xBot , yBot) self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT)) posy = 10 + 24*1 self.d.AddControl(LISTCONTROL, Rect( 10, posy, 150, posy+110), 'glyphList', STYLE_LIST, 'Glyphs') self.d.AddControl(LISTCONTROL, Rect(510, posy, 650, posy+110), 'anchorList', STYLE_LIST, 'Anchors') posy = 10 + 48*1 self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT)) posy = 10 + 48*2 self.d.AddControl(BUTTONCONTROL, Rect(160, posy, 200, posy+40), 'Xminus20', STYLE_BUTTON, '-'+ str(self.k_BIG_SHIFT)) self.d.AddControl(BUTTONCONTROL, Rect(210, posy, 250, posy+40), 'Xminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT)) self.d.AddControl(BUTTONCONTROL, Rect(260, posy, 300, posy+40), 'Xminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT)) self.d.AddControl(STATICCONTROL, Rect(310, posy, 323, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:') self.d.AddControl(STATICCONTROL, Rect(323, posy, 360, posy+20), 'Xshift', STYLE_LABEL+cTO_CENTER) self.d.AddControl(STATICCONTROL, Rect(310, posy+20, 323, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:') self.d.AddControl(STATICCONTROL, Rect(323, posy+20, 360, posy+40), 'Yshift', STYLE_LABEL+cTO_CENTER) self.d.AddControl(BUTTONCONTROL, Rect(360, posy, 400, posy+40), 'Xplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT)) self.d.AddControl(BUTTONCONTROL, Rect(410, posy, 450, posy+40), 'Xplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT)) self.d.AddControl(BUTTONCONTROL, Rect(460, posy, 500, posy+40), 'Xplus20', STYLE_BUTTON, '+'+ str(self.k_BIG_SHIFT)) for i in range(len(masterNames)): posy = 154 + 22*i self.d.AddControl(CHECKBOXCONTROL, Rect( 25, posy, 200, posy+20), 'RBmaster'+ str(i), STYLE_RADIO, masterNames[i]) posy = 10 + 48*3 self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT)) self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Original position') self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:') self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xorig', STYLE_LABEL+cTO_CENTER) self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:') self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yorig', STYLE_LABEL+cTO_CENTER) posy = 10 + 48*4 self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT)) self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Final position') self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:') self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xfinal', 
STYLE_LABEL+cTO_CENTER) self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:') self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yfinal', STYLE_LABEL+cTO_CENTER) #====== DIALOG FUNCTIONS ========= def on_Xminus20(self, code): if self.anchorList_selected: self.Xshift -= self.k_BIG_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Xminus5(self, code): if self.anchorList_selected: self.Xshift -= self.k_MEDIUM_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Xminus1(self, code): if self.anchorList_selected: self.Xshift -= self.k_SMALL_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Xplus1(self, code): if self.anchorList_selected: self.Xshift += self.k_SMALL_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Xplus5(self, code): if self.anchorList_selected: self.Xshift += self.k_MEDIUM_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Xplus20(self, code): if self.anchorList_selected: self.Xshift += self.k_BIG_SHIFT self.d.PutValue('Xshift') self.updateXfinal() self.update_glyph() def on_Yminus5(self, code): if self.anchorList_selected: self.Yshift -= self.k_MEDIUM_SHIFT self.d.PutValue('Yshift') self.updateYfinal() self.update_glyph() def on_Yminus1(self, code): if self.anchorList_selected: self.Yshift -= self.k_SMALL_SHIFT self.d.PutValue('Yshift') self.updateYfinal() self.update_glyph() def on_Yplus1(self, code): if self.anchorList_selected: self.Yshift += self.k_SMALL_SHIFT self.d.PutValue('Yshift') self.updateYfinal() self.update_glyph() def on_Yplus5(self, code): if self.anchorList_selected: self.Yshift += self.k_MEDIUM_SHIFT self.d.PutValue('Yshift') self.updateYfinal() self.update_glyph() def on_glyphList(self, code): self.glyphList_selected = 1 self.gHasAnchors = 1 self.d.GetValue('glyphList') self.gName = self.glyphList[self.glyphList_index] # Name of the glyph selected on the glyph list self.gIndex = f.FindGlyph(self.gName) fl.iglyph = self.gIndex # Switch the glyph on the Glyph Window self.glyph = f[self.gIndex] self.updateAnchorsList() self.resetDialogValues() def on_anchorList(self, code): self.anchorList_selected = 1 self.d.GetValue('anchorList') self.updateDialogValues() def on_RBmaster0(self, code): self.updateRBmaster(0) def on_RBmaster1(self, code): self.updateRBmaster(1) def on_RBmaster2(self, code): self.updateRBmaster(2) def on_RBmaster3(self, code): self.updateRBmaster(3) def on_RBmaster4(self, code): self.updateRBmaster(4) def on_RBmaster5(self, code): self.updateRBmaster(5) def on_RBmaster6(self, code): self.updateRBmaster(6) def on_RBmaster7(self, code): self.updateRBmaster(7) def on_ok(self, code): return 1 #====== RESET FUNCTIONS ========= def resetDialogValues(self): self.resetXorig() self.resetYorig() self.resetXshift() self.resetYshift() self.resetXfinal() self.resetYfinal() def resetXorig(self): self.Xorig = 0 self.d.PutValue('Xorig') def resetYorig(self): self.Yorig = 0 self.d.PutValue('Yorig') def resetXshift(self): self.Xshift = 0 self.d.PutValue('Xshift') def resetYshift(self): self.Yshift = 0 self.d.PutValue('Yshift') def resetXfinal(self): self.Xfinal = 0 self.d.PutValue('Xfinal') def resetYfinal(self): self.Yfinal = 0 self.d.PutValue('Yfinal') #====== UPDATE FUNCTIONS ========= def updateRBmaster(self, newIndex): self.RBmasterIndex = newIndex if self.RBmasterIndex == 0: self.RBmaster0 = 1 else: self.RBmaster0 = 0 if self.RBmasterIndex == 1: 
self.RBmaster1 = 1 else: self.RBmaster1 = 0 if self.RBmasterIndex == 2: self.RBmaster2 = 1 else: self.RBmaster2 = 0 if self.RBmasterIndex == 3: self.RBmaster3 = 1 else: self.RBmaster3 = 0 if self.RBmasterIndex == 4: self.RBmaster4 = 1 else: self.RBmaster4 = 0 if self.RBmasterIndex == 5: self.RBmaster5 = 1 else: self.RBmaster5 = 0 if self.RBmasterIndex == 6: self.RBmaster6 = 1 else: self.RBmaster6 = 0 if self.RBmasterIndex == 7: self.RBmaster7 = 1 else: self.RBmaster7 = 0 for v in ['RBmaster0','RBmaster1','RBmaster2','RBmaster3','RBmaster4','RBmaster5','RBmaster6','RBmaster7']: self.d.PutValue(v) fl.layer = self.RBmasterIndex if self.gHasAnchors and self.anchorList_selected: self.updateDialogValues() def updateAnchorsList(self): self.anchorList = [] for anchor in self.glyph.anchors: self.anchorList.append(anchor.name) self.d.PutValue('anchorList') self.anchorList_selected = 0 self.selectedAnchor = None def updateDialogValues(self): self.selectedAnchor = self.glyph.anchors[self.anchorList_index].Layer(fl.layer) self.updateXorig(self.selectedAnchor.x) self.updateYorig(self.selectedAnchor.y) self.resetXshift() self.resetYshift() self.updateXfinal() self.updateYfinal() def updateXorig(self, pos): self.Xorig = pos self.d.PutValue('Xorig') def updateYorig(self, pos): self.Yorig = pos self.d.PutValue('Yorig') def updateXfinal(self): if self.anchorList_selected: self.Xfinal = self.Xorig + self.Xshift self.d.PutValue('Xfinal') def updateYfinal(self): if self.anchorList_selected: self.Yfinal = self.Yorig + self.Yshift self.d.PutValue('Yfinal') def update_glyph(self): if self.anchorList_selected: if self.gIndex not in glyphBkupDict: # print "Made backup copy of '%s'" % self.glyph.name glyphBkupDict[self.gIndex] = Glyph(f[self.gIndex]) fl.SetUndo(self.gIndex) x = self.Xfinal y = self.Yfinal anchorPosition = Point(x, y) anchorIndex = self.anchorList_index anchor = self.glyph.anchors[anchorIndex] # In single master fonts the adjustment of the anchors cannot be handled by the codepath used for multiple # master fonts, because the UI gets updated but the changes are not stored in the VFB file upon saving. if masters == 1: anchor.x = x anchor.y = y else: anchor.SetLayer(fl.layer, anchorPosition) fl.UpdateGlyph(self.gIndex) def Run(self): return self.d.Run() d = DialogClass() if d.Run() == 1: f.modified = 1 else: for gID in glyphBkupDict: f[gID] = glyphBkupDict[gID] fl.UpdateGlyph(gID) f.modified = 0 if __name__ == "__main__": f = fl.font gIndex = fl.iglyph if f is None: fl.Message('No font opened') elif gIndex < 0: if len(listGlyphsSelected) == 0: fl.Message('Glyph selection is not valid') else: gIndex = listGlyphsSelected[0] run(gIndex) else: run(gIndex)
the-stack_0_5934
import numpy as np
import torch


def to_tensor(blob):
    if isinstance(blob, np.ndarray):
        return torch.from_numpy(blob)
    if isinstance(blob, (int, float)):
        # torch.Tensor(n) would allocate an uninitialized tensor of size n;
        # torch.tensor converts the scalar value itself.
        return torch.tensor(blob)
    if isinstance(blob, dict):
        ts = {}
        for k, v in blob.items():
            ts[k] = to_tensor(v)
        return ts
    if isinstance(blob, list):
        ts = [to_tensor(e) for e in blob]
        return ts
    if isinstance(blob, tuple):
        # namedtuple
        if hasattr(blob, '_fields'):
            ts = {k: to_tensor(getattr(blob, k)) for k in blob._fields}
            ts = type(blob)(**ts)
        else:
            ts = tuple(to_tensor(e) for e in blob)
        return ts


def to_device(blob, device, *args, **kwargs):
    if hasattr(blob, 'to'):
        return blob.to(device, *args, **kwargs)
    if isinstance(blob, torch.Tensor):
        return blob.to(device, *args, **kwargs)
    if isinstance(blob, dict):
        ts = {}
        for k, v in blob.items():
            ts[k] = to_device(v, device)
        return ts
    if isinstance(blob, list):
        ts = [to_device(e, device) for e in blob]
        return ts
    if isinstance(blob, tuple):
        # namedtuple
        if hasattr(blob, '_fields'):
            ts = {k: to_device(getattr(blob, k), device) for k in blob._fields}
            ts = type(blob)(**ts)
        else:
            ts = tuple(to_device(e, device) for e in blob)
        return ts
    return blob
    # raise ValueError('type of {} is not supported for to_device'.format(type(blob)))
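

# --- Usage sketch (editor's addition, not part of the original file) ---
# Convert a nested container of numpy arrays into torch tensors and move the
# result onto the available device; the example data below is arbitrary.
if __name__ == "__main__":
    batch = {
        "observations": np.zeros((2, 3)),
        "actions": [np.ones(4), np.arange(5)],
    }
    tensors = to_tensor(batch)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch_on_device = to_device(tensors, device)
    print(batch_on_device["observations"].device)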
the-stack_0_5935
""" odm2rest -------- A Python RESTful web service inteface for accessing data in an ODM2 database via Django rest swagger APIs. """ from __future__ import (absolute_import, division, print_function) import os from setuptools import find_packages, setup import versioneer here = os.path.abspath(os.path.dirname(__file__)) # Dependencies. with open('requirements.txt') as f: requirements = f.readlines() install_requires = [t.strip() for t in requirements] with open(os.path.join(here, 'README.md')) as readme: README = readme.read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name='odm2rest', version=versioneer.get_version(), packages=find_packages(), include_package_data=True, license='BSD License', description='A Python RESTful web service inteface for accessing data in an ' 'ODM2 database via Django rest swagger APIs', long_description=README, url='https://github.com/ODM2/ODM2RESTfulWebServices', author='Landung Setiawan', author_email='[email protected]', install_requires=install_requires, classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Framework :: Django :: 1.11', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
the-stack_0_5938
"""Window Covering devices.""" from ..extended_property import ( DURATION_HIGH, DURATION_LOW, ON_LEVEL, RAMP_RATE, X10_HOUSE, X10_UNIT, ) from ..groups import COVER from ..operating_flag import ( DUAL_LINE_ON, FORWARD_ON, KEY_BEEP_ON, LED_BLINK_ON_ERROR_OFF, LED_BLINK_ON_TX_ON, LED_ON, MOMENTARY_LINE_ON, NOT_3_WAY, PROGRAM_LOCK_ON, ) from .open_close_responder_base import OpenCloseResponderBase class WindowCovering(OpenCloseResponderBase): """Window Covering device.""" def __init__(self, address, cat, subcat, firmware=0x00, description="", model=""): """Init the WindowCovering class.""" super().__init__( address, cat, subcat, firmware, description, model, state_name=COVER ) def _register_operating_flags(self): """Register the operating and properties.""" super()._register_operating_flags() self._add_operating_flag(PROGRAM_LOCK_ON, 0, 0, 0, 1) self._add_operating_flag(LED_BLINK_ON_TX_ON, 0, 1, 2, 3) self._add_operating_flag(LED_ON, 0, 4, 0x0A, 0x0B, is_reversed=True) self._add_operating_flag(KEY_BEEP_ON, 0, 5, 0x0C, 0x0D) self._add_operating_flag(LED_BLINK_ON_ERROR_OFF, 2, 3, 0x15, 0x16) self._add_operating_flag(DUAL_LINE_ON, 3, 0, 0x1E, 0x1F) self._add_operating_flag(MOMENTARY_LINE_ON, 3, 1, 0x20, 0x21) self._add_operating_flag(NOT_3_WAY, 3, 3, 0x22, 0x23) self._add_operating_flag(FORWARD_ON, 3, 4, 0x24, 0x25) self._add_property(X10_HOUSE, 5, None) # 4 self._add_property(X10_UNIT, 6, None) # 4 self._add_property(RAMP_RATE, 7, 5) # Need to verify use_data position self._add_property(ON_LEVEL, 8, 6) self._add_property(DURATION_HIGH, 9, None) # 0x10 self._add_property(DURATION_LOW, 10, None) # 0x10
the-stack_0_5942
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import click

from calendar import month_abbr
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta

FILLED = u'\u25CF'
EMPTY = u'\u25CB'

row_label_formats = {
    'year': '{year:<{max_year_width}}',
    'age': 'Age {age:<{max_age_width}}'
}


class Date(click.ParamType):
    name = 'date'

    def __init__(self, format="%d-%m-%Y"):
        self.format = format

    def convert(self, value, param, ctx):
        try:
            return datetime.strptime(value, self.format).date()
        except ValueError:
            self.fail('%s is not a valid date' % value, param, ctx)


def header(fill=' ', default_width=9, widths={'Feb': 8}):
    return ''.join('{month:{fill}<{width}}'
                   .format(month=abbr, fill=fill, width=widths.get(abbr, default_width))
                   for abbr in month_abbr[1:])


# Week of the year
yweek = lambda d: timedelta(days=d.timetuple().tm_yday) // timedelta(weeks=1)


@click.command()
@click.option('--birth-date', '-d', type=Date(), help='Date of birth (dd-mm-YYYY)',
              prompt='Date of birth (dd-mm-YYYY)')
@click.option('--life-expectancy', '-l', 'expected_years', type=int, default=85,
              help='Number of years you expect to live')
@click.option('--row-label', '-r', type=click.Choice(['year', 'age']), default='year',
              help='Label for rows')
@click.option('--row-label-period', type=int, default=5,
              help='Show label after every duration')
@click.option('--highlight-date', '-h', multiple=True, type=Date(),
              help='Dates to highlight')
def main(birth_date, expected_years, row_label, row_label_period, highlight_date):
    expected_death_date = birth_date + relativedelta(years=expected_years)
    expected_death_year = expected_death_date.year
    birth_year = birth_date.year

    curr_date = date.today()
    with click.progressbar(range(1, 53),
                           label='{}/52 weeks of year'.format(yweek(curr_date))) as bar:
        for i in bar:
            if i == yweek(curr_date):
                break

    # ensures that the formatting won't break for those who are alive
    # between 9999 and 10000 A.D. and still using this for some reason
    max_year_width = len(str(expected_death_year)) + 1
    max_age_width = len(str(expected_years)) + 1

    fmt_dct = dict(age=expected_years, year=expected_death_year,
                   max_year_width=max_year_width, max_age_width=max_age_width)
    row_label_len = len(row_label_formats[row_label].format(**fmt_dct))

    # Normalize set of dates to highlight (using set for constant time lookup)
    highlight_set = set(date(d.year, 1, 1) + timedelta(weeks=yweek(d))
                        for d in highlight_date)

    for year in range(birth_year, expected_death_year + 1):
        if year == birth_year:
            # Print header on first iteration in loop
            click.echo(' ' * row_label_len, nl=False)
            click.echo(header())

        age = year - birth_year
        if age % row_label_period:
            click.echo(' ' * row_label_len, nl=False)
        else:
            fmt_dct = dict(age=age, year=year,
                           max_year_width=max_year_width, max_age_width=max_age_width)
            click.echo(row_label_formats[row_label].format(**fmt_dct), nl=False)

        date_iter = date(year, 1, 1)
        while date_iter.year == year:
            if birth_date < date_iter < curr_date:
                if date_iter in highlight_set:
                    click.secho(FILLED, nl=False, fg='red')
                else:
                    click.secho(FILLED, nl=False, fg='green')
            else:
                click.echo(EMPTY, nl=False)
            click.echo(' ', nl=False)
            date_iter += timedelta(weeks=1)
        click.echo('')


if __name__ == '__main__':
    main()
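
# --- Usage sketch (editor's addition) ---
# Hypothetical invocation, assuming the script is saved as life_calendar.py
# (the dates below are placeholders):
#
#     python life_calendar.py -d 17-03-1990 -l 85 -r age -h 01-06-2015
#
# This prints one row per year of expected life, with filled circles for
# weeks already lived and highlighted weeks rendered in red.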
the-stack_0_5944
import run_squad as rs import tokenization import collections import json import os import modeling import requests import math def read_squad_data(json_input, is_training): """Read a SQuAD json file into a list of SquadExample.""" # input_data = json_input["data"] input_data = json_input def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False #데이터를 받아오는 부분 examples = [] # 읽어온 input_data는 paragraphs와 title로 구분 되어있음 # paragraph는 질의응답셋인 qas와 문맥정보를 의미하는 context로 구성되어 있다. for entry in input_data: # input_date에서 각 데이터를 하나씩 불러 오고 # 데이터를 context 먼저 처리 paragraph_text = entry["context"] doc_tokens = [] # 띄어쓰기 기준으로 단어를 토큰으로 나눈다 char_to_word_offset = [] # 각 charater가 몇 번째 단어에 속하는지 순서를 0,1,2,...,n으로 나타낸다 prev_is_whitespace = True for c in paragraph_text: # context를 character 단위로 받아온다. if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: # character가 화이트스페이스가 아니고, 이전이 화이트페이스면 doc_tokens.append(c) # 최초 삽입 else: doc_tokens[-1] += c # 마지막 배열의 요소에 character들을 추 prev_is_whitespace = False #character가 화이트스페이스가 아니므로 false로 변경 char_to_word_offset.append(len(doc_tokens) - 1) #0 부터 시작으로 len -1 # qas_id = qa["id"] # 질의의 id question_text = entry["question"] #질문 데이터 start_position = None end_position = None orig_answer_text = None is_impossible = False start_position = -1 end_position = -1 orig_answer_text = "" example = rs.SquadExample( qas_id=1, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def process_inputs(input_data): bert_config = modeling.BertConfig.from_json_file(rs.FLAGS.bert_config_file) eval_examples = read_squad_data(input_data,is_training=False) eval_features = [] eval_writer = rs.FeatureWriter( filename=os.path.join("./colab_output", "train.tf_record"), is_training=False) def append_feature(feature): eval_features.append(feature) eval_writer.process_feature(feature) # 토크나이저에 사전과 do_lower_case 설정 tokenizer = tokenization.FullTokenizer( vocab_file=rs.FLAGS.vocab_file, do_lower_case=rs.FLAGS.do_lower_case) rs.convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=rs.FLAGS.max_seq_length, doc_stride=rs.FLAGS.doc_stride, max_query_length=rs.FLAGS.max_query_length, is_training=False, output_fn=append_feature) eval_writer.close() return eval_examples, eval_features def process_result(result): # unique_id = int(result["unique_ids"].int64_val[0]) # start_logits = [float(x) for x in result["start_logits"].float_val] # end_logits = [float(x) for x in result["end_logits"].float_val] unique_id = int(result["unique_ids"][0]) start_logits= result["start_logits"].tolist() end_logits = result["end_logits"].tolist() # start_logits 
= np.array(start_logits).reshape(batch_size, max_seq_length) # end_logits = np.array(end_logits).reshape(batch_size, max_seq_length) formatted_result = rs.RawResult( unique_id=unique_id, start_logits=start_logits[0], end_logits=end_logits[0]) return formatted_result def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, version_2_with_negative ): example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = rs.get_final_text(tok_text, orig_text, do_lower_case) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't inlude the empty option in the n-best, inlcude it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > rs.FLAGS.null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json return all_predictions, all_nbest_json def process_output(all_results, eval_examples, eval_features, input_data, n_best, n_best_size, max_answer_length): output_prediction_file = os.path.join(rs.FLAGS.output_dir, "predictions.json") output_nbest_file = os.path.join(rs.FLAGS.output_dir, "nbest_predictions.json") output_null_log_odds_file = os.path.join(rs.FLAGS.output_dir, "null_odds.json") all_predictions, all_nbest_json = write_predictions(eval_examples, eval_features, all_results, n_best_size=n_best_size, max_answer_length=max_answer_length, do_lower_case=True, version_2_with_negative=False) return all_predictions, all_nbest_json # re = [] # for i in range(len(all_predictions)): # id_ = input_data[i]["id"] # if n_best: # re.append(collections.OrderedDict({ # "id": id_, # "question": input_data[i]["question"], # "best_prediction": all_predictions[id_], # "n_best_predictions": all_nbest_json[id_] # })) # else: # re.append(collections.OrderedDict({ # "id": id_, # "question": input_data[i]["question"], # "best_prediction": all_predictions[id_] # })) # return re if __name__ == "__main__": input_data = { "options": { "n_best": True, "n_best_size": 3, "max_answer_length": 30 }, "data": [ { "id": "001", "question": "Who invented LSTM?", "context": "Many aspects of speech recognition were taken over by a deep learning method called long short-term memory (LSTM), a recurrent neural network published by Hochreiter and Schmidhuber in 1997.[51] LSTM RNNs avoid the vanishing gradient problem and can learn \"Very Deep Learning\" tasks[2] that require memories of events that happened thousands of discrete time steps before, which is important for speech. In 2003, LSTM started to become competitive with traditional speech recognizers on certain tasks.[52] Later it was combined with connectionist temporal classification (CTC)[53] in stacks of LSTM RNNs.[54] In 2015, Google's speech recognition reportedly experienced a dramatic performance jump of 49% through CTC-trained LSTM, which they made available through Google Voice Search." 
} ] } url="http://localhost:8501/v1/models/korquad_cpu_model:predict" print(type(input_data)) print(type(json.dumps(input_data))) json_input=json.dumps(input_data) example = process_inputs(input_data) # p_result=process_result(example[1][0]) input_ids = [] input_mask = [] segment_ids = [] unique_id= str(example[1][0].unique_id) for e in example[1][0].input_ids: input_ids.append(str(e)) for e in example[1][0].input_mask: input_mask.append(str(e)) for e in example[1][0].segment_ids: segment_ids.append(str(e)) pred_input = { "inputs":{ "examples":{ "unique_id": example[1][0].unique_id, "input_ids": example[1][0].input_ids, "input_mask": example[1][0].input_mask, "segment_ids": example[1][0].segment_ids, } } } pred_input5 = { "inputs": { "examples": { "unique_id": unique_id, "input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids, } } } pred_input2 = { "inputs": { "examples": [ input_ids, input_mask, segment_ids] } } pred_input3 = { "instances": [ unique_id, input_ids, input_mask, segment_ids] } # { # "unique_id": example[1][0].unique_id, # "input_ids": example[1][0].input_ids, # "input_mask": example[1][0].input_mask, # "segment_ids": example[1][0].segment_ids, # } # [ # example[1][0].unique_id, # example[1][0].input_ids, # example[1][0].input_mask, # example[1][0].segment_ids, # ] # pred_input={ # "instances":[ # 0, # example[1][0].input_ids, # example[1][0].input_mask, # example[1][0].segment_ids, # 0, # ] # # } print(pred_input3) # post j_data=json.dumps(pred_input3) # base64_data=base64.b64encode(j_data) r = requests.post(url, data=j_data) print(r.status_code) print(r.text)
the-stack_0_5945
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 09:47:26 2019

@author: Artem Los
"""

import xml.etree.ElementTree
import json
import base64
import datetime
import copy
import time
from licensing.internal import HelperMethods


class ActivatedMachine:
    def __init__(self, IP, Mid, Time, FriendlyName="", FloatingExpires=""):
        self.IP = IP
        self.Mid = Mid

        # TODO: check if time is int, and convert to datetime in this case.
        self.Time = Time
        self.FriendlyName = FriendlyName
        self.FloatingExpires = FloatingExpires


class Reseller:
    """
    Information about the reseller.
    """

    def __init__(self, Id, InviteId, ResellerUserId, Created, Name, Url,
                 Email, Phone, Description):
        self.Id = Id
        self.InviteId = InviteId
        self.ResellerUserId = ResellerUserId
        self.Created = Created
        self.Name = Name
        self.Url = Url
        self.Email = Email
        self.Phone = Phone
        self.Description = Description


class LicenseKey:

    def __init__(self, ProductId, ID, Key, Created, Expires, Period, F1, F2,
                 F3, F4, F5, F6, F7, F8, Notes, Block, GlobalId, Customer,
                 ActivatedMachines, TrialActivation, MaxNoOfMachines,
                 AllowedMachines, DataObjects, SignDate, Reseller, RawResponse):

        self.product_id = ProductId
        self.id = ID
        self.key = Key
        self.created = Created
        self.expires = Expires
        self.period = Period
        self.f1 = F1
        self.f2 = F2
        self.f3 = F3
        self.f4 = F4
        self.f5 = F5
        self.f6 = F6
        self.f7 = F7
        self.f8 = F8
        self.notes = Notes
        self.block = Block
        self.global_id = GlobalId
        self.customer = Customer
        self.activated_machines = ActivatedMachines
        self.trial_activation = TrialActivation
        self.max_no_of_machines = MaxNoOfMachines
        self.allowed_machines = AllowedMachines
        self.data_objects = DataObjects
        self.sign_date = SignDate
        self.reseller = Reseller
        self.raw_response = RawResponse

    @staticmethod
    def from_response(response):

        if response.result == "1":
            raise ValueError("The response did not contain any license key object since it was unsuccessful. Message '{0}'.".format(response.message))

        obj = json.loads(base64.b64decode(response.license_key).decode('utf-8'))

        reseller = None
        if "Reseller" in obj and obj["Reseller"] is not None:
            reseller = Reseller(**obj["Reseller"])

        return LicenseKey(obj["ProductId"], obj["ID"], obj["Key"],
                          datetime.datetime.fromtimestamp(obj["Created"]),
                          datetime.datetime.fromtimestamp(obj["Expires"]),
                          obj["Period"], obj["F1"], obj["F2"],
                          obj["F3"], obj["F4"], obj["F5"], obj["F6"], obj["F7"],
                          obj["F8"], obj["Notes"], obj["Block"], obj["GlobalId"],
                          obj["Customer"],
                          LicenseKey.__load_activated_machines(obj["ActivatedMachines"]),
                          obj["TrialActivation"],
                          obj["MaxNoOfMachines"], obj["AllowedMachines"], obj["DataObjects"],
                          datetime.datetime.fromtimestamp(obj["SignDate"]),
                          reseller, response)

    def save_as_string(self):
        """
        Save the license as a string that can later be read by load_from_string.
        """
        res = copy.copy(self.raw_response.__dict__)
        res["licenseKey"] = res["license_key"]
        res.pop("license_key", None)
        return json.dumps(res)

    @staticmethod
    def load_from_string(rsa_pub_key, string, signature_expiration_interval=-1):
        """
        Loads a license from a string generated by save_as_string.
        Note: if an error occurs, None will be returned. An error can occur
        if the license string has been tampered with or if the public key is
        incorrectly formatted.

        :param signature_expiration_interval: If the license key was signed,
        this method will check that no more than "signatureExpirationInterval"
        days have passed since the last activation.
        """
        response = Response("", "", "", "")

        try:
            response = Response.from_string(string)
        except Exception as ex:
            return None

        if response.result == "1":
            return None
        else:
            try:
                pubKey = RSAPublicKey.from_string(rsa_pub_key)

                if HelperMethods.verify_signature(response, pubKey):
                    licenseKey = LicenseKey.from_response(response)

                    if signature_expiration_interval > 0 and \
                            (licenseKey.sign_date +
                             datetime.timedelta(days=1 * signature_expiration_interval)
                             < datetime.datetime.utcnow()):
                        return None

                    return licenseKey
                else:
                    return None
            except Exception:
                return None

    @staticmethod
    def __load_activated_machines(obj):

        if obj is None:
            return None

        arr = []

        for item in obj:
            arr.append(ActivatedMachine(**item))

        return arr


class Response:

    def __init__(self, license_key, signature, result, message):
        self.license_key = license_key
        self.signature = signature
        self.result = result
        self.message = message

    @staticmethod
    def from_string(responseString):

        obj = json.loads(responseString)

        licenseKey = ""
        signature = ""
        result = 0
        message = ""

        if "licenseKey" in obj:
            licenseKey = obj["licenseKey"]

        if "signature" in obj:
            signature = obj["signature"]

        if "message" in obj:
            message = obj["message"]

        if "result" in obj:
            result = obj["result"]
        else:
            result = 1

        return Response(licenseKey, signature, result, message)


class RSAPublicKey:

    def __init__(self, modulus, exponent):
        self.modulus = modulus
        self.exponent = exponent

    @staticmethod
    def from_string(rsaPubKeyString):
        """
        The rsaPubKeyString can be found at https://app.cryptolens.io/User/Security.
        It should be of the following format:
        <RSAKeyValue><Modulus>...</Modulus><Exponent>AQAB</Exponent></RSAKeyValue>
        """
        rsaKey = xml.etree.ElementTree.fromstring(rsaPubKeyString)
        return RSAPublicKey(rsaKey.find('Modulus').text, rsaKey.find('Exponent').text)
the-stack_0_5949
import abc
from collections import OrderedDict

from torch import nn as nn

from utils.logging import logger
import utils.eval_util as eval_util
from utils.rng import get_global_pkg_rng_state
import utils.pytorch_util as ptu

import gtimer as gt
from replay_buffer import ReplayBuffer

from path_collector import MdpPathCollector, RemoteMdpPathCollector

from tqdm import trange
import ray

import torch
import numpy as np
import random


class BatchRLAlgorithm(metaclass=abc.ABCMeta):
    def __init__(
            self,
            trainer,
            exploration_data_collector: MdpPathCollector,
            remote_eval_data_collector: RemoteMdpPathCollector,
            replay_buffer: ReplayBuffer,
            batch_size,
            max_path_length,
            num_epochs,
            num_eval_steps_per_epoch,
            num_expl_steps_per_train_loop,
            num_trains_per_train_loop,
            num_train_loops_per_epoch=1,
            min_num_steps_before_training=0,
            optimistic_exp_hp=None,
    ):
        super().__init__()

        """
        The class state which should not mutate
        """
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_epochs = num_epochs
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
        self.min_num_steps_before_training = min_num_steps_before_training
        self.optimistic_exp_hp = optimistic_exp_hp

        """
        The class mutable state
        """
        self._start_epoch = 0

        """
        This class sets up the main training loop, so it needs references to the
        other high-level objects in the algorithm.

        Those objects maintain their own state and are responsible for saving
        and restoring it for checkpointing.
        """
        self.trainer = trainer

        self.expl_data_collector = exploration_data_collector
        self.remote_eval_data_collector = remote_eval_data_collector

        self.replay_buffer = replay_buffer

    def train(self, start_epoch=0):
        self._start_epoch = start_epoch
        self._train()

    def _train(self):

        # Fill the replay buffer to a minimum before training starts
        if self.min_num_steps_before_training > self.replay_buffer.num_steps_can_sample():
            init_expl_paths = self.expl_data_collector.collect_new_paths(
                self.trainer.policy,
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
            )
            self.replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)

        for epoch in gt.timed_for(
                trange(self._start_epoch, self.num_epochs),
                save_itrs=True,
        ):

            # To evaluate the policy remotely, we ship the policy params to the
            # remote evaluator. This could be made more efficient, but it is
            # currently very cheap due to the small network size.
            pol_state_dict = ptu.state_dict_cpu(self.trainer.policy)

            remote_eval_obj_id = self.remote_eval_data_collector.async_collect_new_paths.remote(
                self.max_path_length,
                self.num_eval_steps_per_epoch,
                discard_incomplete_paths=True,
                deterministic_pol=True,
                pol_state_dict=pol_state_dict)

            gt.stamp('remote evaluation submit')

            for _ in range(self.num_train_loops_per_epoch):
                new_expl_paths = self.expl_data_collector.collect_new_paths(
                    self.trainer.policy,
                    self.max_path_length,
                    self.num_expl_steps_per_train_loop,
                    discard_incomplete_paths=False,
                    optimistic_exploration=self.optimistic_exp_hp['should_use'],
                    optimistic_exploration_kwargs=dict(
                        policy=self.trainer.policy,
                        qfs=[self.trainer.qf1, self.trainer.qf2],
                        hyper_params=self.optimistic_exp_hp
                    )
                )
                gt.stamp('exploration sampling', unique=False)

                self.replay_buffer.add_paths(new_expl_paths)
                gt.stamp('data storing', unique=False)

                for _ in range(self.num_trains_per_train_loop):
                    train_data = self.replay_buffer.random_batch(
                        self.batch_size)
                    self.trainer.train(train_data)
                gt.stamp('training', unique=False)

            # Wait for eval to finish
            ray.get([remote_eval_obj_id])
            gt.stamp('remote evaluation wait')

            self._end_epoch(epoch)

    def _end_epoch(self, epoch):
        self._log_stats(epoch)

        self.expl_data_collector.end_epoch(epoch)
        ray.get([self.remote_eval_data_collector.end_epoch.remote(epoch)])

        self.replay_buffer.end_epoch(epoch)
        self.trainer.end_epoch(epoch)

        # We can only save the state of the program
        # after we call end epoch on all objects with internal state.
        # This is so that restoring from the saved state will
        # lead to identical result as if the program was left running.
        if epoch > 0:
            snapshot = self._get_snapshot(epoch)
            logger.save_itr_params(epoch, snapshot)
            gt.stamp('saving')

        logger.record_dict(_get_epoch_timings())
        logger.record_tabular('Epoch', epoch)

        write_header = True if epoch == 0 else False
        logger.dump_tabular(with_prefix=False, with_timestamp=False,
                            write_header=write_header)

    def _get_snapshot(self, epoch):
        snapshot = dict(
            trainer=self.trainer.get_snapshot(),
            exploration=self.expl_data_collector.get_snapshot(),
            evaluation_remote=ray.get(
                self.remote_eval_data_collector.get_snapshot.remote()),
            evaluation_remote_rng_state=ray.get(
                self.remote_eval_data_collector.get_global_pkg_rng_state.remote()
            ),
            replay_buffer=self.replay_buffer.get_snapshot()
        )

        # `epoch` indicates that the program state is snapshotted at the end of
        # this epoch, not at the beginning of it.
        snapshot['epoch'] = epoch

        # Save the state of various rng
        snapshot['global_pkg_rng_state'] = get_global_pkg_rng_state()

        return snapshot

    def _log_stats(self, epoch):
        logger.log("Epoch {} finished".format(epoch), with_timestamp=True)

        """
        Replay Buffer
        """
        logger.record_dict(
            self.replay_buffer.get_diagnostics(),
            prefix='replay_buffer/'
        )

        """
        Trainer
        """
        logger.record_dict(self.trainer.get_diagnostics(), prefix='trainer/')

        """
        Exploration
        """
        logger.record_dict(
            self.expl_data_collector.get_diagnostics(),
            prefix='exploration/'
        )
        expl_paths = self.expl_data_collector.get_epoch_paths()
        logger.record_dict(
            eval_util.get_generic_path_information(expl_paths),
            prefix="exploration/",
        )

        """
        Remote Evaluation
        """
        logger.record_dict(
            ray.get(self.remote_eval_data_collector.get_diagnostics.remote()),
            prefix='remote_evaluation/',
        )
        remote_eval_paths = ray.get(
            self.remote_eval_data_collector.get_epoch_paths.remote())
        logger.record_dict(
            eval_util.get_generic_path_information(remote_eval_paths),
            prefix="remote_evaluation/",
        )

        """
        Misc
        """
        gt.stamp('logging')

    def to(self, device):
        for net in self.trainer.networks:
            net.to(device)


def _get_epoch_timings():
    times_itrs = gt.get_times().stamps.itrs
    times = OrderedDict()
    epoch_time = 0
    for key in sorted(times_itrs):
        time = times_itrs[key][-1]
        epoch_time += time
        times['time/{} (s)'.format(key)] = time
    times['time/epoch (s)'] = epoch_time
    times['time/total (s)'] = gt.get_times().total
    return times
the-stack_0_5950
from cvxopt import matrix
from cvxopt.lapack import syev
import numpy as np


class LatentPCA:
    """ Structured Extension for Principal Component Analysis.
        Written by Nico Goernitz, TU Berlin, 2014
    """

    def __init__(self, sobj):
        self.sobj = sobj  # structured object
        self.sol = None  # (vector) solution vector (after training, of course)
        self.latent = None

    def fit(self, max_iter=50):
        """ Solve the optimization problem with a
            sequential convex programming/DC-programming
            approach:
            Iteratively, find the most likely configuration of
            the latent variables and then, optimize for the
            model parameter using fixed latent states.
        """
        samples = self.sobj.get_num_samples()
        dims = self.sobj.get_num_dims()

        self.latent = np.random.randint(0, self.sobj.get_num_states(), samples)
        self.sol = np.random.randn(dims)

        psi = np.zeros((dims, samples))
        old_psi = np.zeros((dims, samples))
        threshold = 0.

        iter = 0
        # terminate if objective function value doesn't change much
        while iter < max_iter and (iter < 2 or np.sum(np.abs(psi - old_psi)) >= 0.001):
            print('Starting iteration {0}.'.format(iter))
            print(np.sum(np.abs(psi - old_psi)))
            iter += 1
            old_psi = psi.copy()

            # 1. linearize
            # for the current solution compute the
            # most likely latent variable configuration
            mean = np.zeros(dims)
            for i in range(samples):
                _, self.latent[i], psi[:, i] = self.sobj.argmax(self.sol, i)
                mean += psi[:, i]
            mean /= float(samples)  # plain float; np.float is removed in modern NumPy
            mpsi = psi - mean.reshape((dims, 1))

            # 2. solve the intermediate convex optimization problem
            A = mpsi.dot(mpsi.T)
            W = np.zeros((dims, dims))
            syev(matrix(A), matrix(W), jobz='V')
            self.sol = np.array(A[:, dims - 1]).ravel()

        return self.sol, self.latent, threshold

    def apply(self, pred_sobj):
        """ Application of the StructuredPCA:

            score = max_z <sol*, Psi(x,z)>
            latent_state = argmax_z <sol*, Psi(x,z)>
        """
        samples = pred_sobj.get_num_samples()
        vals = np.zeros(samples)
        structs = []
        for i in range(samples):
            vals[i], struct, _ = pred_sobj.argmax(self.sol, i)
            structs.append(struct)

        return vals, structs
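

# --- Hedged usage sketch (not part of the original file) ---
# A small, self-contained illustration of the structured-object interface that
# LatentPCA expects (get_num_samples / get_num_dims / get_num_states / argmax).
# `ToySOBJ` is a hypothetical stand-in, not a class from the original project.
if __name__ == '__main__':
    class ToySOBJ:
        def __init__(self, X, num_states=2):
            self.X = X  # (dims, samples) data matrix
            self.num_states = num_states

        def get_num_samples(self):
            return self.X.shape[1]

        def get_num_dims(self):
            return self.X.shape[0]

        def get_num_states(self):
            return self.num_states

        def argmax(self, sol, i):
            # score each latent state by shifting the i-th example and pick the best one
            psis = [self.X[:, i] + z for z in range(self.num_states)]
            scores = [sol.dot(p) for p in psis]
            best = int(np.argmax(scores))
            return scores[best], best, psis[best]

    toy = ToySOBJ(np.random.randn(5, 20))
    sol, latent, _ = LatentPCA(toy).fit(max_iter=10)
    print(sol.shape, latent[:5])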
the-stack_0_5951
# -*- coding: utf-8 -*-
from brawlpython.sessions import SyncSession
from brawlpython.api_toolkit import unique, same
from configobj import ConfigObj
import pytest
import time

url_uuid = "http://httpbin.org/uuid"

config = ConfigObj("config.ini")
api_key = config["DEFAULT"].get("API_KEY")


# pytest.yield_fixture is deprecated; plain pytest.fixture supports yield-style fixtures
@pytest.fixture
def factory():
    client = None

    def maker(*args, **kwargs):
        nonlocal client
        client = SyncSession(*args, **kwargs)
        return client

    yield maker

    if client is not None:
        client.close()


@pytest.fixture
def client(factory):
    return factory(api_key, cache_ttl=1)


def test_sync_init():
    client = SyncSession(api_key)
    assert isinstance(client, SyncSession)


def test_closing(client):
    assert not client.closed

    for _ in 1, 2:
        client.close()
        assert client.closed


def test_cache(client):
    responses = [client.get(url_uuid) for _ in range(2)]
    assert same(responses)

    time.sleep(2)

    assert client.get(url_uuid) != responses[0]


def test_no_cache(factory):
    client = factory(api_key, use_cache=False)

    assert unique([client.get(url_uuid) for _ in range(2)])

    assert unique(client.gets([url_uuid] * 2))


if __name__ == "__main__":
    import run_tests

    run_tests.run(__file__)
the-stack_0_5953
# -*- coding: utf-8 -*-

"""
String formatting functionality for some primitive types.

We do this since it depends on several object implementations at once
(e.g. Buffer and String), which themselves need say, integers.
"""

from __future__ import print_function, division, absolute_import

import math

import flypy.types
from flypy import jit

#===------------------------------------------------------------------===
# Formatters
#===------------------------------------------------------------------===

@jit
def sprintf(buf, fmt, x):
    """
    Print `x` to `buf` according to `format`.

    Returns the number of characters written.
    """
    fmt = flypy.runtime.obj.core.as_cstring(fmt)
    n = len(buf)
    result = flypy.libc.snprintf(buf.pointer(), n, fmt, x)
    #if result >= n:
    #    raise ValueError("Unable to print to buffer:")
    return result + 1  # n bytes + '\0'


@jit
def format_static(fmt, x, n):
    """
    Format 'x' according to 'fmt' using a static buffer size 'n'.

        - upcast to a double
        - use snprintf
        - resize buffer according to # of bytes written
    """
    buf = flypy.runtime.obj.core.newbuffer(flypy.types.char, n)
    n = sprintf(buf, fmt, x)
    buf.resize(n)
    return flypy.types.String(buf)
the-stack_0_5957
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.core.urlresolvers import reverse
from django.test import TestCase, Client

from ....cart.app import cart_app
from ....cart.models import Cart, CART_SESSION_KEY
from ....delivery.tests import TestDeliveryProvider
from ....order import handler as order_handler
from ....order.models import Order
from ....payment import ConfirmationFormNeeded
from ....payment.tests import TestPaymentProvider
from ....product.tests import DeadParrot

from ..common.views import prepare_order, confirmation
from . import views

urlpatterns = patterns('',
    url(r'^cart/', include(cart_app.urls)),
    url(r'^checkout/', include('satchless.contrib.checkout.singlestep.urls')),
)


class TestPaymentProviderWithConfirmation(TestPaymentProvider):
    def confirm(self, order):
        raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')


class CheckoutTest(TestCase):
    urls = 'satchless.contrib.checkout.singlestep.tests'

    def _setup_settings(self, custom_settings):
        original_settings = {}
        for setting_name, value in custom_settings.items():
            if hasattr(settings, setting_name):
                original_settings[setting_name] = getattr(settings, setting_name)
            setattr(settings, setting_name, value)
        return original_settings

    def _teardown_settings(self, original_settings, custom_settings=None):
        custom_settings = custom_settings or {}
        for setting_name, value in custom_settings.items():
            if setting_name in original_settings:
                setattr(settings, setting_name, value)
            else:
                delattr(settings, setting_name)

    def setUp(self):
        self.parrot = DeadParrot.objects.create(slug='parrot',
                                                species="Hyacinth Macaw")
        self.dead_parrot = self.parrot.variants.create(color='blue',
                                                       looks_alive=False)

        self.custom_settings = {
            'SATCHLESS_DELIVERY_PROVIDERS': [TestDeliveryProvider],
            'SATCHLESS_PAYMENT_PROVIDERS': [TestPaymentProviderWithConfirmation],
        }
        self.original_settings = self._setup_settings(self.custom_settings)
        order_handler.init_queues()

        self.anon_client = Client()

    def tearDown(self):
        self._teardown_settings(self.original_settings, self.custom_settings)
        order_handler.init_queues()

    def _test_status(self, url, method='get', *args, **kwargs):
        status_code = kwargs.pop('status_code', 200)
        client = kwargs.pop('client_instance', Client())
        data = kwargs.pop('data', {})

        response = getattr(client, method)(url, data=data)
        self.assertEqual(response.status_code, status_code,
            'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
                url.decode('utf-8'), args, kwargs, status_code,
                response.status_code, response.content.decode('utf-8')))
        return response

    def _get_or_create_cart_for_client(self, client, typ='satchless_cart'):
        self._test_status(reverse('satchless-cart-view'),
                          client_instance=client)
        return Cart.objects.get(pk=client.session[CART_SESSION_KEY % typ],
                                typ=typ)

    def _get_or_create_order_for_client(self, client):
        self._test_status(reverse(prepare_order), method='post',
                          client_instance=client, status_code=302)
        order_pk = client.session.get('satchless_order', None)
        return Order.objects.get(pk=order_pk)

    def _get_order_items(self, order):
        order_items = set()
        for group in order.groups.all():
            order_items.update(group.items.values_list('product_variant',
                                                       'quantity'))
        return order_items

    def test_checkout_view_passes_with_correct_data(self):
        cart = self._get_or_create_cart_for_client(self.anon_client)
        cart.replace_item(self.dead_parrot, 1)
        order = self._get_or_create_order_for_client(self.anon_client)
        response = self._test_status(
            reverse(views.checkout, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, data={'email': '[email protected]'})
        dg = response.context['delivery_group_forms']
        data = {'billing_first_name': 'First',
                'billing_last_name': 'Last',
                'billing_street_address_1': 'Via Rodeo 1',
                'billing_city': 'Beverly Hills',
                'billing_country': 'US',
                'billing_country_area': 'AZ',
                'billing_phone': '555-555-5555',
                'billing_postal_code': '90210'}
        for g, typ, form in dg:
            data[form.add_prefix('email')] = '[email protected]'
        response = self._test_status(
            reverse(views.checkout, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, status_code=302,
            method='post', data=data, follow=True)
        order = Order.objects.get(pk=order.pk)
        self.assertRedirects(response,
                             reverse(confirmation,
                                     kwargs={'order_token': order.token}))
        self.assertEqual(order.status, 'payment-pending')

    def test_confirmation_view_redirects_when_order_or_payment_is_missing(self):
        cart = self._get_or_create_cart_for_client(self.anon_client)
        cart.replace_item(self.dead_parrot, 1)
        order = self._get_or_create_order_for_client(self.anon_client)
        # without payment
        self._test_status(
            reverse(confirmation, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, status_code=302)
        # finish checkout view
        response = self._test_status(
            reverse(views.checkout, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, data={'email': '[email protected]'})
        dg = response.context['delivery_group_forms']
        data = {'billing_first_name': 'First',
                'billing_last_name': 'Last',
                'billing_street_address_1': 'Via Rodeo 1',
                'billing_city': 'Beverly Hills',
                'billing_country': 'US',
                'billing_country_area': 'AZ',
                'billing_phone': '555-555-5555',
                'billing_postal_code': '90210'}
        for g, typ, form in dg:
            data[form.add_prefix('email')] = '[email protected]'
        response = self._test_status(
            reverse(views.checkout, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, status_code=302,
            method='post', data=data, follow=True)
        self._test_status(
            reverse(confirmation, kwargs={'order_token': order.token}),
            client_instance=self.anon_client, status_code=200)
the-stack_0_5958
from rest_framework import serializers

from data_ocean.models import Status, Authority, TaxpayerType, Register


class StatusSerializer(serializers.ModelSerializer):
    class Meta:
        model = Status
        fields = ['name']


class AuthoritySerializer(serializers.ModelSerializer):
    class Meta:
        model = Authority
        fields = ['id', 'name', 'code']


class TaxpayerTypeSerializer(serializers.ModelSerializer):
    name = serializers.CharField(max_length=200)

    class Meta:
        model = TaxpayerType
        fields = ['id', 'name']


class RegisterSerializer(serializers.ModelSerializer):
    status = serializers.CharField(source='get_status_display')

    class Meta:
        model = Register
        fields = [
            'id', 'name', 'name_eng', 'source_name', 'source_url_address',
            'source_api_address', 'source_register_id', 'api_list',
            'api_detail', 'status', 'total_records', 'updated_at'
        ]
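

# --- Hedged usage sketch (not part of the original file) ---
# Typical DRF usage of these serializers inside a view or a shell session,
# assuming the data_ocean models above are migrated and populated; the
# queryset below is illustrative, not taken from the source.
#
# registers = Register.objects.all()[:10]
# payload = RegisterSerializer(registers, many=True).data  # list of plain dicts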
the-stack_0_5960
# -*- coding: utf-8 -*-

# From https://github.com/wiseodd/hipsternet/blob/master/hipsternet/im2col.py

import numpy as np


def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0  # the width check must use field_width
    out_height = int((H + 2 * padding - field_height) / stride + 1)
    out_width = int((W + 2 * padding - field_width) / stride + 1)

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return k.astype(int), i.astype(int), j.astype(int)


def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """ An implementation of im2col based on some fancy indexing """
    # Zero-pad the input
    p = padding
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')

    k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)

    cols = x_padded[:, k, i, j]
    C = x.shape[1]
    cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
    return cols


def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1, stride=1):
    """ An implementation of col2im based on fancy indexing and np.add.at """
    N, C, H, W = x_shape
    H_padded, W_padded = H + 2 * padding, W + 2 * padding
    x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
    k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride)
    cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
    cols_reshaped = cols_reshaped.transpose(2, 0, 1)
    np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
    if padding == 0:
        return x_padded
    return x_padded[:, :, padding:-padding, padding:-padding]
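

# --- Hedged usage sketch (not part of the original file) ---
# A small shape check: for a 4x4 input with 3x3 patches, padding=1 and stride=1,
# the output spatial size is (4 + 2*1 - 3)/1 + 1 = 4, so im2col yields a
# (C*3*3, N*4*4) matrix and col2im maps it back to the original image shape.
if __name__ == '__main__':
    x = np.arange(2 * 3 * 4 * 4, dtype=np.float64).reshape(2, 3, 4, 4)
    cols = im2col_indices(x, 3, 3, padding=1, stride=1)
    assert cols.shape == (3 * 3 * 3, 2 * 4 * 4)
    back = col2im_indices(cols, x.shape, 3, 3, padding=1, stride=1)
    assert back.shape == x.shape  # note: overlapping patches are summed, so values differ from x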
the-stack_0_5962
#!/usr/bin/env python3
# coding:utf-8

import email

# Save all of this puzzle's commented content as email.txt
message = open("email.txt", "rb").read().decode()
mail = email.message_from_string(message)
audio = mail.get_payload(0).get_payload(decode=True)

f = open("indian.wav", "wb")  # audio content: "sorry"
f.write(audio)
f.close()
the-stack_0_5964
from zzcore import StdAns
import requests
import sxtwl
from datetime import datetime
from config import HFWEATHERKEY


class Ans(StdAns):
    def GETMSG(self):
        msg = f'早上好,今天是{calendar()}\n\n'
        msg += getWeather() + '\n\n'
        # t = requests.get('https://v1.hitokoto.cn/?c=k&encode=text').text
        t = ("只要不失去你的崇高,整个世界都会向你敞开")
        msg += t
        return msg


def getWeather(id='101120206'):
    def wemoji(text):
        if '雪' in text:
            return text + '🌨'
        if '雨' in text:
            return text + '🌧️'
        if '阴' in text:
            return text + '⛅'
        if '云' in text:
            return text + '🌤'
        if '晴' in text:
            return text + '☀️'
        return text

    url = 'https://devapi.heweather.net/v7/weather/3d'
    params = {
        'location': id,
        'key': HFWEATHERKEY,
    }
    r = requests.get(url=url, params=params).json()
    tdw = r['daily'][0]
    # ndw = r['daily'][1]
    # weather = f"今日日间{wemoji(tdw['textDay'])},温度{tdw['tempMin']}~{tdw['tempMax']}℃,{tdw['windDirDay']}{tdw['windScaleDay']}级;夜间{wemoji(tdw['textNight'])},{tdw['windDirNight']}{tdw['windScaleNight']}级。明日日间{wemoji(ndw['textDay'])},温度{ndw['tempMin']}~{ndw['tempMax']}℃。"
    weather = f"今日日间{wemoji(tdw['textDay'])},温度{tdw['tempMin']}~{tdw['tempMax']}℃,{tdw['windDirDay']}{tdw['windScaleDay']}级;夜间{wemoji(tdw['textNight'])},{tdw['windDirNight']}{tdw['windScaleNight']}级。"
    if float(tdw['precip']) > 0:
        weather += '\n记得收好衣服,出门带伞~'
    return weather


def calendar():
    # Current term status: one of 教学 (teaching term), 寒假 (winter break), 暑假 (summer break), etc.
    NowStatus = "暑假"
    # The week of the year in which term-week counting starts
    StartWeek = 28
    # Start date of this year's postgraduate entrance exam (kaoyan)
    KaoYanDate = datetime(2021, 12, 21)

    ymc = ["冬", "腊", "正", "二", "三", "四", "五", "六", "七", "八", "九", "十"]
    rmc = ["初一", "初二", "初三", "初四", "初五", "初六", "初七", "初八", "初九", "初十",
           "十一", "十二", "十三", "十四", "十五", "十六", "十七", "十八", "十九", "二十",
           "廿一", "廿二", "廿三", "廿四", "廿五", "廿六", "廿七", "廿八", "廿九", "三十", "卅一"]
    zmc = ["一", "二", "三", "四", "五", "六", "天"]

    nowdate = datetime.now()
    djs = (KaoYanDate - nowdate).days - 1
    y = nowdate.year
    m = nowdate.month
    d = nowdate.day
    zc = int(nowdate.strftime("%W")) - StartWeek
    z = zmc[nowdate.weekday()]

    lunar = sxtwl.Lunar()
    lunarday = lunar.getDayBySolar(y, m, d)
    lunardaychinese = f"{ymc[lunarday.Lmc]}月{rmc[lunarday.Ldi]}"
    if lunarday.Lleap:
        lunardaychinese = "闰" + lunardaychinese

    cal = f"{m}月{d}日,{lunardaychinese},{NowStatus}第{zc}周,星期{z}\n\n距离 2022 考研还有 {djs} 天"

    return cal
the-stack_0_5966
from mldesigner import command_component
from azure.ai.ml.entities._job.resource_configuration import ResourceConfiguration

resources = ResourceConfiguration()
resources.instance_count = 2


@command_component(resources=resources)
def basic_component(
    port1: str,
    param1: int,
):
    """
    module run logic goes here
    """
    return port1
the-stack_0_5968
from typing import Any, List, Union, Optional, Dict import gym import numpy as np import pettingzoo from functools import reduce from ding.envs import BaseEnv, BaseEnvTimestep, FrameStackWrapper from ding.torch_utils import to_ndarray, to_list from ding.envs.common.common_function import affine_transform from ding.utils import ENV_REGISTRY, import_module @ENV_REGISTRY.register('petting_zoo') class PettingZooEnv(BaseEnv): # Now only supports simple_spread_v2. # All agents' observations should have the same shape. def __init__(self, cfg: dict) -> None: self._cfg = cfg self._init_flag = False self._replay_path = None self._env_family = self._cfg.env_family self._env_id = self._cfg.env_id # self._num_agents = self._cfg.n_agent self._num_landmarks = self._cfg.n_landmark self._continuous_actions = self._cfg.get('continuous_actions', False) self._max_cycles = self._cfg.get('max_cycles', 25) self._act_scale = self._cfg.get('act_scale', False) self._agent_specific_global_state = self._cfg.get('agent_specific_global_state', False) if self._act_scale: assert self._continuous_actions, 'Only continuous action space env needs act_scale' def reset(self) -> np.ndarray: if not self._init_flag: import_module(['pettingzoo.{}.{}'.format(self._env_family, self._env_id)]) self._env = pettingzoo.__dict__[self._env_family].__dict__[self._env_id].parallel_env( N=self._cfg.n_agent, continuous_actions=self._continuous_actions, max_cycles=self._max_cycles ) if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed: np_seed = 100 * np.random.randint(1, 1000) self._env.seed(self._seed + np_seed) elif hasattr(self, '_seed'): self._env.seed(self._seed) if self._replay_path is not None: self._env = gym.wrappers.Monitor( self._env, self._replay_path, video_callable=lambda episode_id: True, force=True ) obs = self._env.reset() if not self._init_flag: # Because agents cannot be accessed before reset self._agents = self._env.agents self._num_agents = len(self._env.agents) self._action_space = gym.spaces.Dict({agent: self._env.action_space(agent) for agent in self._agents}) single_agent_obs_space = self._env.action_space(self._agents[0]) if isinstance(single_agent_obs_space, gym.spaces.Box): self._action_dim = single_agent_obs_space.shape elif isinstance(single_agent_obs_space, gym.spaces.Discrete): self._action_dim = (single_agent_obs_space.n, ) else: raise Exception('Only support `Box` or `Discrte` obs space for single agent.') # only for env 'simple_spread_v2', n_agent = 5 # now only for the case that each agent in the team have the same obs structure and corresponding shape. if not self._cfg.agent_obs_only: self._observation_space = gym.spaces.Dict({ 'agent_state': gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30) dtype=np.float32 ) , 'global_state': gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(70,), dtype=np.float32 ), 'agent_alone_state': gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(self._num_agents, 22), dtype=np.float32 ), 'agent_alone_padding_state': gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30) dtype=np.float32 ), 'action_mask': gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(self._num_agents, self._action_dim[0]), # (self._num_agents, 5) dtype=np.float32 )}) # whether use agent_specific_global_state. 
It is usually used in AC multiagent algos, e.g., mappo, masac, etc. if self._agent_specific_global_state: agent_specifig_global_state = gym.spaces.Box( low = float("-inf"), high = float("inf"), shape = (self._num_agents, self._env.observation_space('agent_0').shape[0] + 70), dtype = np.float32 ) self._observation_space['global_state'] = agent_specifig_global_state else: # for case when env.agent_obs_only=True self._observation_space = gym.spaces.Box( low=float("-inf"), high=float("inf"), shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30) dtype=np.float32 ) self._reward_space = gym.spaces.Dict( { agent: gym.spaces.Box(low=float("-inf"), high=float("inf"), shape=(1, ), dtype=np.float32) for agent in self._agents } ) self._init_flag = True # self._final_eval_reward = {agent: 0. for agent in self._agents} self._final_eval_reward = 0. self._step_count = 0 obs_n = self._process_obs(obs) return obs_n def close(self) -> None: if self._init_flag: self._env.close() self._init_flag = False def render(self) -> None: self._env.render() def seed(self, seed: int, dynamic_seed: bool = True) -> None: self._seed = seed self._dynamic_seed = dynamic_seed np.random.seed(self._seed) def step(self, action: np.ndarray) -> BaseEnvTimestep: self._step_count += 1 assert isinstance(action, np.ndarray), type(action) action = self._process_action(action) if self._act_scale: for agent in self._agents: # print(action[agent]) # print(self.action_space[agent]) # print(self.action_space[agent].low, self.action_space[agent].high) action[agent] = affine_transform( action[agent], min_val=self.action_space[agent].low, max_val=self.action_space[agent].high ) obs, rew, done, info = self._env.step(action) obs_n = self._process_obs(obs) rew_n = np.array([sum([rew[agent] for agent in self._agents])]) # collide_sum = 0 # for i in range(self._num_agents): # collide_sum += info['n'][i][1] # collide_penalty = self._cfg.get('collide_penal', self._num_agent) # rew_n += collide_sum * (1.0 - collide_penalty) # rew_n = rew_n / (self._cfg.get('max_cycles', 25) * self._num_agent) self._final_eval_reward += rew_n # occupied_landmarks = info['n'][0][3] # if self._step_count >= self._max_step or occupied_landmarks >= self._n_agent \ # or occupied_landmarks >= self._num_landmarks: # done_n = True # else: # done_n = False done_n = reduce(lambda x, y: x and y, done.values()) or self._step_count >= self._max_cycles # for agent in self._agents: # self._final_eval_reward[agent] += rew[agent] if done_n: # or reduce(lambda x, y: x and y, done.values()) info['final_eval_reward'] = self._final_eval_reward # for agent in rew: # rew[agent] = to_ndarray([rew[agent]]) return BaseEnvTimestep(obs_n, rew_n, done_n, info) def enable_save_replay(self, replay_path: Optional[str] = None) -> None: if replay_path is None: replay_path = './video' self._replay_path = replay_path def _process_obs(self, obs: 'torch.Tensor') -> np.ndarray: # noqa obs = np.array([obs[agent] for agent in self._agents]).astype(np.float32) if self._cfg.get('agent_obs_only', False): return obs ret = {} # Raw agent observation structure is -- # [self_vel, self_pos, landmark_rel_positions, other_agent_rel_positions, communication] # where `communication` are signals from other agents (two for each agent in `simple_spread_v2`` env) # agent_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2). # Stacked observation. 
Contains # - agent itself's state(velocity + position) # - position of items that the agent can observe(e.g. other agents, landmarks) # - communication ret['agent_state'] = obs # global_state: Shape (n_agent * (2 + 2) + n_landmark * 2 + n_agent * (n_agent - 1) * 2, ). # 1-dim vector. Contains # - all agents' state(velocity + position) + # - all landmarks' position + # - all agents' communication ret['global_state'] = np.concatenate( [ obs[0, 2:-(self._num_agents - 1) * 2], # all agents' position + all landmarks' position obs[:, 0:2].flatten(), # all agents' velocity obs[:, -(self._num_agents - 1) * 2:].flatten() # all agents' communication ] ) # agent_specific_global_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2 + n_agent * (2 + 2) + n_landmark * 2 + n_agent * (n_agent - 1) * 2). # 2-dim vector. contains # - agent_state info # - global_state info if self._agent_specific_global_state: ret['global_state'] = np.concatenate( [ ret['agent_state'], np.expand_dims(ret['global_state'], axis=0).repeat(5, axis=0) ], axis=1 ) # agent_alone_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2). # Stacked observation. Exclude other agents' positions from agent_state. Contains # - agent itself's state(velocity + position) + # - landmarks' positions (do not include other agents' positions) # - communication ret['agent_alone_state'] = np.concatenate( [ obs[:, 0:(4 + self._num_agents * 2)], # agent itself's state + landmarks' position obs[:, -(self._num_agents - 1) * 2:], # communication ], 1 ) # agent_alone_padding_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2). # Contains the same information as agent_alone_state; # But 0-padding other agents' positions. ret['agent_alone_padding_state'] = np.concatenate( [ obs[:, 0:(4 + self._num_agents * 2)], # agent itself's state + landmarks' position np.zeros((self._num_agents, (self._num_agents - 1) * 2), np.float32), # Other agents' position(0-padding) obs[:, -(self._num_agents - 1) * 2:] # communication ], 1 ) # action_mask: All actions are of use(either 1 for discrete or 5 for continuous). Thus all 1. ret['action_mask'] = np.ones((self._num_agents, *self._action_dim)) return ret def _process_action(self, action: 'torch.Tensor') -> Dict[str, np.ndarray]: # noqa dict_action = {} for i, agent in enumerate(self._agents): agent_action = action[i] if agent_action.shape == (1, ): agent_action = agent_action.squeeze() # 0-dim array dict_action[agent] = agent_action return dict_action def random_action(self) -> np.ndarray: random_action = self.action_space.sample() for k in random_action: if isinstance(random_action[k], np.ndarray): pass elif isinstance(random_action[k], int): random_action[k] = to_ndarray([random_action[k]], dtype=np.int64) return random_action def __repr__(self) -> str: return "DI-engine PettingZoo Env" @property def agents(self) -> List[str]: return self._agents @property def observation_space(self) -> gym.spaces.Space: return self._observation_space @property def action_space(self) -> gym.spaces.Space: return self._action_space @property def reward_space(self) -> gym.spaces.Space: return self._reward_space
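

# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of constructing this env wrapper. The config object is
# assumed to support attribute access plus .get() (e.g. easydict.EasyDict, which
# the attribute lookups above rely on); the field values below are illustrative,
# and env_family='mpe' is an inference from the simple_spread_v2 default.
#
# from easydict import EasyDict
#
# cfg = EasyDict(dict(
#     env_family='mpe',
#     env_id='simple_spread_v2',
#     n_agent=5,
#     n_landmark=5,
#     max_cycles=25,
#     agent_obs_only=False,
#     agent_specific_global_state=False,
#     continuous_actions=False,
# ))
# env = PettingZooEnv(cfg)
# obs = env.reset()
# action = np.stack([env.action_space[agent].sample() for agent in env.agents])
# timestep = env.step(action)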
the-stack_0_5970
import copy from datetime import datetime import threading import uuid from optuna import distributions # NOQA from optuna.exceptions import DuplicatedStudyError from optuna.storages import base from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX from optuna.study import StudyDirection from optuna.study import StudySummary from optuna.trial import FrozenTrial from optuna.trial import TrialState from optuna import type_checking if type_checking.TYPE_CHECKING: from typing import Any # NOQA from typing import Dict # NOQA from typing import List # NOQA from typing import Optional # NOQA from typing import Tuple # NOQA class InMemoryStorage(base.BaseStorage): """Storage class that stores data in memory of the Python process. This class is not supposed to be directly accessed by library users. """ def __init__(self): # type: () -> None self._trial_id_to_study_id_and_number = {} # type: Dict[int, Tuple[int, int]] self._study_name_to_id = {} # type: Dict[str, int] self._studies = {} # type: Dict[int, _StudyInfo] self._max_study_id = -1 self._max_trial_id = -1 self._lock = threading.RLock() def __getstate__(self): # type: () -> Dict[Any, Any] state = self.__dict__.copy() del state["_lock"] return state def __setstate__(self, state): # type: (Dict[Any, Any]) -> None self.__dict__.update(state) self._lock = threading.RLock() def create_new_study(self, study_name=None): # type: (Optional[str]) -> int with self._lock: study_id = self._max_study_id + 1 self._max_study_id += 1 if study_name is not None: if study_name in self._study_name_to_id: raise DuplicatedStudyError else: study_uuid = str(uuid.uuid4()) study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid self._studies[study_id] = _StudyInfo(study_name) self._study_name_to_id[study_name] = study_id return study_id def delete_study(self, study_id): # type: (int) -> None with self._lock: self._check_study_id(study_id) for trial in self._studies[study_id].trials: del self._trial_id_to_study_id_and_number[trial._trial_id] study_name = self._studies[study_id].name del self._study_name_to_id[study_name] del self._studies[study_id] def set_study_direction(self, study_id, direction): # type: (int, StudyDirection) -> None with self._lock: self._check_study_id(study_id) study = self._studies[study_id] if study.direction != StudyDirection.NOT_SET and study.direction != direction: raise ValueError( "Cannot overwrite study direction from {} to {}.".format( study.direction, direction ) ) study.direction = direction def set_study_user_attr(self, study_id, key, value): # type: (int, str, Any) -> None with self._lock: self._check_study_id(study_id) self._studies[study_id].user_attrs[key] = value def set_study_system_attr(self, study_id, key, value): # type: (int, str, Any) -> None with self._lock: self._check_study_id(study_id) self._studies[study_id].system_attrs[key] = value def get_study_id_from_name(self, study_name): # type: (str) -> int with self._lock: if study_name not in self._study_name_to_id: raise KeyError("No such study {}.".format(study_name)) return self._study_name_to_id[study_name] def get_study_id_from_trial_id(self, trial_id): # type: (int) -> int with self._lock: self._check_trial_id(trial_id) return self._trial_id_to_study_id_and_number[trial_id][0] def get_study_name_from_id(self, study_id): # type: (int) -> str with self._lock: self._check_study_id(study_id) return self._studies[study_id].name def get_study_direction(self, study_id): # type: (int) -> StudyDirection with self._lock: self._check_study_id(study_id) return 
self._studies[study_id].direction def get_study_user_attrs(self, study_id): # type: (int) -> Dict[str, Any] with self._lock: self._check_study_id(study_id) return self._studies[study_id].user_attrs def get_study_system_attrs(self, study_id): # type: (int) -> Dict[str, Any] with self._lock: self._check_study_id(study_id) return self._studies[study_id].system_attrs def get_all_study_summaries(self): # type: () -> List[StudySummary] with self._lock: return [self._build_study_summary(study_id) for study_id in self._studies.keys()] def _build_study_summary(self, study_id: int) -> StudySummary: study = self._studies[study_id] return StudySummary( study_name=study.name, direction=study.direction, best_trial=copy.deepcopy(self._get_trial(study.best_trial_id)) if study.best_trial_id is not None else None, user_attrs=copy.deepcopy(study.user_attrs), system_attrs=copy.deepcopy(study.system_attrs), n_trials=len(study.trials), datetime_start=min( [trial.datetime_start for trial in self.get_all_trials(study_id, deepcopy=False)] ) if study.trials else None, study_id=study_id, ) def create_new_trial(self, study_id, template_trial=None): # type: (int, Optional[FrozenTrial]) -> int with self._lock: self._check_study_id(study_id) if template_trial is None: trial = self._create_running_trial() else: trial = copy.deepcopy(template_trial) trial_id = self._max_trial_id + 1 self._max_trial_id += 1 trial.number = len(self._studies[study_id].trials) trial._trial_id = trial_id self._trial_id_to_study_id_and_number[trial_id] = (study_id, trial.number) self._studies[study_id].trials.append(trial) self._update_cache(trial_id, study_id) return trial_id @staticmethod def _create_running_trial(): # type: () -> FrozenTrial return FrozenTrial( trial_id=-1, # dummy value. number=-1, # dummy value. state=TrialState.RUNNING, params={}, distributions={}, user_attrs={}, system_attrs={}, value=None, intermediate_values={}, datetime_start=datetime.now(), datetime_complete=None, ) def set_trial_state(self, trial_id, state): # type: (int, TrialState) -> bool with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) self.check_trial_is_updatable(trial_id, trial.state) if state == TrialState.RUNNING and trial.state != TrialState.WAITING: return False trial.state = state if state.is_finished(): trial.datetime_complete = datetime.now() self._set_trial(trial_id, trial) study_id = self._trial_id_to_study_id_and_number[trial_id][0] self._update_cache(trial_id, study_id) else: self._set_trial(trial_id, trial) return True def set_trial_param(self, trial_id, param_name, param_value_internal, distribution): # type: (int, str, float, distributions.BaseDistribution) -> bool with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) study_id = self._trial_id_to_study_id_and_number[trial_id][0] # Check param distribution compatibility with previous trial(s). if param_name in self._studies[study_id].param_distribution: distributions.check_distribution_compatibility( self._studies[study_id].param_distribution[param_name], distribution ) # Check param has not been set; otherwise, return False. if param_name in trial.params: return False # Set param distribution. self._studies[study_id].param_distribution[param_name] = distribution # Set param. 
trial = copy.copy(trial) trial.params = copy.copy(trial.params) trial.params[param_name] = distribution.to_external_repr(param_value_internal) trial.distributions = copy.copy(trial.distributions) trial.distributions[param_name] = distribution self._set_trial(trial_id, trial) return True def get_trial_number_from_id(self, trial_id): # type: (int) -> int with self._lock: self._check_trial_id(trial_id) return self._trial_id_to_study_id_and_number[trial_id][1] def get_best_trial(self, study_id): # type: (int) -> FrozenTrial with self._lock: self._check_study_id(study_id) best_trial_id = self._studies[study_id].best_trial_id if best_trial_id is None: raise ValueError("No trials are completed yet.") return self.get_trial(best_trial_id) def get_trial_param(self, trial_id, param_name): # type: (int, str) -> float with self._lock: trial = self._get_trial(trial_id) distribution = trial.distributions[param_name] return distribution.to_internal_repr(trial.params[param_name]) def set_trial_value(self, trial_id, value): # type: (int, float) -> None with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) self.check_trial_is_updatable(trial_id, trial.state) trial.value = value self._set_trial(trial_id, trial) def _update_cache(self, trial_id: int, study_id: int) -> None: trial = self._get_trial(trial_id) if trial.state != TrialState.COMPLETE: return best_trial_id = self._studies[study_id].best_trial_id if best_trial_id is None: self._studies[study_id].best_trial_id = trial_id return best_trial = self._get_trial(best_trial_id) assert best_trial is not None best_value = best_trial.value new_value = trial.value if best_value is None: self._studies[study_id].best_trial_id = trial_id return # Complete trials do not have `None` values. 
assert new_value is not None if self.get_study_direction(study_id) == StudyDirection.MAXIMIZE: if best_value < new_value: self._studies[study_id].best_trial_id = trial_id else: if best_value > new_value: self._studies[study_id].best_trial_id = trial_id def set_trial_intermediate_value(self, trial_id, step, intermediate_value): # type: (int, int, float) -> bool with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) values = copy.copy(trial.intermediate_values) if step in values: return False values[step] = intermediate_value trial.intermediate_values = values self._set_trial(trial_id, trial) return True def set_trial_user_attr(self, trial_id, key, value): # type: (int, str, Any) -> None with self._lock: self._check_trial_id(trial_id) trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) trial.user_attrs = copy.copy(trial.user_attrs) trial.user_attrs[key] = value self._set_trial(trial_id, trial) def set_trial_system_attr(self, trial_id, key, value): # type: (int, str, Any) -> None with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) trial.system_attrs = copy.copy(trial.system_attrs) trial.system_attrs[key] = value self._set_trial(trial_id, trial) def get_trial(self, trial_id): # type: (int) -> FrozenTrial with self._lock: return self._get_trial(trial_id) def _get_trial(self, trial_id: int) -> FrozenTrial: self._check_trial_id(trial_id) study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] return self._studies[study_id].trials[trial_number] def _set_trial(self, trial_id: int, trial: FrozenTrial) -> None: study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] self._studies[study_id].trials[trial_number] = trial def get_all_trials(self, study_id, deepcopy=True): # type: (int, bool) -> List[FrozenTrial] with self._lock: self._check_study_id(study_id) if deepcopy: return copy.deepcopy(self._studies[study_id].trials) else: return self._studies[study_id].trials[:] def get_n_trials(self, study_id, state=None): # type: (int, Optional[TrialState]) -> int with self._lock: self._check_study_id(study_id) if state is None: return len(self._studies[study_id].trials) return sum( trial.state == state for trial in self.get_all_trials(study_id, deepcopy=False) ) def _check_study_id(self, study_id): # type: (int) -> None if study_id not in self._studies: raise KeyError("No study with study_id {} exists.".format(study_id)) def _check_trial_id(self, trial_id: int) -> None: if trial_id not in self._trial_id_to_study_id_and_number: raise KeyError("No trial with trial_id {} exists.".format(trial_id)) class _StudyInfo: def __init__(self, name: str) -> None: self.trials = [] # type: List[FrozenTrial] self.param_distribution = {} # type: Dict[str, distributions.BaseDistribution] self.user_attrs = {} # type: Dict[str, Any] self.system_attrs = {} # type: Dict[str, Any] self.name = name # type: str self.direction = StudyDirection.NOT_SET self.best_trial_id = None # type: Optional[int]
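

# --- Hedged usage sketch (not part of the original file) ---
# A small, self-contained walk-through of the storage API defined above:
# create a study, register a trial, report a value, and read the best trial back.
if __name__ == "__main__":
    storage = InMemoryStorage()
    study_id = storage.create_new_study(study_name="demo-study")
    storage.set_study_direction(study_id, StudyDirection.MINIMIZE)

    trial_id = storage.create_new_trial(study_id)
    storage.set_trial_value(trial_id, 0.25)
    storage.set_trial_state(trial_id, TrialState.COMPLETE)

    print(storage.get_best_trial(study_id).value)  # -> 0.25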
the-stack_0_5971
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Permission
from majora2 import models
from tatl import models as tmodels
from django.utils import timezone


class Command(BaseCommand):
    help = "Load a list of organisations"

    def add_arguments(self, parser):
        parser.add_argument('filename')

    def handle(self, *args, **options):
        su = User.objects.get(is_superuser=True)
        fh = open(options["filename"])
        for line in fh:
            fields = line.strip().split('\t')
            code = fields[1]
            name = fields[0]

            org, created = models.Institute.objects.get_or_create(code=code, name=name)
            org.save()

            if created:
                treq = tmodels.TatlPermFlex(
                    user=su,
                    substitute_user=None,
                    used_permission="majora2.management.commands.load_orgs",
                    timestamp=timezone.now(),
                    content_object=org,
                )
                treq.save()
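

# --- Hedged usage note (not part of the original file) ---
# Invoked like any Django management command; the file name and the TSV layout
# (name<TAB>code per line) are assumptions read off the parsing logic above:
#
#   python manage.py load_orgs organisations.tsv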
the-stack_0_5975
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from skbio.util import classproperty, overrides from ._nucleotide_sequence import NucleotideSequence from ._iupac_sequence import IUPACSequence class RNA(NucleotideSequence): """Store RNA sequence data and optional associated metadata. Only characters in the IUPAC RNA character set [1]_ are supported. Parameters ---------- sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1') Characters representing the RNA sequence itself. metadata : dict, optional Arbitrary metadata which applies to the entire sequence. positional_metadata : Pandas DataFrame consumable, optional Arbitrary per-character metadata. For example, quality data from sequencing reads. Must be able to be passed directly to the Pandas DataFrame constructor. validate : bool, optional If ``True``, validation will be performed to ensure that all sequence characters are in the IUPAC RNA character set. If ``False``, validation will not be performed. Turning off validation will improve runtime performance. If invalid characters are present, however, there is **no guarantee that operations performed on the resulting object will work or behave as expected.** Only turn off validation if you are certain that the sequence characters are valid. To store sequence data that is not IUPAC-compliant, use ``Sequence``. case_insenstive : bool, optional If ``True``, lowercase sequence characters will be converted to uppercase characters in order to be valid IUPAC RNA characters. Attributes ---------- values metadata positional_metadata alphabet gap_chars nondegenerate_chars degenerate_chars degenerate_map complement_map See Also -------- DNA References ---------- .. [1] Nomenclature for incompletely specified bases in nucleic acid sequences: recommendations 1984. Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030. A Cornish-Bowden Examples -------- >>> from skbio import RNA >>> s = RNA('ACCGAAU') >>> s RNA('ACCGAAU', length=7, has_metadata=False, has_positional_metadata=False) Convert lowercase characters to uppercase: >>> s = RNA('AcCGaaU', lowercase=True) >>> s RNA('ACCGAAU', length=7, has_metadata=False, has_positional_metadata=False) """ @classproperty @overrides(NucleotideSequence) def complement_map(cls): comp_map = { 'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y', 'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H', 'H': 'D', 'V': 'B', 'N': 'N' } comp_map.update({c: c for c in cls.gap_chars}) return comp_map @classproperty @overrides(IUPACSequence) def nondegenerate_chars(cls): return set("ACGU") @classproperty @overrides(IUPACSequence) def degenerate_map(cls): return { "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"), "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"), "H": set("ACU"), "V": set("ACG"), "N": set("ACGU") }
the-stack_0_5978
""" The unit test module for AutoTVM dialect. """ # pylint:disable=missing-docstring, redefined-outer-name, invalid-name # pylint:disable=unused-argument, unused-import, wrong-import-position, ungrouped-imports import argparse import glob import os import tempfile from copy import deepcopy import json import mock import numpy as np import pytest from moto import mock_dynamodb2 from lorien.util import is_dialect_enabled if not is_dialect_enabled("autotvm"): pytest.skip("AutoTVM dialect is not available", allow_module_level=True) from tvm import autotvm from tvm.autotvm.measure import MeasureInput, MeasureResult from tvm.autotvm.task.space import ConfigEntity from lorien.database.table import create_table from lorien.dialect.tvm_dial.autotvm_dial.job import ( AutoTVMJob, AutoTVMJobConfigs, create_autotvm_tuner, ) from lorien.dialect.tvm_dial.autotvm_dial.extract_from_model import extract_from_models from lorien.dialect.tvm_dial.autotvm_dial.extract_from_record import extract_from_records from lorien.dialect.tvm_dial.autotvm_dial.result import AutoTVMRecords, AutoTVMTuneResult from lorien.dialect.tvm_dial.autotvm_dial.workload import AutoTVMWorkload from lorien.tune.result import TuneErrorCode from lorien.util import dump_to_yaml, load_from_yaml @pytest.fixture def fixture_autotvm_workload(): # This fixture workload has 429 configs. workload = AutoTVMWorkload() workload.task_name = "dense_nopack.x86" workload.args = [ ["TENSOR", [1, 9216], "float32"], ["TENSOR", [4096, 9216], "float32"], None, "float32", ] workload.lib = "topi" workload.target = "llvm" return workload def gen_x86_conv2d_log_record(target, n_config, data, weight, stride, padding, dilation): records = [] # Generate configs for 3 different data layouts to test the commit mechanism. layouts = [(1, 8), (2, 16), (4, 4)] assert n_config % len(layouts) == 0 # Generate records to mimic tuning logs. inp = [ target, "conv2d_NCHWc.x86", [ ["TENSOR", data, "float32"], ["TENSOR", weight, "float32"], (stride, stride), (padding, padding, padding, padding), (dilation, dilation), "NCHW", "NCHW", "float32", ], {}, ] latencies = np.random.uniform(1e-6, 1e-5, n_config) for idx in range(0, n_config, len(layouts)): for sid, layout in enumerate(layouts): entity = [ ["tile_ic", "sp", [-1, layout[0]]], ["tile_oc", "sp", [-1, layout[1]]], ["tile_ow", "sp", [-1, 1]], ["unroll_kw", "ot", False], ] records.append( { "input": inp, "config": {"index": idx, "code_hash": "", "entity": entity}, "result": [[latencies[idx + sid]], 0, 0, idx + sid], "version": 0.2, "tvm_version": "0.7.dev1", } ) return records def gen_cuda_conv2d_log_record(n_config, data, weight, stride, padding, dilation): records = [] # Generate records to mimic tuning logs. 
inp = [ "cuda", "conv2d_nchw.cuda", [ ["TENSOR", data, "float32"], ["TENSOR", weight, "float32"], (stride, stride), (padding, padding, padding, padding), (dilation, dilation), "float32", ], {}, ] latencies = np.random.uniform(1e-6, 1e-5, n_config) for idx in range(n_config): entity = [ ["tile_f", "sp", [-1, 1, 1, idx + 1]], ["tile_y", "sp", [-1, 1, 1, 1]], ["tile_x", "sp", [-1, 1, 1, 1]], ["tile_rc", "sp", [-1, 1]], ["tile_ry", "sp", [-1, 1]], ["tile_rx", "sp", [-1, 1]], ["auto_unroll_max_step", "ot", 0], ["unroll_explicit", "ot", 0], ] records.append( { "input": inp, "config": {"index": idx, "code_hash": "", "entity": entity}, "result": [[latencies[idx]], 0, 0, idx], "version": 0.2, "tvm_version": "0.7.dev1", } ) return records def gen_dense_log_record_w_cblas(target, n_config, shape_a, shape_b): records = [] # Generate records to mimic tuning logs. assert n_config > 1, "Must have at least one non-vendor library record" n_config -= 1 inp = [ target, "dense_pack.x86", [["TENSOR", shape_a, "float32"], ["TENSOR", shape_b, "float32"], None, "float32"], {}, ] latencies = np.random.uniform(1e-6, 1e-5, n_config) for idx in range(n_config): entity = [ ["tile_y", "sp", [-1, 1, idx + 1]], ["tile_x", "sp", [-1, 1, 1]], ["tile_k", "sp", [-1, 1]], ] records.append( { "input": inp, "config": {"index": idx, "code_hash": "", "entity": entity}, "result": [[latencies[idx]], 0, 0, idx], "version": 0.2, "tvm_version": "0.7.dev1", } ) # Add one vendor library record. inp = [ target, "dense_cblas.x86", [["TENSOR", shape_a, "float32"], ["TENSOR", shape_b, "float32"], None, "float32"], {}, ] records.append( { "input": inp, "config": {"index": 0, "code_hash": "", "entity": []}, "result": [[5e-7], 0, 0, 0], "version": 0.2, "tvm_version": "0.7.dev1", } ) return records def test_workload(): # pylint:disable=missing-docstring, redefined-outer-name workload = AutoTVMWorkload() workload.target = "cuda -model=v100 -libs=cublas" # Test invalid arguments caused task creation failure workload.args = [[1, 3, 224, 224], [32, 3, 3, 3]] with pytest.raises(RuntimeError): workload.to_task() workload.args = [ ["TENSOR", [1, 3, 224, 224], "float32"], ["TENSOR", [32, 3, 3, 3], "float32"], ] # Test missing task definition with pytest.raises(RuntimeError): workload.to_task() workload.task_name = "conv2d_nchw_winograd.cuda" # Test invalid workload for the TOPI schedule. conv2d winograd on CUDA only accepts stide 1. workload.args += [[2, 2], [1, 1, 1, 1], [1, 1], "float32"] with pytest.raises(RuntimeError): workload.to_task() workload.args[-4] = [1, 1] task = workload.to_task() assert isinstance(workload.to_job(), AutoTVMJob) # Test load from task. -libs should be removed from target since conv2d_nchw_winograd.cuda # does not depend on it. workload_from_task = AutoTVMWorkload.from_task(task) assert ( workload_from_task.target == "cuda -keys=cuda,gpu -max_num_threads=1024 -model=v100 -thread_warp_size=32" ) # Other than that should be identical. workload_from_task.target = workload.target assert workload == workload_from_task task.target = None with pytest.raises(RuntimeError): AutoTVMWorkload.from_task(task) # Test dump and load from YAML workload_str = dump_to_yaml(workload) assert workload == load_from_yaml(workload_str, AutoTVMWorkload) workload2 = deepcopy(workload) # Different argument values. workload2.args[-2] = [0, 0] assert workload > workload2 # Different argument numbers. workload2.args = workload2.args[:-1] assert workload > workload2 # Different target. 
workload2.target = "cuda -model=zz" assert workload < workload2 # Test loading invalid workload with pytest.raises(RuntimeError): load_from_yaml(workload_str.replace("TENSOR", ""), AutoTVMWorkload) # Test mutation workload = AutoTVMWorkload() workload.task_name = "conv2d_NCHWc.x86" workload.target = "llvm" workload.args = [ ["TENSOR", [1, 3, 224, 224], "float32"], ["TENSOR", [32, 3, 3, 3], "float32"], [1, 1], [1, 1, 1, 1], [1, 1], "NCHW", "NCHW", "float32", ] # A rule to mutate batch size and channel rules = {(0, 1, 0): "[1, 2, 3, 4]", (0, 1, 1): "[v, v * 2, v * 4]"} mutated = workload.mutate(rules) assert len(mutated) == 12 # Wrong index rules = {(0, 1, 0, 0): "[1, 2, 3, 4]"} with pytest.raises(RuntimeError): workload.mutate(rules) # Wrong description rules = {(0, 1, 0): "[a, a * 2]"} with pytest.raises(RuntimeError): workload.mutate(rules) def test_create_autotvm_tuner(fixture_autotvm_workload): task = fixture_autotvm_workload.to_task() create_autotvm_tuner("xgb", task) create_autotvm_tuner("ga", task) create_autotvm_tuner("random", task) create_autotvm_tuner("gridsearch", task) with pytest.raises(RuntimeError): create_autotvm_tuner("wrong-tuner", task) @mock_dynamodb2 def test_job_n_configs_n_commit_n_query(mocker, fixture_autotvm_workload): table_name = "lorien-test" arn = create_table(table_name, region_name="us-west-2") workload = fixture_autotvm_workload job = workload.to_job() assert isinstance(job, AutoTVMJob) assert not job.is_target_compatible("cuda") task = workload.to_task() configs = argparse.Namespace( tuner="random", ntrial=4, test=1, repeat=1, min=400, db="{ region_name: us-west-2 }", commit_table_name=table_name, commit_nbest=1, commit_workload=False, commit_log_to=None, ) job_configs = job.create_job_configs(configs) job_configs.commit_options["table-arn"] = arn assert isinstance(job_configs, AutoTVMJobConfigs) assert job_configs.tune_options assert job_configs.measure_options assert job_configs.check_tvm_build_config() # Localize with RPC runner rpc_config = argparse.Namespace(device="test-device", runner_port=188875) job_configs.localize("llvm", configs=rpc_config) with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-commit-") as temp_dir: # Localize with local runner job_configs = job.create_job_configs(configs) job_configs.tune_options["tune_dir"] = temp_dir job_configs.commit_options["table-arn"] = arn job_configs.tvm_build_config = {} job_configs.localize("llvm") def mock_tuner_no_valid(_, task): class MockTuner: def tune(self, n_trial, early_stopping, measure_option, callbacks): for _ in range(2): res = mock.MagicMock() res.error_no = 2 callbacks[1](None, [None], [res]) return MockTuner() mocker.patch( "lorien.dialect.tvm_dial.autotvm_dial.job.create_autotvm_tuner" ).side_effect = mock_tuner_no_valid job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options) assert job.result.error_code == TuneErrorCode.NO_VALID_RESULT def mock_tuner(_, task): class MockTuner: def __init__(self, task): self.task = task def tune(self, n_trial, early_stopping, measure_option, callbacks): # Write to log file to test commit inp = MeasureInput("llvm", self.task, ConfigEntity(0, "", {}, [])) ret = MeasureResult([10], 0, 20, 0) callbacks[0](None, [inp], [ret]) inp = MeasureInput("llvm", self.task, ConfigEntity(1, "", {}, [])) ret = MeasureResult([1e8], 2, 20, 0) callbacks[0](None, [inp], [ret]) # Update metadata res = mock.MagicMock() res.error_no = 0 res.costs = [1, 1, 1] inp = mock.MagicMock() inp.task = mock.MagicMock() inp.task.flop = 1e9 
callbacks[1](None, [inp], [res]) return MockTuner(task) mocker.patch( "lorien.dialect.tvm_dial.autotvm_dial.job.create_autotvm_tuner" ).side_effect = mock_tuner # Do not commit job.tune(job_configs.tune_options, job_configs.measure_options, commit_options=None) assert job.result.error_code == TuneErrorCode.NORMAL assert "tune_logs" in job.result.metadata # Success job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options) assert job.result.error_code == TuneErrorCode.NORMAL assert "tune_logs" not in job.result.metadata # Test failed to localize mock_check_tvm_build_config = mock.MagicMock() mock_check_tvm_build_config.return_value = False job_configs.check_tvm_build_config = mock_check_tvm_build_config with pytest.raises(RuntimeError): job_configs.localize("llvm") log_file = os.path.join(temp_dir, "tune.log") inps = [ MeasureInput("llvm", task, ConfigEntity(1, "", {}, [])), MeasureInput("llvm", task, ConfigEntity(2, "", {}, [])), ] ress = [MeasureResult([1e8], 2, 20, 0), MeasureResult([1e2], 0, 20, 0)] autotvm.callback.log_to_file(log_file)(None, inps, ress) # Add other records to test the filter. with open(log_file, "a") as filep: records = gen_dense_log_record_w_cblas( "llvm -mcpu=core-avx2 -libs=cblas", 5, [100, 1024], [256, 1024] ) for record in records: filep.write("{}\n".format(json.dumps(record))) records = AutoTVMTuneResult.create_records_by_workloads(log_file, 1, workload) assert len(records) == 1 assert records[0].target_key == "llvm -keys=cpu -link-params=0", records[0].target_key assert records[0].alter_key == "llvm_cpu", records[0].alter_key assert ( # pylint: disable=line-too-long records[0].workload_key == "dense_nopack.x86#_TENSOR__1_9216__float32_#_TENSOR__4096_9216__float32_#None#float32" ), records[0].workload_key job.result.commit_tuning_log( workload, log_file, table_name, nbest=1, region_name="us-west-2" ) job.result.commit_tuning_log(None, log_file, table_name, nbest=1, region_name="us-west-2") records = AutoTVMRecords(task.target, workload.get_workload_key()) records.query(table_name, region_name="us-west-2") assert len(records) == 1 records = AutoTVMRecords(task.target, workload.get_workload_key()) records.query(table_name, use_alter_key=True, region_name="us-west-2") assert len(records) == 1 # Do not provide workload key to query all records with the same target records = AutoTVMRecords("llvm", workload_key=None) records.query(table_name, region_name="us-west-2") assert len(records) == 1 def test_extract_from_model(): configs = argparse.Namespace( gcv=["alexnet", "alexnet: { data: [1, 3, 224, 224]}"], target=["llvm -libs=cblas"], tf=[], tflite=[], onnx=[], keras=[], torch=[], mxnet=[], ) workloads = extract_from_models(configs) assert len(workloads) == 14, "\nWorkloads:\n%s" % "\n".join([str(wkl) for wkl in workloads]) # Test failure. configs = argparse.Namespace( gcv=["alexnet_wrong_name"], target=["llvm"], tf=[], tflite=[], onnx=[], keras=[], torch=[], mxnet=[], ) workloads = extract_from_models(configs) assert len(workloads) == 0 @mock_dynamodb2 def test_extract_from_record(mocker): # Mock a table. 
records = gen_x86_conv2d_log_record( "llvm -mcpu=core-avx2 -libs=cblas", 6, [1, 1024, 32, 32], [16, 1024, 3, 3], 1, 1, 1 ) records += gen_dense_log_record_w_cblas( "llvm -mcpu=core-avx2 -libs=cblas", 5, [100, 1024], [256, 1024] ) table_name = "lorien-test" with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-layout-") as temp_dir: create_table(table_name, region_name="us-west-2") log_file = "{}/fake.log".format(temp_dir) with open(log_file, "w") as filep: for record in records: filep.write("{}\n".format(json.dumps(record))) AutoTVMTuneResult().commit_tuning_log(None, log_file, table_name, region_name="us-west-2") # Test layout transform workload generation. configs = argparse.Namespace( table_name=table_name, db='{ "region_name": "us-west-2" }', target=["llvm"], ignore_target_attrs=False, ) # The target "llvm" does not match "llvm -mcpu=core-avx2" so it should get nothing # unless we enable ignore-target-attrs. assert len(extract_from_records(configs)) == 0 # "gen_x86_conv2d_log_record" generates 3 layouts, but one of them has the same # input and output layout so it should be ignored when generting layout transform workloads. # In addition, all records from "gen_dense_log_record_w_cblas" should be ignored because layout # transform does not support dense. configs.ignore_target_attrs = True assert len(extract_from_records(configs)) == 2 # Intend to fail all task creations. mocker.patch( "lorien.dialect.tvm_dial.autotvm_dial.extract_from_record.autotvm.task.create" ).side_effect = Exception() assert not extract_from_records(configs) def test_gen_feature(): with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-feature-") as temp_dir: log_dir = os.path.join(temp_dir, "logs") os.mkdir(log_dir) # Generate the first log file, which includes conv2d_NCHWc.x86 log_file = os.path.join(log_dir, "fake1.log") with open(log_file, "w") as filep: records = gen_x86_conv2d_log_record( "llvm -mcpu=core-avx2", 6, [1, 1024, 32, 32], [16, 1024, 3, 3], 1, 1, 1 ) failed_record = deepcopy(records[0]) failed_record["result"][1] = 1 # let error code be non-zero. records.append(failed_record) for record in records: filep.write("{}\n".format(json.dumps(record))) # Generate the second log file, which includes dense_cblas.x86 and dense_pack.x86 log_file = os.path.join(log_dir, "fake2.log") with open(log_file, "w") as filep: records = gen_dense_log_record_w_cblas( "llvm -mcpu=core-avx2", 5, [100, 1024], [256, 1024] ) for record in records: filep.write("{}\n".format(json.dumps(record))) feature_dir = os.path.join(temp_dir, "features") AutoTVMTuneResult.gen_features(log_dir, feature_dir) # The lock files should be removed. 
assert not glob.glob("{}/**/*.lock".format(feature_dir), recursive=True) def check_helper(name, n_data, n_numeric_features, n_category_features): """Check dumped feature files.""" csv_file = os.path.join(feature_dir, "{}.csv".format(name)) meta_file = os.path.join(feature_dir, "{}.meta".format(name)) assert os.path.exists(csv_file), "Missing %s" % csv_file assert os.path.exists(meta_file), "Missing %s" % meta_file with open(csv_file, "r") as filep: features = filep.readline().replace("\n", "").split(",") assert len(features) == n_numeric_features + n_category_features + 1 assert len(filep.read().split("\n")) == n_data + 1 with open(meta_file, "r") as filep: n_numeric = 0 n_category = 0 for line in filep: tokens = line.split(",") if tokens[1] == "numeric": n_numeric += 1 elif tokens[1] == "category": n_category += 1 assert n_numeric == n_numeric_features assert n_category == n_category_features check_helper("conv2d_NCHWc.x86", 7, 22, 6) check_helper("dense_cblas.x86", 1, 4, 4) check_helper("dense_pack.x86", 4, 12, 4) def test_extract_feature(fixture_autotvm_workload): task = fixture_autotvm_workload.to_task() config_dict = { "index": 7, "code_hash": "some_hash", "entity": [ ("tile", "sp", [16, 4]), ("reorder", "re", [0, 2, 1]), ("annotate", "an", "unroll"), ("other", "ot", "auto"), ], } config = ConfigEntity.from_json_dict(config_dict) inp = MeasureInput("llvm", task, config) features = AutoTVMTuneResult.extract_feature(inp) expected_features = { "in_0": 1, "in_1": 9216, "in_2": "float32", "in_3": 4096, "in_4": 9216, "in_5": "float32", "attr_0": None, "attr_1": "float32", "sp_tile_0": 16, "sp_tile_1": 4, "re_reorder": "0;2;1", "an_annotate": "unroll", "ot_other": "auto", } assert features == expected_features
the-stack_0_5980
""" This file offers the methods to automatically retrieve the graph Hydrogenophaga flava NBRC 102514. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def HydrogenophagaFlavaNbrc102514( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Hydrogenophaga flava NBRC 102514 graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Hydrogenophaga flava NBRC 102514 graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="HydrogenophagaFlavaNbrc102514", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
the-stack_0_5981
# Define the validate function
def validate(hand):
    # Add control flow based on the value of hand
    if hand < 0 or hand > 2:
        return False
    else:
        return True


def print_hand(hand, name='Guest'):
    hands = ['Rock', 'Paper', 'Scissors']
    print(name + ' chose: ' + hands[hand])


print('Starting the Rock Paper Scissors game!')
player_name = input('Enter your name: ')
print('Choose a hand: (0: Rock, 1: Paper, 2: Scissors)')
player_hand = int(input('Enter a number (0-2): '))

# Add control flow based on the return value of the validate function
if validate(player_hand):
    print_hand(player_hand, player_name)
else:
    print('Please enter a valid number')
the-stack_0_5982
import argparse import torch import benchmark_core import benchmark_utils """Performance microbenchmarks's main binary. This is the main function for running performance microbenchmark tests. It also registers existing benchmark tests via Python module imports. """ def main(): parser = argparse.ArgumentParser( description="Run microbenchmarks.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument( '--tag_filter', help='tag_filter can be used to run the shapes which matches the tag. (all is used to run all the shapes)', default='short') # This option is used to filter test cases to run. parser.add_argument( '--operators', help='Filter tests based on comma-delimited list of operators to test', default=None) parser.add_argument( '--operator_range', help='Filter tests based on operator_range(e.g. a-c or b,c-d)', default=None) parser.add_argument( '--test_name', help='Run tests that have the provided test_name', default=None) parser.add_argument( '--list_ops', help='List operators without running them', action='store_true') parser.add_argument( '--list_tests', help='List all test cases without running them', action='store_true') parser.add_argument( "--iterations", help="Repeat each operator for the number of iterations", type=int ) parser.add_argument( "--num_runs", help="Run each test for num_runs. Each run executes an operator for number of <--iterations>", type=int, default=1, ) parser.add_argument( "--min_time_per_test", help="Set the minimum time (unit: seconds) to run each test", type=int, default=0, ) parser.add_argument( "--warmup_iterations", help="Number of iterations to ignore before measuring performance", default=100, type=int ) parser.add_argument( "--omp_num_threads", help="Number of OpenMP threads used in PyTorch/Caffe2 runtime", default=None, type=int ) parser.add_argument( "--mkl_num_threads", help="Number of MKL threads used in PyTorch/Caffe2 runtime", default=None, type=int ) parser.add_argument( "--ai_pep_format", type=benchmark_utils.str2bool, nargs='?', const=True, default=False, help="Print result when running on AI-PEP" ) parser.add_argument( "--use_jit", type=benchmark_utils.str2bool, nargs='?', const=True, default=False, help="Run operators with PyTorch JIT mode" ) parser.add_argument( "--forward_only", type=benchmark_utils.str2bool, nargs='?', const=True, default=False, help="Only run the forward path of operators" ) parser.add_argument( '--framework', help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)', default="Caffe2,PyTorch") parser.add_argument( '--device', help='Run tests on the provided architecture (cpu, cuda)', default='None') args, _ = parser.parse_known_args() if args.omp_num_threads: # benchmark_utils.set_omp_threads sets the env variable OMP_NUM_THREADS # which doesn't have any impact as C2 init logic has already been called # before setting the env var. # In general, OMP_NUM_THREADS (and other OMP env variables) needs to be set # before the program is started. # From Chapter 4 in OMP standard: https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf # "Modifications to the environment variables after the program has started, # even if modified by the program itself, are ignored by the OpenMP implementation" benchmark_utils.set_omp_threads(args.omp_num_threads) if benchmark_utils.is_pytorch_enabled(args.framework): torch.set_num_threads(args.omp_num_threads) if args.mkl_num_threads: benchmark_utils.set_mkl_threads(args.mkl_num_threads) benchmark_core.BenchmarkRunner(args).run() if __name__ == "__main__": main()
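
# Usage note (an illustrative addition, not part of the original file): the
# runner is driven from the command line with the flags defined above. The
# script name below is hypothetical; only the flags come from this file.
#
#   python benchmark_runner.py --tag_filter short --operators add,matmul \
#       --iterations 100 --warmup_iterations 10 --omp_num_threads 1
#
# Passing --list_ops or --list_tests prints the registered operators or test
# cases without running them.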
the-stack_0_5983
#INSERTION SORT
def insertion_sort(array):
    # We start from 1 since the first element is trivially sorted
    for index in range(1, len(array)):
        currentValue = array[index]
        currentPosition = index

        # Shift larger elements one slot to the right to make room
        while currentPosition > 0 and array[currentPosition - 1] > currentValue:
            array[currentPosition] = array[currentPosition - 1]
            currentPosition = currentPosition - 1

        array[currentPosition] = currentValue
        print("array now : ")
        print(array)

    # Return the list so the caller can print or reuse the sorted result
    return array


if __name__ == '__main__':
    array = []
    n = int(input("Enter number of elements : "))
    for i in range(0, n):
        ele = int(input())
        array.append(ele)  # adding the element
    print("sorted array: " + str(insertion_sort(array)))
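
# Quick non-interactive check (an illustrative addition; the helper name is
# ours, not the original author's). It relies on insertion_sort returning the
# list, and shows that the sort also happens in place.
def _demo_insertion_sort():
    data = [5, 2, 9, 1, 7]
    assert insertion_sort(data) == [1, 2, 5, 7, 9]
    assert data == [1, 2, 5, 7, 9]  # the input list itself is now sorted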
the-stack_0_5984
#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Syndicate Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool persistence. By default, syndicated will dump mempool on shutdown and then reload it on startup. This can be overridden with the -persistmempool=0 command line option. Test is as follows: - start node0, node1 and node2. node1 has -persistmempool=0 - create 5 transactions on node2 to its own address. Note that these are not sent to node0 or node1 addresses because we don't want them to be saved in the wallet. - check that node0 and node1 have 5 transactions in their mempools - shutdown all nodes. - startup node0. Verify that it still has 5 transactions in its mempool. Shutdown node0. This tests that by default the mempool is persistent. - startup node1. Verify that its mempool is empty. Shutdown node1. This tests that with -persistmempool=0, the mempool is not dumped to disk when the node is shut down. - Restart node0 with -persistmempool=0. Verify that its mempool is empty. Shutdown node0. This tests that with -persistmempool=0, the mempool is not loaded from disk on start up. - Restart node0 with -persistmempool. Verify that it has 5 transactions in its mempool. This tests that -persistmempool=0 does not overwrite a previously valid mempool stored on disk. - Remove node0 mempool.dat and verify savemempool RPC recreates it and verify that node1 can load it and has 5 transaction in its mempool. - Verify that savemempool throws when the RPC is called if node1 can't write to disk. """ import os import time from test_framework.test_framework import SyndicateTestFramework from test_framework.util import * class MempoolPersistTest(SyndicateTestFramework): def set_test_params(self): self.num_nodes = 3 self.extra_args = [[], ["-persistmempool=0"], []] def run_test(self): chain_height = self.nodes[0].getblockcount() assert_equal(chain_height, 200) self.log.debug("Mine a single block to get out of IBD") self.nodes[0].generate(1) self.sync_all() self.log.debug("Send 5 transactions from node2 (to its own address)") for i in range(5): self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10")) node2_balance = self.nodes[2].getbalance() self.sync_all() self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools") assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.") self.stop_nodes() self.start_node(1) # Give this one a head-start, so we can be "extra-sure" that it didn't load anything later self.start_node(0) self.start_node(2) # Give syndicated a second to reload the mempool wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1) wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1) # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now: assert_equal(len(self.nodes[1].getrawmempool()), 0) # Verify accounting of mempool transactions after restart is correct self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet assert_equal(node2_balance, self.nodes[2].getbalance()) self.log.debug("Stop-start node0 with -persistmempool=0. 
Verify that it doesn't load its mempool.dat file.") self.stop_nodes() self.start_node(0, extra_args=["-persistmempool=0"]) # Give syndicated a second to reload the mempool time.sleep(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.") self.stop_nodes() self.start_node(0) wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5) mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat') mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat') self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it") os.remove(mempooldat0) self.nodes[0].savemempool() assert os.path.isfile(mempooldat0) self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions") os.rename(mempooldat0, mempooldat1) self.stop_nodes() self.start_node(1, extra_args=[]) wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5) self.log.debug("Prevent syndicated from writing mempool.dat to disk. Verify that `savemempool` fails") # to test the exception we are setting bad permissions on a tmp file called mempool.dat.new # which is an implementation detail that could change and break this test mempooldotnew1 = mempooldat1 + '.new' with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'): pass assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool) os.remove(mempooldotnew1) if __name__ == '__main__': MempoolPersistTest().main()
the-stack_0_5987
# !/usr/bin/python3 # -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__) class ChannelType(object): """ Define if channel type is input or output. These values must be set according to Bpod firmware specification. """ #: Input channel INPUT = 1 #: Output channel OUTPUT = 2 class ChannelName(object): """ Available channel names. These values must be set according to Bpod firmware specification. """ #: Output channel with PWM support (e.g. Led) PWM = "PWM" #: Output channel for connecting a valve VALVE = "Valve" #: BNC channel BNC = "BNC" #: Wire channel WIRE = "Wire" #: Serial channel SERIAL = "Serial" #: Flex channel FLEX = "Flex" class EventsPositions(object): """ """ def __init__(self): self.Event_USB = 0 # type: int self.Event_Port = 0 # type: int self.Event_BNC = 0 # type: int self.Event_Wire = 0 # type: int self.Event_Flex = 0 # type: int self.globalTimerStart = 0 # type: int self.globalTimerEnd = 0 # type: int self.globalTimerTrigger = 0 # type: int self.globalTimerCancel = 0 # type: int self.globalCounter = 0 # type: int self.globalCounterReset = 0 # type: int self.condition = 0 # type: int self.jump = 0 # type: int self.Tup = 0 # type: int self.output_USB = 0 # type: int self.output_VALVE = 0 # type: int self.output_BNC = 0 # type: int self.output_Wire = 0 # type: int self.output_PWM = 0 # type: int self.output_Flex = 0 # type: int self.analogThreshEnable = 0 # type: int self.analogThreshDisable = 0 # type: int def __str__(self): return ( "Events Positions\n" "Event_USB: {Event_USB}\n" "Event_Port: {Event_Port}\n" "Event_BNC: {Event_BNC}\n" "Event_Wire {Event_Wire}\n" "Event_Flex: {Event_Flex}\n" "globalTimerStart: {globalTimerStart}\n" "globalTimerEnd: {globalTimerEnd}\n" "globalTimerTrigger: {globalTimerTrigger}\n" "globalTimerCancel: {globalTimerCancel}\n" "globalCounter: {globalCounter}\n" "globalCounterReset: {globalCounterReset}\n" "condition: {condition}\n" "jump: {jump}\n" "Tup: {Tup}\n" "output_USB: {output_USB}\n" "output_VALVE: {output_VALVE}\n" "output_BNC: {output_BNC}\n" "output_Wire: {output_Wire}\n" "output_PWM: {output_PWM}\n" "output_Flex: {output_Flex}\n" "analogThreshEnable: {analogThreshEnable}\n" "analogThreshDisable: {analogThreshDisable}\n" "".format( Event_USB=self.Event_USB, Event_Port=self.Event_Port, Event_BNC=self.Event_BNC, Event_Wire=self.Event_Wire, Event_Flex=self.Event_Flex, globalTimerStart=self.globalTimerStart, globalTimerEnd=self.globalTimerEnd, globalTimerTrigger=self.globalTimerTrigger, globalTimerCancel=self.globalTimerCancel, globalCounter=self.globalCounter, globalCounterReset=self.globalCounterReset, condition=self.condition, jump=self.jump, Tup=self.Tup, output_USB=self.output_USB, output_VALVE=self.output_VALVE, output_BNC=self.output_BNC, output_Wire=self.output_Wire, output_PWM=self.output_PWM, output_Flex=self.output_Flex, analogThreshEnable=self.analogThreshEnable, analogThreshDisable=self.analogThreshDisable ) ) class Channels(object): """ Bpod main class """ def __init__(self): self.event_names = [] self.input_channel_names = [] self.output_channel_names = [] self.events_positions = EventsPositions() def setup_input_channels(self, hardware, modules): """ Generate event and input channel names """ Pos = 0 nUSB = 0 nUART = 0 nBNCs = 0 nWires = 0 nPorts = 0 nFlex = 0 for i in range(len(hardware.inputs)): if hardware.inputs[i] == "U": nUART += 1 module = modules[nUART - 1] module_name = "" if module.connected: module_name = module.name self.input_channel_names += [module_name] else: module_name = "Serial" + 
str(nUART) self.input_channel_names += [module_name] n_module_event_names = len(module.event_names) for j in range(module.n_serial_events): if j < n_module_event_names: self.event_names += [module_name + "_" + module.event_names[j]] else: self.event_names += [module_name + "_" + str(j + 1)] Pos += 1 elif hardware.inputs[i] == "X": if nUSB == 0: self.events_positions.Event_USB = Pos nUSB += 1 self.input_channel_names += ["USB" + str(nUSB)] loops_n = int(hardware.max_serial_events / (len(modules) + 1)) for j in range(loops_n): self.event_names += ["SoftCode" + str(j + 1)] Pos += 1 elif hardware.inputs[i] == "P": if nPorts == 0: self.events_positions.Event_Port = Pos nPorts += 1 self.input_channel_names += ["Port" + str(nPorts)] self.event_names += [self.input_channel_names[-1] + "In"] Pos += 1 self.event_names += [self.input_channel_names[-1] + "Out"] Pos += 1 elif hardware.inputs[i] == "B": if nBNCs == 0: self.events_positions.Event_BNC = Pos nBNCs += 1 self.input_channel_names += ["BNC" + str(nBNCs)] self.event_names += [self.input_channel_names[-1] + "High"] Pos += 1 self.event_names += [self.input_channel_names[-1] + "Low"] Pos += 1 elif hardware.inputs[i] == "W": if nWires == 0: self.events_positions.Event_Wire = Pos nWires += 1 self.input_channel_names += ["Wire" + str(nWires)] self.event_names += [self.input_channel_names[-1] + "High"] Pos += 1 self.event_names += [self.input_channel_names[-1] + "Low"] Pos += 1 elif hardware.inputs[i] == "F": if nFlex == 0: self.events_positions.Event_Flex = Pos # Check if channel is configured for digital input if hardware.flex_channel_types[nFlex] == 0: nFlex += 1 self.input_channel_names += ["Flex" + str(nFlex)] self.event_names += [self.input_channel_names[-1] + "High"] Pos += 1 self.event_names += [self.input_channel_names[-1] + "Low"] Pos += 1 # Check if channel is configured for analog input elif hardware.flex_channel_types[nFlex] == 2: nFlex += 1 self.input_channel_names += ["Flex" + str(nFlex)] self.event_names += [self.input_channel_names[-1] + "Trig1"] Pos += 1 self.event_names += [self.input_channel_names[-1] + "Trig2"] Pos += 1 # This means the flex channel must be configured as output else: self.input_channel_names += ["---"] # Placeholder to maintain appropriate index self.event_names += ["---"] # Placeholder for "high"/"trig1" Pos += 1 self.event_names += ["---"] # Placeholder for "low"/"trig2" Pos += 1 nFlex += 1 # increment to maintain flex_channel_types index self.events_positions.globalTimerStart = Pos for i in range(hardware.n_global_timers): self.event_names += ["GlobalTimer" + str(i + 1) + "_Start"] Pos += 1 self.events_positions.globalTimerEnd = Pos for i in range(hardware.n_global_timers): self.event_names += ["GlobalTimer" + str(i + 1) + "_End"] self.input_channel_names += ["GlobalTimer" + str(i + 1)] Pos += 1 self.events_positions.globalCounter = Pos for i in range(hardware.n_global_counters): self.event_names += ["GlobalCounter" + str(i + 1) + "_End"] Pos += 1 self.events_positions.condition = Pos for i in range(hardware.n_conditions): self.event_names += ["Condition" + str(i + 1)] Pos += 1 self.event_names += ["Tup"] self.events_positions.Tup = Pos Pos += 1 logger.debug("event_names: %s", self.event_names) logger.debug("events_positions: %s", self.events_positions) def setup_output_channels(self, hardware, modules): """ Generate output channel names """ nUSB = 0 nUART = 0 nVALVE = 0 nBNCs = 0 nWires = 0 nPorts = 0 nFlex = 0 for i in range(len(hardware.outputs)): if hardware.outputs[i] == "U": nUART += 1 module = 
modules[nUART - 1] module_name = "" if module.connected: module_name = module.name self.output_channel_names += [module_name] else: module_name = "Serial" + str(nUART) self.output_channel_names += [module_name] elif hardware.outputs[i] == "X": if nUSB == 0: self.events_positions.output_USB = len(self.output_channel_names) nUSB += 1 self.output_channel_names += ["SoftCode"] elif hardware.outputs[i] == "V": if nVALVE == 0: self.events_positions.output_VALVE = len(self.output_channel_names) nVALVE += 1 self.output_channel_names += ["Valve" + str(nVALVE)] # Assume an SPI shift register mapping bits of a byte to 8 valves elif hardware.outputs[i] == "B": if nBNCs == 0: self.events_positions.output_BNC = len(self.output_channel_names) nBNCs += 1 self.output_channel_names += ["BNC" + str(nBNCs)] elif hardware.outputs[i] == "W": if nWires == 0: self.events_positions.output_Wire = len(self.output_channel_names) nWires += 1 self.output_channel_names += ["Wire" + str(nWires)] elif hardware.outputs[i] == "P": if nPorts == 0: self.events_positions.output_PWM = len(self.output_channel_names) nPorts += 1 self.output_channel_names += ["PWM" + str(nPorts)] elif hardware.outputs[i] == "F": if nFlex == 0: self.events_positions.output_Flex = len(self.output_channel_names) # Check if channel is configured for digital output if hardware.flex_channel_types[nFlex] == 1: nFlex += 1 self.output_channel_names += ["Flex" + str(nFlex) + "DO"] # Check if channel is configured for analog output elif hardware.flex_channel_types[nFlex] == 3: nFlex += 1 self.output_channel_names += ["Flex" + str(nFlex) + "AO"] # This means the flex channel must be configured as input else: self.output_channel_names += ["---"] # placeholder to maintain appropriate index. nFlex += 1 # increment to maintain the flex_channel_types index self.output_channel_names += ["GlobalTimerTrig"] self.events_positions.globalTimerTrigger = len(self.output_channel_names) - 1 self.output_channel_names += ["GlobalTimerCancel"] self.events_positions.globalTimerCancel = len(self.output_channel_names) - 1 self.output_channel_names += ["GlobalCounterReset"] self.events_positions.globalCounterReset = len(self.output_channel_names) - 1 if hardware.machine_type > 3: self.output_channel_names += ["AnalogThreshEnable"] self.events_positions.analogThreshEnable = len(self.output_channel_names) - 1 self.output_channel_names += ["AnalogThreshDisable"] self.events_positions.analogThreshDisable = len(self.output_channel_names) - 1 logger.debug("output_channel_names: %s", self.output_channel_names) logger.debug("events_positions: %s", self.events_positions) def get_event_name(self, event_idx): """ :param event_idx: :return: """ try: event_name = self.event_names[event_idx] except IndexError: event_name = "unknown event name" return event_name def __str__(self): buff = "\n****************** EVENTS ******************\n" for idx, event in enumerate(self.event_names): buff += "{0: >3} : {1: <24}".format(idx, event) if ((idx + 1) % 3) == 0 and idx != 0: buff += "\n" buff += "\n\n****************** INPUT CHANNELS ******************\n" for idx, channel in enumerate(self.input_channel_names): buff += "{0: >3} : {1: <24}".format(idx, channel) if ((idx + 1) % 3) == 0 and idx != 0: buff += "\n" buff += "\n\n****************** OUTPUT CHANNELS ******************\n" for idx, channel in enumerate(self.output_channel_names): buff += "{0: >3} : {1: <24}".format(idx, channel) if ((idx + 1) % 3) == 0 and idx != 0: buff += "\n" return "SMA Channels\n" + buff + "\n\n"
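
# Illustrative sketch (an addition, not part of the original module): the real
# `hardware` argument comes from pybpod's hardware description object, but the
# setup methods above only read a handful of attributes, so a stand-in
# namespace with made-up values is enough to show which event and channel
# names get generated.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_hardware = SimpleNamespace(
        inputs="BBWWPP",      # 2 BNC, 2 wire and 2 port input channels
        outputs="BBWWPP",     # matching output channels
        max_serial_events=15,
        n_global_timers=5,
        n_global_counters=5,
        n_conditions=5,
        machine_type=1,       # < 4, so no analog-threshold output channels
    )

    channels = Channels()
    channels.setup_input_channels(fake_hardware, modules=[])
    channels.setup_output_channels(fake_hardware, modules=[])
    print(channels)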
the-stack_0_5988
from pydex.core.designer import Designer import numpy as np import sobol_seq """ Setting : a non-dynamic experimental system with 2 time-invariant control variables and 1 response. Problem : design optimal experiment for a order 2 polynomial. Solution : 3^2 factorial design, varying efforts depending on chosen criterion: ~ D-optimal: well distributed. ~ A-optimal: slight central-focus. ~ E-optimal: strong central-focus. """ def simulate(ti_controls, model_parameters): return np.array([ # constant term model_parameters[0] + # linear term model_parameters[1] * ti_controls[0] + model_parameters[2] * ti_controls[1] + # linear-linear terms model_parameters[3] * ti_controls[0] * ti_controls[1] + # squared terms model_parameters[4] * ti_controls[0] ** 2 + model_parameters[5] * ti_controls[1] ** 2 ]) designer = Designer() designer.simulate = simulate designer.model_parameters = np.ones(6) # values won't affect design, but still needed designer.ti_controls_candidates = designer.enumerate_candidates( bounds=[ [-1, 1], [-1, 1], ], levels=[ 11, 11, ], ) designer.start_logging() designer.initialize(verbose=2) # 0: silent, 1: overview, 2: detailed, 3: very detailed designer.ti_controls_names = [r"$x_1$", r"$x_2$"] """ cvxpy optimizers """ package, optimizer = ("cvxpy", "MOSEK") # package, optimizer = ("cvxpy", "SCS") # package, optimizer = ("cvxpy", "CVXOPT") # only for A-optimal """ scipy optimizers, all supported, but many require unconstrained form """ # package, optimizer = ("scipy", "powell") # package, optimizer = ("scipy", "cg") # package, optimizer = ("scipy", "tnc") # package, optimizer = ("scipy", "l-bfgs-b") # package, optimizer = ("scipy", "bfgs") # package, optimizer = ("scipy", "nelder-mead") # package, optimizer = ("scipy", "SLSQP") # supports constrained form designer.eval_sensitivities(method="central", num_steps=3) """ designing experiment """ criterion = designer.d_opt_criterion designer.design_experiment( criterion=criterion, package=package, optimizer=optimizer, write=False, ) designer.print_optimal_candidates() designer.apportion(9) designer.plot_optimal_efforts() designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False) criterion = designer.a_opt_criterion designer.design_experiment( criterion=criterion, package=package, optimizer=optimizer, write=False, ) designer.print_optimal_candidates() designer.apportion(9) designer.plot_optimal_efforts() designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False) criterion = designer.e_opt_criterion designer.design_experiment( criterion=criterion, package=package, optimizer=optimizer, write=False, ) designer.print_optimal_candidates() designer.apportion(11) designer.plot_optimal_efforts() designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False) designer.stop_logging() designer.show_plots()
the-stack_0_5989
import logging

from .models import TwitterBotResponseLog, TwitterBotVisitLog

logger = logging.getLogger(__name__)


class LogTwitterbotLinkVisitMiddleware(object):
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        param = 'twitterbot_log_id'
        if param in request.GET:
            response_log = TwitterBotResponseLog.objects.get(id=request.GET[param])
            logger.info(
                f'{self.__class__.__name__} - Someone visited {request.path} from status {response_log.tweet_url}'
            )
            TwitterBotVisitLog.objects.create(request_path=request.path, response_log=response_log)

        response = self.get_response(request)
        return response
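
# Registration sketch (an illustrative addition, not part of the original
# file): like any Django middleware, this class only takes effect once it is
# listed in the project's settings. The dotted path below is hypothetical and
# depends on where this module lives.
#
#   # settings.py
#   MIDDLEWARE = [
#       # ... Django's default middleware ...
#       "myapp.middleware.LogTwitterbotLinkVisitMiddleware",
#   ]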
the-stack_0_5990
import pickle import random import numpy as np from soepy.simulate.simulate_python import simulate from soepy.soepy_config import TEST_RESOURCES_DIR from development.tests.auxiliary.auxiliary import cleanup def test1(): """This test runs a random selection of test regression tests from our regression test battery. """ vault = TEST_RESOURCES_DIR / "regression_vault.soepy.pkl" with open(vault, "rb") as file: tests = pickle.load(file) for i in random.sample(range(0, 100), 10): ( model_spec_init_dict, random_model_params_df, exog_educ_shares, exog_child_age_shares, exog_partner_shares, exog_exper_shares_pt, exog_exper_shares_ft, exog_child_info, exog_partner_arrival_info, exog_partner_separation_info, expected_df, ) = tests[i] exog_educ_shares.to_pickle("test.soepy.educ.shares.pkl") exog_child_age_shares.to_pickle("test.soepy.child.age.shares.pkl") exog_child_info.to_pickle("test.soepy.child.pkl") exog_partner_shares.to_pickle("test.soepy.partner.shares.pkl") exog_exper_shares_pt.to_pickle("test.soepy.pt.exp.shares.pkl") exog_exper_shares_ft.to_pickle("test.soepy.ft.exp.shares.pkl") exog_partner_arrival_info.to_pickle("test.soepy.partner.arrival.pkl") exog_partner_separation_info.to_pickle("test.soepy.partner.separation.pkl") calculated_df = simulate(random_model_params_df, model_spec_init_dict) for col in expected_df.columns.tolist(): np.testing.assert_array_almost_equal( expected_df[col], calculated_df[col], ) cleanup()
the-stack_0_5991
import ctypes import time, math, random from random import randint import win32gui, win32con, win32api dx=10 def OnPaint(hwnd, msg, wp, lp): global dx font=win32gui.LOGFONT() font.lfFaceName="Consolas" font.lfHeight=48 # font.lfWidth=font.lfHeight # font.lfWeight=150 # font.lfItalic=1 # font.lfUnderline=1 hfont=win32gui.CreateFontIndirect(font) dc, ps=win32gui.BeginPaint(hwnd) win32gui.SetGraphicsMode(dc, win32con.GM_ADVANCED) l,t,r,b=win32gui.GetClientRect(hwnd) br=win32gui.CreateSolidBrush(win32api.RGB(0,0,255)) bitmap=win32gui.CreateBitmap(20,5,4,1,None) win32gui.SelectObject(dc, bitmap) win32gui.SelectObject(dc, br) win32gui.SelectObject(dc, hfont) win32gui.SetTextColor(dc,win32api.RGB(randint(1,255),randint(1,255),randint(1,255))); win32gui.DrawText(dc,'hello',-1,(100,100,300,300),0) win32gui.FillRect(dc,(200+dx,200+dx,100+dx,100+dx),br) dx=(dx+10)%100 win32gui.EndPaint(hwnd, ps) return 0 wc = win32gui.WNDCLASS() wc.lpszClassName = 'win32' wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW wc.hbrBackground = win32con.COLOR_WINDOW+1 wndproc={win32con.WM_PAINT:OnPaint} wc.lpfnWndProc=wndproc wc.hCursor = win32gui.LoadCursor (None, win32con.IDC_ARROW) class_atom=win32gui.RegisterClass(wc) hwnd = win32gui.CreateWindow(class_atom,'hello', win32con.WS_OVERLAPPEDWINDOW|win32con.WS_VISIBLE, 350,120,640,480, 0, 0, 0, None) for _ in range(30): win32gui.InvalidateRect(hwnd,None,True) win32gui.PumpWaitingMessages() time.sleep(0.1) win32gui.DestroyWindow(hwnd) win32gui.UnregisterClass(class_atom,None)
the-stack_0_5995
"""Base segment definitions. Here we define: - BaseSegment. This is the root class for all segments, and is designed to hold other subsegments. - UnparsableSegment. A special wrapper to indicate that the parse function failed on this block of segments and to prevent further analysis. """ from io import StringIO from cached_property import cached_property from typing import Any, Callable, Optional, List, Tuple, NamedTuple, Iterator import logging from sqlfluff.core.string_helpers import ( frame_msg, curtail_string, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.match_logging import parse_match_logging from sqlfluff.core.parser.match_wrapper import match_wrapper from sqlfluff.core.parser.helpers import ( check_still_complete, trim_non_code_segments, ) from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.context import ParseContext # Instantiate the linter logger (only for use in methods involved with fixing.) linter_logger = logging.getLogger("sqlfluff.linter") class FixPatch(NamedTuple): """An edit patch for a templated file.""" templated_slice: slice fixed_raw: str # The patch category, functions mostly for debugging and explanation # than for function. It allows traceability of *why* this patch was # generated. It has no siginificance for processing. patch_category: str class BaseSegment: """The base segment element. This defines the base element which drives both Lexing, Parsing and Linting. A large chunk of the logic which defines those three operations are centered here. Much of what is defined in the BaseSegment is also used by its many subclasses rather than directly here. For clarity, the `BaseSegment` is mostly centered around a segment which contains other subsegments. For segments which don't have *children*, refer to the `RawSegment` class (which still inherits from this one). Segments are used both as instances to hold chunks of text, but also as classes themselves where they function a lot like grammars, and return instances of themselves when they match. The many classmethods in this class are usually to serve their purpose as a matcher. """ # `type` should be the *category* of this kind of segment type = "base" parse_grammar: Optional[Matchable] = None # We define the type here but no value. Subclasses must provide a value. match_grammar: Matchable comment_seperate = False optional = False # NB: See the sequence grammar for details _name: Optional[str] = None is_meta = False # Are we able to have non-code at the start or end? can_start_end_non_code = False # Can we allow it to be empty? Usually used in combination # with the can_start_end_non_code. allow_empty = False # What other kwargs need to be copied when applying fixes. additional_kwargs: List[str] = [] def __init__(self, segments, pos_marker=None, name: Optional[str] = None): # A cache variable for expandable self._is_expandable = None # Surrogate name option. self._surrogate_name = name if len(segments) == 0: raise RuntimeError( "Setting {} with a zero length segment set. 
This shouldn't happen.".format( self.__class__ ) ) if hasattr(segments, "matched_segments"): # Safely extract segments from a match self.segments = segments.matched_segments elif isinstance(segments, tuple): self.segments = segments elif isinstance(segments, list): self.segments = tuple(segments) else: raise TypeError(f"Unexpected type passed to BaseSegment: {type(segments)}") if not pos_marker: # If no pos given, it's the pos of the first segment. if isinstance(segments, (tuple, list)): pos_marker = PositionMarker.from_child_markers( *(seg.pos_marker for seg in segments) ) else: raise TypeError( f"Unexpected type passed to BaseSegment: {type(segments)}" ) self.pos_marker: PositionMarker = pos_marker def __eq__(self, other): # NB: this should also work for RawSegment return ( # Same class NAME. (could be constructed elsewhere) self.__class__.__name__ == other.__class__.__name__ and (self.raw == other.raw) # Both must have a non-null position marker to compare. and self.pos_marker and other.pos_marker # We only match that the *start* is the same. This means we can # still effectively construct searches look for segments. # This is important for .apply_fixes(). and ( self.pos_marker.start_point_marker() == other.pos_marker.start_point_marker() ) ) def __repr__(self): return f"<{self.__class__.__name__}: ({self.pos_marker})>" # ################ PRIVATE PROPERTIES @property def _comments(self): """Returns only the comment elements of this segment.""" return [seg for seg in self.segments if seg.is_type("comment")] @property def _non_comments(self): """Returns only the non-comment elements of this segment.""" return [seg for seg in self.segments if not seg.is_type("comment")] # ################ PUBLIC PROPERTIES @property def name(self): """The name of this segment. The reason for three routes for names is that some subclasses might want to override the name rather than just getting the class name. Instances may also override this with the _surrogate_name. Name should be specific to this kind of segment, while `type` should be a higher level descriptor of the kind of segment. For example, the name of `+` is 'plus' but the type might be 'binary_operator'. """ return self._surrogate_name or self._name or self.__class__.__name__ @property def is_expandable(self): """Return true if it is meaningful to call `expand` on this segment. We need to do this recursively because even if *this* segment doesn't need expanding, maybe one of its children does. Once a segment is *not* expandable, it can never become so, which is why the variable is cached. 
""" if self._is_expandable is False: return self._is_expandable elif self.parse_grammar: return True elif self.segments and any(s.is_expandable for s in self.segments): return True else: # Cache the variable self._is_expandable = False return False @cached_property def is_code(self): """Return True if this segment contains any code.""" return any(seg.is_code for seg in self.segments) @cached_property def is_comment(self): """Return True if this is entirely made of comments.""" return all(seg.is_comment for seg in self.segments) @cached_property def is_whitespace(self): """Return True if this segment is entirely whitespace.""" return all(seg.is_whitespace for seg in self.segments) @cached_property def raw(self): """Make a string from the segments of this segment.""" return self._reconstruct() @cached_property def raw_upper(self): """Make an uppercase string from the segments of this segment.""" return self._reconstruct().upper() @cached_property def matched_length(self): """Return the length of the segment in characters.""" return sum(seg.matched_length for seg in self.segments) # ################ STATIC METHODS @staticmethod def segs_to_tuple(segs, **kwargs): """Return a tuple structure from an iterable of segments.""" return tuple(seg.to_tuple(**kwargs) for seg in segs) @staticmethod def _suffix(): """Return any extra output required at the end when logging. NB Override this for specific subclasses if we want extra output. """ return "" @staticmethod def expand(segments, parse_context): """Expand the list of child segments using their `parse` methods.""" segs = () for stmt in segments: try: if not stmt.is_expandable: parse_context.logger.info( "[PD:%s] Skipping expansion of %s...", parse_context.parse_depth, stmt, ) segs += (stmt,) continue except Exception as err: parse_context.logger.error( "%s has no attribute `is_expandable`. This segment appears poorly constructed.", stmt, ) raise err if not hasattr(stmt, "parse"): raise ValueError( "{} has no method `parse`. This segment appears poorly constructed.".format( stmt ) ) parse_depth_msg = "Parse Depth {}. Expanding: {}: {!r}".format( parse_context.parse_depth, stmt.__class__.__name__, curtail_string(stmt.raw, length=40), ) parse_context.logger.info(frame_msg(parse_depth_msg)) res = stmt.parse(parse_context=parse_context) if isinstance(res, BaseSegment): segs += (res,) else: # We might get back an iterable of segments segs += tuple(res) # Basic Validation check_still_complete(segments, segs, ()) return segs @classmethod def _position_segments(cls, segments, parent_pos=None): """Refresh positions of segments within a span. This does two things: - Assign positions to any segments without them. - Updates the working line_no and line_pos for all segments during fixing. New segments are assumed to be metas or insertions and so therefore have a zero-length position in the source and templated file. """ # If there are no segments, there's no need to reposition. if not segments: return segments # Work out our starting position for working through if parent_pos: line_no = parent_pos.working_line_no line_pos = parent_pos.working_line_pos # If we don't have it, infer it from the first position # in this segment that does have a position. 
else: for fwd_seg in segments: if fwd_seg.pos_marker: line_no = fwd_seg.pos_marker.working_line_no line_pos = fwd_seg.pos_marker.working_line_pos break else: linter_logger.warning("SEG: %r, POS: %r", segments, parent_pos) raise ValueError("Unable to find working position.") # Use the index so that we can look forward # and backward. for idx, segment in enumerate(segments): # Fill any that don't have a position. if not segment.pos_marker: # Can we get a position from the previous? if idx > 0: segment.pos_marker = segments[idx - 1].pos_marker.end_point_marker() # Can we get it from the parent? elif parent_pos: segment.pos_marker = parent_pos.start_point_marker() # Search forward for a following one, if we have to? else: for fwd_seg in segments[idx + 1 :]: if fwd_seg.pos_marker: segments[ idx ].pos_marker = fwd_seg.pos_marker.start_point_marker() break else: raise ValueError("Unable to position new segment") # Update the working position. segment.pos_marker = segment.pos_marker.with_working_position( line_no, line_pos, ) line_no, line_pos = segment.pos_marker.infer_next_position( segment.raw, line_no, line_pos ) # If this segment has children, recurse and reposition them too. if segment.segments: segment.segments = cls._position_segments( segment.segments, parent_pos=segment.pos_marker ) return segments # ################ CLASS METHODS @classmethod def simple(cls, parse_context: ParseContext) -> Optional[List[str]]: """Does this matcher support an uppercase hash matching route? This should be true if the MATCH grammar is simple. Most more complicated segments will be assumed to overwrite this method if they wish to be considered simple. """ if cls.match_grammar: return cls.match_grammar.simple(parse_context=parse_context) else: # Other segments will either override this method, or aren't # simple. return None @classmethod def is_optional(cls): """Return True if this segment is optional. This is used primarily in sequence matching, where optional segments can be skipped. """ return cls.optional @classmethod def class_is_type(cls, *seg_type): """Is this segment class (or its parent) of the given type.""" # Do we match on the type of _this_ class. if cls.type in seg_type: return True # If not, check types of parents. for base_class in cls.__bases__: if base_class is object: break elif base_class.type in seg_type: return True elif base_class.type == "base": break return False @classmethod def structural_simplify(cls, elem): """Simplify the structure recursively so it serializes nicely in json/yaml.""" if isinstance(elem, tuple): # Does this look like an element? if len(elem) == 2 and isinstance(elem[0], str): # This looks like a single element, make a dict elem = {elem[0]: cls.structural_simplify(elem[1])} elif isinstance(elem[0], tuple): # This looks like a list of elements. keys = [e[0] for e in elem] # Any duplicate elements? if len(set(keys)) == len(keys): # No, we can use a mapping tuple elem = {e[0]: cls.structural_simplify(e[1]) for e in elem} else: # Yes, this has to be a list :( elem = [cls.structural_simplify(e) for e in elem] return elem @classmethod @match_wrapper(v_level=4) def match( cls, segments: Tuple["BaseSegment", ...], parse_context: ParseContext ) -> MatchResult: """Match a list of segments against this segment. Note: Match for segments is done in the ABSTRACT. When dealing with concrete then we're always in parse. Parse is what happens during expand. Matching can be done from either the raw or the segments. 
This raw function can be overridden, or a grammar defined on the underlying class. """ # Edge case, but it's possible that we have *already matched* on # a previous cycle. Do should first check whether this is a case # of that. if len(segments) == 1 and isinstance(segments[0], cls): # This has already matched. Winner. parse_match_logging( cls.__name__, "_match", "SELF", parse_context=parse_context, v_level=3, symbol="+++", ) return MatchResult.from_matched(segments) elif len(segments) > 1 and isinstance(segments[0], cls): parse_match_logging( cls.__name__, "_match", "SELF", parse_context=parse_context, v_level=3, symbol="+++", ) # This has already matched, but only partially. return MatchResult((segments[0],), segments[1:]) if cls.match_grammar: # Call the private method with parse_context.deeper_match() as ctx: m = cls.match_grammar.match(segments=segments, parse_context=ctx) # Calling unify here, allows the MatchResult class to do all the type checking. if not isinstance(m, MatchResult): raise TypeError( "[PD:{} MD:{}] {}.match. Result is {}, not a MatchResult!".format( parse_context.parse_depth, parse_context.match_depth, cls.__name__, type(m), ) ) # Once unified we can deal with it just as a MatchResult if m.has_match(): return MatchResult( (cls(segments=m.matched_segments),), m.unmatched_segments ) else: return MatchResult.from_unmatched(segments) else: raise NotImplementedError( f"{cls.__name__} has no match function implemented" ) # ################ PRIVATE INSTANCE METHODS def _reconstruct(self): """Make a string from the segments of this segment.""" return "".join(seg.raw for seg in self.segments) def _preface(self, ident, tabsize): """Returns the preamble to any logging.""" padded_type = "{padding}{modifier}{type}".format( padding=" " * (ident * tabsize), modifier="[META] " if self.is_meta else "", type=self.get_type() + ":", ) preface = "{pos:20}|{padded_type:60} {suffix}".format( pos=str(self.pos_marker) if self.pos_marker else "-", padded_type=padded_type, suffix=self._suffix() or "", ) # Trim unnecessary whitespace before returning return preface.rstrip() # ################ PUBLIC INSTANCE METHODS def get_type(self): """Returns the type of this segment as a string.""" return self.type def is_type(self, *seg_type): """Is this segment (or its parent) of the given type.""" return self.class_is_type(*seg_type) def invalidate_caches(self): """Invalidate the cached properties. This should be called whenever the segments within this segment is mutated. 
""" for key in ["is_code", "is_comment", "raw", "raw_upper", "matched_length"]: self.__dict__.pop(key, None) def get_start_point_marker(self): """Get a point marker at the start of this segment.""" return self.pos_marker.start_point_marker() def get_end_point_marker(self): """Get a point marker at the end of this segment.""" return self.pos_marker.end_point_marker() def get_start_loc(self): """Get a location tuple at the start of this segment.""" return self.pos_marker.working_loc def get_end_loc(self): """Get a location tuple at the end of this segment.""" return self.pos_marker.working_loc_after( self.raw, ) def stringify(self, ident=0, tabsize=4, code_only=False): """Use indentation to render this segment and its children as a string.""" buff = StringIO() preface = self._preface(ident=ident, tabsize=tabsize) buff.write(preface + "\n") if not code_only and self.comment_seperate and len(self._comments) > 0: if self._comments: buff.write((" " * ((ident + 1) * tabsize)) + "Comments:" + "\n") for seg in self._comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) if self._non_comments: buff.write((" " * ((ident + 1) * tabsize)) + "Code:" + "\n") for seg in self._non_comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) else: for seg in self.segments: # If we're in code_only, only show the code segments, otherwise always true if not code_only or seg.is_code: buff.write( seg.stringify( ident=ident + 1, tabsize=tabsize, code_only=code_only, ) ) return buff.getvalue() def to_tuple(self, code_only=False, show_raw=False, include_meta=False): """Return a tuple structure from this segment.""" # works for both base and raw if show_raw and not self.segments: result = (self.get_type(), self.raw) elif code_only: result = ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if seg.is_code and not seg.is_meta ), ) else: result = ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if include_meta or not seg.is_meta ), ) return result def as_record(self, **kwargs): """Return the segment as a structurally simplified record. This is useful for serialization to yaml or json. 
kwargs passed to to_tuple """ return self.structural_simplify(self.to_tuple(**kwargs)) def raw_list(self): """Return a list of raw elements, mostly for testing or searching.""" buff = [] for s in self.segments: buff += s.raw_list() return buff def iter_raw_seg(self): """Iterate raw segments, mostly for searching.""" for s in self.segments: yield from s.iter_raw_seg() def iter_segments(self, expanding=None, pass_through=False): """Iterate raw segments, optionally expanding some chldren.""" for s in self.segments: if expanding and s.is_type(*expanding): yield from s.iter_segments( expanding=expanding if pass_through else None ) else: yield s def iter_unparsables(self): """Iterate through any unparsables this segment may contain.""" for s in self.segments: yield from s.iter_unparsables() def type_set(self): """Return a set of the types contained, mostly for testing.""" typs = {self.type} for s in self.segments: typs |= s.type_set() return typs def is_raw(self): """Return True if this segment has no children.""" return len(self.segments) == 0 def get_child(self, *seg_type): """Retrieve the first of the children of this segment with matching type.""" for seg in self.segments: if seg.is_type(*seg_type): return seg return None def get_children(self, *seg_type): """Retrieve the all of the children of this segment with matching type.""" buff = [] for seg in self.segments: if seg.is_type(*seg_type): buff.append(seg) return buff def select_children( self, start_seg: Optional["BaseSegment"] = None, stop_seg: Optional["BaseSegment"] = None, select_if: Optional[Callable[["BaseSegment"], Any]] = None, loop_while: Optional[Callable[["BaseSegment"], Any]] = None, ): """Retrieve subset of children based on range and filters. Often useful by linter rules when generating fixes, e.g. to find whitespace segments between two already known segments. """ start_index = self.segments.index(start_seg) if start_seg else -1 stop_index = self.segments.index(stop_seg) if stop_seg else len(self.segments) buff = [] for seg in self.segments[start_index + 1 : stop_index]: if loop_while and not loop_while(seg): break if not select_if or select_if(seg): buff.append(seg) return buff def recursive_crawl(self, *seg_type, recurse_into=True): """Recursively crawl for segments of a given type. Args: seg_type: :obj:`str`: one or more type of segment to look for. recurse_into: :obj:`bool`: When an element of type "seg_type" is found, whether to recurse into it. """ # Check this segment if self.is_type(*seg_type): match = True yield self else: match = False if recurse_into or not match: # Recurse for seg in self.segments: yield from seg.recursive_crawl(*seg_type, recurse_into=recurse_into) def path_to(self, other): """Given a segment which is assumed within self, get the intermediate segments. Returns: :obj:`list` of segments, including the segment we're looking for. None if not found. """ # Return self if we've found the segment. if self is other: return [self] # Are we in the right ballpark? # NB: Comparisons have a higher precedence than `not`. if not self.get_start_loc() <= other.get_start_loc() <= self.get_end_loc(): return None # Do we have any child segments at all? if not self.segments: return None # Check through each of the child segments for seg in self.segments: res = seg.path_to(other) if res: return [self] + res return None def parse(self, parse_context=None, parse_grammar=None): """Use the parse grammar to find subsegments within this segment. A large chunk of the logic around this can be found in the `expand` method. 
Use the parse setting in the context for testing, mostly to check how deep to go. True/False for yes or no, an integer allows a certain number of levels. Optionally, this method allows a custom parse grammar to be provided which will override any existing parse grammar on the segment. """ # Clear the blacklist cache so avoid missteps if parse_context: parse_context.blacklist.clear() # the parse_depth and recurse kwargs control how deep we will recurse for testing. if not self.segments: # This means we're a root segment, just return an unmutated self return self # Check the Parse Grammar parse_grammar = parse_grammar or self.parse_grammar if parse_grammar is None: # No parse grammar, go straight to expansion parse_context.logger.debug( "{}.parse: no grammar. Going straight to expansion".format( self.__class__.__name__ ) ) else: # For debugging purposes. Ensure that we don't have non-code elements # at the start or end of the segments. They should always in the middle, # or in the parent expression. segments = self.segments if self.can_start_end_non_code: pre_nc, segments, post_nc = trim_non_code_segments(segments) else: pre_nc = () post_nc = () if (not segments[0].is_code) and (not segments[0].is_meta): raise ValueError( "Segment {} starts with non code segment: {!r}.\n{!r}".format( self, segments[0].raw, segments ) ) if (not segments[-1].is_code) and (not segments[-1].is_meta): raise ValueError( "Segment {} ends with non code segment: {!r}.\n{!r}".format( self, segments[-1].raw, segments ) ) # NOTE: No match_depth kwarg, because this is the start of the matching. with parse_context.matching_segment(self.__class__.__name__) as ctx: m = parse_grammar.match(segments=segments, parse_context=ctx) if not isinstance(m, MatchResult): raise TypeError( "[PD:{}] {}.match. Result is {}, not a MatchResult!".format( parse_context.parse_depth, self.__class__.__name__, type(m) ) ) # Basic Validation, that we haven't dropped anything. check_still_complete(segments, m.matched_segments, m.unmatched_segments) if m.has_match(): if m.is_complete(): # Complete match, happy days! self.segments = pre_nc + m.matched_segments + post_nc else: # Incomplete match. # For now this means the parsing has failed. Lets add the unmatched bit at the # end as something unparsable. # TODO: Do something more intelligent here. self.segments = ( pre_nc + m.matched_segments + ( UnparsableSegment( segments=m.unmatched_segments + post_nc, expected="Nothing...", ), ) ) elif self.allow_empty and not segments: # Very edge case, but some segments are allowed to be empty other than non-code self.segments = pre_nc + post_nc else: # If there's no match at this stage, then it's unparsable. That's # a problem at this stage so wrap it in an unparsable segment and carry on. self.segments = ( pre_nc + ( UnparsableSegment( segments=segments, expected=self.name, ), # NB: tuple ) + post_nc ) # Recurse if allowed (using the expand method to deal with the expansion) parse_context.logger.debug( "{}.parse: Done Parse. Plotting Recursion. Recurse={!r}".format( self.__class__.__name__, parse_context.recurse ) ) parse_depth_msg = "###\n#\n# Beginning Parse Depth {}: {}\n#\n###\nInitial Structure:\n{}".format( parse_context.parse_depth + 1, self.__class__.__name__, self.stringify() ) if parse_context.may_recurse(): parse_context.logger.debug(parse_depth_msg) with parse_context.deeper_parse() as ctx: self.segments = self.expand(self.segments, parse_context=ctx) return self def apply_fixes(self, fixes): """Apply an iterable of fixes to this segment. 
Used in applying fixes if we're fixing linting errors. If anything changes, this should return a new version of the segment rather than mutating the original. Note: We need to have fixes to apply AND this must have children. In the case of raw segments, they will be replaced or removed by their parent and so this function should just return self. """ if fixes and not self.is_raw(): # Get a reference to self to start with, but this will rapidly # become a working copy. r = self # Make a working copy seg_buffer = [] todo_buffer = list(self.segments) while True: if len(todo_buffer) == 0: break else: seg = todo_buffer.pop(0) fix_buff = fixes.copy() unused_fixes = [] while fix_buff: f = fix_buff.pop() # Look for identity not just equality. # This handles potential positioning ambiguity. if f.anchor is seg: linter_logger.debug( "Matched fix against segment: %s -> %s", f, seg ) if f.edit_type == "delete": # We're just getting rid of this segment. seg = None elif f.edit_type in ("edit", "create"): # We're doing a replacement (it could be a single segment or an iterable) if isinstance(f.edit, BaseSegment): seg_buffer.append(f.edit) else: for s in f.edit: seg_buffer.append(s) if f.edit_type == "create": # in the case of a creation, also add this segment on the end seg_buffer.append(seg) else: raise ValueError( "Unexpected edit_type: {!r} in {!r}".format( f.edit_type, f ) ) # We've applied a fix here. Move on, this also consumes the fix # TODO: Maybe deal with overlapping fixes later. break else: # We've not used the fix so we should keep it in the list for later. unused_fixes.append(f) else: seg_buffer.append(seg) # Switch over the the unused list fixes = unused_fixes + fix_buff # Invalidate any caches self.invalidate_caches() # Then recurse (i.e. deal with the children) (Requeueing) seg_queue = seg_buffer seg_buffer = [] for seg in seg_queue: s, fixes = seg.apply_fixes(fixes) seg_buffer.append(s) # Reform into a new segment r = r.__class__( # Realign the segments within segments=self._position_segments( tuple(seg_buffer), parent_pos=r.pos_marker ), pos_marker=r.pos_marker, # Pass through any additional kwargs **{k: getattr(self, k) for k in self.additional_kwargs}, ) # Return the new segment with any unused fixes. return r, fixes else: return self, fixes def iter_patches(self, templated_str: str) -> Iterator[FixPatch]: """Iterate through the segments generating fix patches. The patches are generated in TEMPLATED space. This is important so that we defer dealing with any loops until later. At this stage everything *should* happen in templated order. Occasionally we have an insertion around a placeholder, so we also return a hint to deal with that. """ # Does it match? If so we can ignore it. matches = self.raw == templated_str[self.pos_marker.templated_slice] if matches: return # If we're here, the segment doesn't match the original. # If it's all literal, then we don't need to recurse. if self.pos_marker.is_literal(): # Yield the position in the source file and the patch yield FixPatch( self.pos_marker.templated_slice, self.raw, patch_category="literal" ) # Can we go deeper? elif not self.segments: # It's not literal, but it's also a raw segment. If were going # to yield a change, we would have done it from the parent, so # we just abort from here. return else: # This segment isn't a literal, but has changed, we need to go deeper. 
# Iterate through the child segments templated_idx = self.pos_marker.templated_slice.start insert_buff = "" for seg_idx, segment in enumerate(self.segments): # First check for insertions. # We know it's an insertion if it has length but not in the templated file. if segment.raw and segment.pos_marker.is_point(): # Add it to the insertion buffer if it has length: if segment.raw: insert_buff += segment.raw linter_logger.debug( "Appending insertion buffer. %r @idx: %s", insert_buff, templated_idx, ) continue # If we get here, then we know it's an original. # Check for deletions at the before this segment (vs the TEMPLATED). start_diff = segment.pos_marker.templated_slice.start - templated_idx # Check to see whether there's a discontinuity before the current segment if start_diff > 0 or insert_buff: # If we have an insert buffer, then it's an edit, otherwise a deletion. yield FixPatch( slice( segment.pos_marker.templated_slice.start - max(start_diff, 0), segment.pos_marker.templated_slice.start, ), insert_buff, patch_category="mid_point", ) insert_buff = "" # Now we deal with any changes *within* the segment itself. yield from segment.iter_patches(templated_str=templated_str) # Once we've dealt with any patches from the segment, update # our position markers. templated_idx = segment.pos_marker.templated_slice.stop # After the loop, we check whether there's a trailing deletion # or insert. Also valid if we still have an insertion buffer here. end_diff = self.pos_marker.templated_slice.stop - templated_idx if end_diff or insert_buff: yield FixPatch( slice( self.pos_marker.templated_slice.stop - end_diff, self.pos_marker.templated_slice.stop, ), insert_buff, patch_category="end_point", ) class BracketedSegment(BaseSegment): """A segment containing a bracketed expression.""" type = "bracketed" additional_kwargs = ["start_bracket", "end_bracket"] def __init__( self, *args, # These are tuples of segments but we're expecting them to # be tuples of length 1. This is because we'll almost always # be doing tuple arithmetic with the results and constructing # 1-tuples on the fly is very easy to misread. start_bracket: Tuple[BaseSegment] = None, end_bracket: Tuple[BaseSegment] = None, **kwargs, ): """Stash the bracket segments for later.""" if not start_bracket or not end_bracket: raise ValueError( "Attempted to construct Bracketed segment without specifying brackets." ) self.start_bracket = start_bracket self.end_bracket = end_bracket super().__init__(*args, **kwargs) @classmethod def simple(cls, parse_context: ParseContext) -> Optional[List[str]]: """Simple methods for bracketed and the persitent brackets.""" start_brackets = [ start_bracket for _, start_bracket, _, persistent in parse_context.dialect.sets( "bracket_pairs" ) if persistent ] start_simple = [] for ref in start_brackets: start_simple += parse_context.dialect.ref(ref).simple(parse_context) return start_simple @classmethod def match( cls, segments: Tuple["BaseSegment", ...], parse_context: ParseContext ) -> MatchResult: """Only useful as a terminator.""" if segments and isinstance(segments[0], cls): return MatchResult((segments[0],), segments[1:]) return MatchResult.from_unmatched(segments) class UnparsableSegment(BaseSegment): """This is a segment which can't be parsed. It indicates a error during parsing.""" type = "unparsable" # From here down, comments are printed separately. 
comment_seperate = True _expected = "" def __init__(self, *args, expected="", **kwargs): self._expected = expected super().__init__(*args, **kwargs) def _suffix(self): """Return any extra output required at the end when logging. NB Override this for specific subclasses if we want extra output. """ return f"!! Expected: {self._expected!r}" def iter_unparsables(self): """Iterate through any unparsables. As this is an unparsable, it should yield itself. """ yield self
the-stack_0_5996
from typing import Any, Optional, Union from castutils.builtins.strings import to_str from castutils.types import GenericType def as_float(obj: Any, /) -> float: if isinstance(obj, float): return obj else: raise TypeError("Object is not of instance float") def as_float_or(obj: Any, fallback: GenericType, /) -> Union[float, GenericType]: try: return as_float(obj) except TypeError: return fallback def to_float( obj: Any, /, encoding: Optional[str] = None, errors: Optional[str] = None, ) -> float: try: if isinstance(obj, float): return obj elif isinstance(obj, (str, bytes)): return float(to_str(obj, encoding=encoding, errors=errors)) elif isinstance(obj, bool): return float(obj) return float(obj) except Exception as exception: raise ValueError("Object cannot transform to float") from exception def to_float_or( obj: Any, fallback: GenericType, /, encoding: Optional[str] = None, errors: Optional[str] = None, ) -> Union[float, GenericType]: try: return to_float(obj, encoding=encoding, errors=errors) except ValueError: return fallback
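# Illustrative usage sketch (editor-added example, not part of the original
# module): it contrasts the strict `as_float*` helpers with the coercing
# `to_float*` helpers defined above.  It assumes the definitions above are in
# scope and that `to_str` passes plain `str` input through unchanged.
if __name__ == "__main__":
    assert as_float(1.5) == 1.5                      # already a float
    assert as_float_or("1.5", 0.0) == 0.0            # strict: str rejected, fallback used
    assert to_float("1.5") == 1.5                    # coercing: string is parsed
    assert to_float(True) == 1.0                     # bool is cast explicitly
    assert to_float_or("not a number", -1.0) == -1.0
    print("float casting helpers behaved as expected")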
the-stack_0_5999
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # slice paddle model generator # import sys import os import numpy as np import paddle from save_model import exportModel from save_model import saveModel data_type = 'float32' def slice(name : str, x, axes : list, start : list, end : list): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) out = paddle.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, fetch_list=[out]) saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] def slice_dyn(test_shape=[2,8,10,10]): paddle.disable_static() data = paddle.rand(shape=test_shape, dtype='float32') ''' slice w/ decrease_axis ''' @paddle.jit.to_static def test_slice_decrease_axis(x): return x[0, 1:3, :, 5] exportModel('slice_decrease_axis', test_slice_decrease_axis, [data], target_dir=sys.argv[1]) # output shape (2, 10) ''' slice w/o decrease_axis ''' @paddle.jit.to_static def test_slice(x): return paddle.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6]) # exportModel('slice_dyn', test_slice, [data], target_dir=sys.argv[1]) # output shape (1, 2, 10, 1) # disable it by default as this kind of test model already there. It's for comparsion only. ''' slice w/ decrease_axis of all dims ''' @paddle.jit.to_static def test_slice_decrease_axis_all(x): return x[0, 0, 0, 0] exportModel('slice_decrease_axis_all', test_slice_decrease_axis_all, [data], target_dir=sys.argv[1]) # output shape (1,) ''' slice w/o decrease_axis of all dims ''' @paddle.jit.to_static def test_slice_alldim(x): return paddle.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1]) # exportModel('slice_alldim', test_slice_alldim, [data], target_dir=sys.argv[1]) # output shape (1, 1, 1, 1) # disable it by default as this kind of test model already there. It's for comparsion only. ''' a test case simulating the last reshape2 of ocrnet which accepts slice (with decrease_axes in all dims) as its parents. ''' def slice_reshape(B=1, C=256, H=16, W=32): paddle.disable_static() data = paddle.rand(shape=[B, C, H*W], dtype='float32') @paddle.jit.to_static def test_model(x): x2 = paddle.assign([-1, -1, 16, 32]).astype('int32') node_reshape = paddle.reshape(x, [0, 256, x2[2], x2[3]]) return node_reshape exportModel('slice_reshape', test_model, [data], target_dir=sys.argv[1]) def main(): x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type) slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3)) x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type) slice("slice_1d", x, axes=[0], start=[0], end=[1]) if __name__ == "__main__": main() slice_dyn() slice_reshape()
the-stack_0_6000
# -*- coding: utf-8 -*-
'''
Utilities to enable exception reraising across the master commands
'''

# Import python libs
import exceptions

# Import salt libs
import salt.exceptions
import salt.utils.event


def raise_error(name=None, args=None, message=''):
    '''
    Raise an exception with __name__ taken from name and arguments taken from
    args. If args is None, use message instead. If name is empty, use
    "Exception".
    '''
    name = name or 'Exception'
    if hasattr(salt.exceptions, name):
        ex = getattr(salt.exceptions, name)
    elif hasattr(exceptions, name):
        ex = getattr(exceptions, name)
    else:
        name = 'SaltException'
        ex = getattr(salt.exceptions, name)
    if args is not None:
        raise ex(*args)
    else:
        raise ex(message)


def pack_exception(exc):
    if hasattr(exc, 'pack'):
        packed_exception = exc.pack()
    else:
        packed_exception = {'message': exc.__unicode__(), 'args': exc.args}
    return packed_exception


def fire_exception(exc, opts, job=None, node='minion'):
    '''
    Fire raw exception across the event bus
    '''
    if job is None:
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts)
    # Serialize the exception before firing it on the event bus.
    event.fire_event(pack_exception(exc), '_salt_error')
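# Illustrative round-trip sketch (editor-added example, not part of the original
# module, and never invoked at import time): it packs an exception the way
# fire_exception does and re-raises it with raise_error.  It assumes salt is
# installed, since the module above already imports salt.exceptions.
def _example_round_trip():
    try:
        raise salt.exceptions.CommandExecutionError('disk full')
    except salt.exceptions.CommandExecutionError as exc:
        payload = pack_exception(exc)
    # ... the payload would normally travel over the event bus ...
    # Re-raises CommandExecutionError('disk full') on the receiving side.
    raise_error(name='CommandExecutionError', args=payload.get('args'))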
the-stack_0_6003
from pdb import set_trace as breakpoint


class Dog():
    def __init__(self, name, age, housebroken=True):
        self.name = name
        self.age = age
        self.housebroken = housebroken

    def bark(self):
        print(f'{self.name} likes to bark!')


class Beagle(Dog):
    def __init__(self, name, age, housebroken=True, barks_alot=True):
        super().__init__(name, age, housebroken)
        self.barks_alot = barks_alot

    def bark(self):
        if self.barks_alot:
            print(f'{self.name} likes to bark!')
        else:
            print(f'{self.name} hates to bark!')


if __name__ == "__main__":
    lucky = Dog("Lucky", 3)
    spike = Beagle("Spike", 7, barks_alot=False)
    breakpoint()
the-stack_0_6007
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from rqalpha.model.base_account import BaseAccount
from rqalpha.environment import Environment
from rqalpha.events import EVENT
from rqalpha.const import DEFAULT_ACCOUNT_TYPE, POSITION_EFFECT, SIDE
from rqalpha.utils.i18n import gettext as _
from rqalpha.utils.logger import user_system_log

from ..api.api_future import order


def margin_of(order_book_id, quantity, price):
    env = Environment.get_instance()
    margin_info = env.data_proxy.get_margin_info(order_book_id)
    margin_multiplier = env.config.base.margin_multiplier
    margin_rate = margin_info['long_margin_ratio'] * margin_multiplier
    contract_multiplier = env.get_instrument(order_book_id).contract_multiplier
    return quantity * contract_multiplier * price * margin_rate


class FutureAccount(BaseAccount):

    __abandon_properties__ = [
        "daily_holding_pnl",
        "daily_realized_pnl"
    ]

    def register_event(self):
        event_bus = Environment.get_instance().event_bus
        event_bus.add_listener(EVENT.SETTLEMENT, self._settlement)
        event_bus.add_listener(EVENT.ORDER_PENDING_NEW, self._on_order_pending_new)
        event_bus.add_listener(EVENT.ORDER_CREATION_REJECT, self._on_order_creation_reject)
        event_bus.add_listener(EVENT.ORDER_CANCELLATION_PASS, self._on_order_unsolicited_update)
        event_bus.add_listener(EVENT.ORDER_UNSOLICITED_UPDATE, self._on_order_unsolicited_update)
        event_bus.add_listener(EVENT.TRADE, self._on_trade)
        if self.AGGRESSIVE_UPDATE_LAST_PRICE:
            event_bus.add_listener(EVENT.BAR, self._on_bar)
            event_bus.add_listener(EVENT.TICK, self._on_tick)

    def fast_forward(self, orders, trades=list()):
        # Rebuild positions
        for trade in trades:
            if trade.exec_id in self._backward_trade_set:
                continue
            self._apply_trade(trade)
        # Rebuild frozen cash
        self._frozen_cash = sum(self._frozen_cash_of_order(order) for order in orders if order.is_active())

    def order(self, order_book_id, quantity, style, target=False):
        position = self.positions[order_book_id]

        if target:
            # For order_to
            quantity = quantity - position.buy_quantity + position.sell_quantity

        orders = []
        if quantity > 0:
            # Close yesterday's short positions
            if position.sell_old_quantity > 0:
                soq = position.sell_old_quantity
                orders.append(order(
                    order_book_id,
                    min(quantity, position.sell_old_quantity),
                    SIDE.BUY,
                    POSITION_EFFECT.CLOSE,
                    style
                ))
                quantity -= soq
                if quantity <= 0:
                    return orders
            # Close today's short positions
            if position.sell_today_quantity > 0:
                stq = position.sell_today_quantity
                orders.append(order(
                    order_book_id,
                    min(quantity, position.sell_today_quantity),
                    SIDE.BUY,
                    POSITION_EFFECT.CLOSE_TODAY,
                    style
                ))
                quantity -= stq
                if quantity <= 0:
                    return orders
            # Open a long position
            orders.append(order(
                order_book_id,
                quantity,
                SIDE.BUY,
                POSITION_EFFECT.OPEN,
                style
            ))
            return orders
        else:
            # Close yesterday's long positions
            quantity *= -1
            if position.buy_old_quantity > 0:
                boq = position.buy_old_quantity
                orders.append(order(
                    order_book_id,
                    min(quantity, position.buy_old_quantity),
                    SIDE.SELL,
                    POSITION_EFFECT.CLOSE,
                    style
                ))
                quantity -= boq
                if quantity <= 0:
                    return orders
            # Close today's long positions
            if position.buy_today_quantity > 0:
                btq = position.buy_today_quantity
                orders.append(order(
                    order_book_id,
                    min(quantity, position.buy_today_quantity),
                    SIDE.SELL,
                    POSITION_EFFECT.CLOSE_TODAY,
                    style
                ))
                quantity -= btq
                if quantity <= 0:
                    return orders
            # Open a short position
            orders.append(order(
                order_book_id,
                quantity,
                SIDE.SELL,
                POSITION_EFFECT.OPEN,
                style
            ))
            return orders

    def get_state(self):
        return {
            'positions': {
                order_book_id: position.get_state()
                for order_book_id, position in six.iteritems(self._positions)
            },
            'frozen_cash': self._frozen_cash,
            'total_cash': self._total_cash,
            'backward_trade_set': list(self._backward_trade_set),
            'transaction_cost': self._transaction_cost,
        }

    def set_state(self, state):
        self._frozen_cash = state['frozen_cash']
        self._backward_trade_set = set(state['backward_trade_set'])
        self._transaction_cost = state['transaction_cost']

        margin_changed = 0
        self._positions.clear()
        for order_book_id, v in six.iteritems(state['positions']):
            position = self._positions.get_or_create(order_book_id)
            position.set_state(v)
            if 'margin_rate' in v and abs(v['margin_rate'] - position.margin_rate) > 1e-6:
                margin_changed += position.margin * (v['margin_rate'] - position.margin_rate) / position.margin_rate

        self._total_cash = state['total_cash'] + margin_changed

    @property
    def type(self):
        return DEFAULT_ACCOUNT_TYPE.FUTURE.name

    @staticmethod
    def _frozen_cash_of_order(order):
        if order.position_effect == POSITION_EFFECT.OPEN:
            return margin_of(order.order_book_id, order.unfilled_quantity, order.frozen_price)
        else:
            return 0

    @staticmethod
    def _frozen_cash_of_trade(trade):
        if trade.position_effect == POSITION_EFFECT.OPEN:
            return margin_of(trade.order_book_id, trade.last_quantity, trade.frozen_price)
        else:
            return 0

    @property
    def total_value(self):
        return self._total_cash + self.margin + self.holding_pnl

    # -- Margin related
    @property
    def margin(self):
        """
        [float] total margin
        """
        return sum(position.margin for position in six.itervalues(self._positions))

    @property
    def buy_margin(self):
        """
        [float] margin held on the long (buy) side
        """
        return sum(position.buy_margin for position in six.itervalues(self._positions))

    @property
    def sell_margin(self):
        """
        [float] margin held on the short (sell) side
        """
        return sum(position.sell_margin for position in six.itervalues(self._positions))

    # -- PNL related
    @property
    def daily_pnl(self):
        """
        [float] profit and loss for the day
        """
        return self.realized_pnl + self.holding_pnl - self.transaction_cost

    @property
    def holding_pnl(self):
        """
        [float] unrealized (holding) profit and loss
        """
        return sum(position.holding_pnl for position in six.itervalues(self._positions))

    @property
    def realized_pnl(self):
        """
        [float] realized profit and loss from closed positions
        """
        return sum(position.realized_pnl for position in six.itervalues(self._positions))

    def _settlement(self, event):
        total_value = self.total_value

        for position in list(self._positions.values()):
            order_book_id = position.order_book_id
            if position.is_de_listed() and position.buy_quantity + position.sell_quantity != 0:
                user_system_log.warn(
                    _(u"{order_book_id} is expired, close all positions by system").format(order_book_id=order_book_id))
                del self._positions[order_book_id]
            elif position.buy_quantity == 0 and position.sell_quantity == 0:
                del self._positions[order_book_id]
            else:
                position.apply_settlement()
        self._total_cash = total_value - self.margin - self.holding_pnl
        self._transaction_cost = 0

        # TODO: revisit the settlement logic
        # If total_value <= 0 the account is considered blown up: clear all
        # positions and set the cash to 0.
        if total_value <= 0:
            self._positions.clear()
            self._total_cash = 0

        self._backward_trade_set.clear()

    def _on_bar(self, event):
        for position in self._positions.values():
            position.update_last_price()

    def _on_tick(self, event):
        for position in self._positions.values():
            position.update_last_price()

    def _on_order_pending_new(self, event):
        if self != event.account:
            return
        self._frozen_cash += self._frozen_cash_of_order(event.order)

    def _on_order_creation_reject(self, event):
        if self != event.account:
            return
        self._frozen_cash -= self._frozen_cash_of_order(event.order)

    def _on_order_unsolicited_update(self, event):
        if self != event.account:
            return
        self._frozen_cash -= self._frozen_cash_of_order(event.order)

    def _on_trade(self, event):
        if self != event.account:
            return
        self._apply_trade(event.trade)

    def _apply_trade(self, trade):
        if trade.exec_id in self._backward_trade_set:
            return
        order_book_id = trade.order_book_id
        position = self._positions.get_or_create(order_book_id)
        delta_cash = position.apply_trade(trade)

        self._transaction_cost += trade.transaction_cost
        self._total_cash -= trade.transaction_cost
        self._total_cash += delta_cash
        self._frozen_cash -= self._frozen_cash_of_trade(trade)
        self._backward_trade_set.add(trade.exec_id)

    # ------------------------------------ Abandon Property ------------------------------------

    @property
    def daily_holding_pnl(self):
        """
        [deprecated] use holding_pnl instead
        """
        user_system_log.warn(_(u"[abandon] {} is no longer used.").format('future_account.daily_holding_pnl'))
        return self.holding_pnl

    @property
    def daily_realized_pnl(self):
        """
        [deprecated] use realized_pnl instead
        """
        user_system_log.warn(_(u"[abandon] {} is no longer used.").format('future_account.daily_realized_pnl'))
        return self.realized_pnl
the-stack_0_6009
def build_poetry_assistant(words_to_phonemes):
    # complete the function body (8 MARKS)
    ''' (dict of {str: list of str}) -> dict of {tuple of str: list of str}

    Return a poetry assistant dictionary from the words to phonemes in
    words_to_phonemes.

    >>> words_to_phonemes = {'BEFORE': ['B', 'IH0', 'F', 'AO1', 'R'],
    ...                      'THE': ['DH', 'AH0'],
    ...                      'A': ['AH0'],
    ...                      'POEM': ['P', 'OW1', 'AH0', 'M'],
    ...                      'OR': ['AO1', 'R']}
    >>> actual = build_poetry_assistant(words_to_phonemes)
    >>> expected = {('AH0',): ['THE', 'A'], ('AH0', 'M'): ['POEM'],
    ...             ('AO1', 'R'): ['BEFORE', 'OR']}
    >>> actual == expected
    True
    '''
    rhyme_dict = {}
    for word in words_to_phonemes:
        # Extract the last phonemes of each word, as a tuple.
        end = tuple(last_phonemes(words_to_phonemes[word]))
        if end not in rhyme_dict:
            # First word with this ending: create an entry for it.
            rhyme_dict[end] = []
        # Add the word to the list of words that share this ending.
        rhyme_dict[end].append(word)
    return rhyme_dict


def last_phonemes(phoneme_list):
    ''' (list of str) -> list of str

    Return the last vowel phoneme and any subsequent consonant phoneme(s) from
    phoneme_list, in the same order as they appear in phoneme_list.

    >>> last_phonemes(['AE1', 'B', 'S', 'IH0', 'N', 'TH'])
    ['IH0', 'N', 'TH']
    >>> last_phonemes(['IH0', 'N'])
    ['IH0', 'N']
    '''
    vowels = 'AEIOU'
    last_vowel_index = -1
    # Remember the position of the last vowel phoneme in the list.
    for i in range(len(phoneme_list)):
        if phoneme_list[i][0] in vowels:
            last_vowel_index = i
    if last_vowel_index == -1:
        # No vowel phoneme at all.
        return []
    return phoneme_list[last_vowel_index:]


def find_rhymes(phonemes_to_words, word):
    # complete the function body (4 MARKS)
    ''' (dict of {tuple of str: list of str}, str) -> list of str

    Precondition: word.isalpha() and word.isupper() are True, and word appears
    in exactly one value list in the phonemes_to_words dictionary

    Return a list of all words in phonemes_to_words that rhyme with word. Do
    not include word in the list.

    >>> phonemes_to_words = {('AO1', 'R'): ['BEFORE', 'OR'],
    ...                      ('AH0', 'M'): ['POEM'], ('AH0',): ['THE', 'A']}
    >>> find_rhymes(phonemes_to_words, 'OR')
    ['BEFORE']
    >>> find_rhymes(phonemes_to_words, 'POEM')
    []
    '''
    rhymes = []
    for phoneme in phonemes_to_words:
        if word in phonemes_to_words[phoneme]:
            for other_word in phonemes_to_words[phoneme]:
                if other_word != word:
                    rhymes.append(other_word)
    return rhymes
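# Small usage sketch (editor-added example): build the rhyme lookup from a toy
# pronunciation dictionary and query it, using only the functions defined above.
if __name__ == '__main__':
    pronunciations = {'BEFORE': ['B', 'IH0', 'F', 'AO1', 'R'],
                      'OR': ['AO1', 'R'],
                      'POEM': ['P', 'OW1', 'AH0', 'M'],
                      'THE': ['DH', 'AH0']}
    assistant = build_poetry_assistant(pronunciations)
    print(find_rhymes(assistant, 'OR'))   # prints ['BEFORE']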
the-stack_0_6011
import datetime import functools import json import operator import re import requests from django.conf import settings from django.contrib import auth from django.core import signing from django.db import transaction from django.db.models import Q, F from django.http import Http404, HttpResponseForbidden, HttpResponse from django.shortcuts import get_object_or_404, redirect from django.urls import reverse from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from django.views import generic from password_reset.views import Recover from tagging.models import Tag, TaggedItem from tagging.utils import calculate_cloud, get_tag from . import utils from .constants import (MACHINETAGS_FROM_FIELDS, IMPROVIDERS_DICT, SERVICES_DICT) from .forms import (SkillsForm, SignupForm, PortfolioForm, BioForm, LocationForm, FindingForm, AccountForm, PasswordForm, DeletionRequestForm, AccountDeletionForm) from .models import DjangoPerson, Country, User, Region, PortfolioSite from ..django_openidauth.models import associate_openid, UserOpenID from ..machinetags.utils import tagdict from ..machinetags.models import MachineTaggedItem NOTALPHA_RE = re.compile('[^a-zA-Z0-9]') @utils.simple_decorator def must_be_owner(view): def inner(request, *args, **kwargs): if 'username' in kwargs: if (not request.user or request.user.is_anonymous or request.user.username != kwargs['username']): return HttpResponseForbidden('Not allowed') else: if ( not request.user or request.user.is_anonymous or request.user.username != args[0] ): return HttpResponseForbidden('Not allowed') return view(request, *args, **kwargs) return inner class IndexView(generic.TemplateView): template_name = 'index.html' def get_context_data(self, **kwargs): people = DjangoPerson.objects.all().select_related() people = people.order_by('-id')[:100] ctx = super().get_context_data(**kwargs) ctx.update({ 'people_list': people, 'people_list_limited': people[:4], 'total_people': DjangoPerson.objects.count(), 'countries': Country.objects.top_countries(), 'home': True, }) return ctx index = IndexView.as_view() class AboutView(generic.TemplateView): template_name = 'about.html' def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({ 'total_people': DjangoPerson.objects.count(), 'countries': Country.objects.top_countries(), }) return ctx about = AboutView.as_view() class RecentView(generic.TemplateView): template_name = 'recent.html' def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) people = DjangoPerson.objects.all().select_related() ctx.update({ 'people': people.order_by('-auth_user.date_joined')[:50], }) return ctx recent = RecentView.as_view() def redirect_to_logged_in_user_profile(request): if request.user.is_authenticated: url = reverse('user_profile', kwargs={'username': request.user}) else: url = reverse('index') return redirect(url) def logout(request): auth.logout(request) request.session['openids'] = [] return redirect(reverse('index')) class RecoverView(Recover): search_fields = ['username'] recover = RecoverView.as_view() class OpenIDWhatNext(generic.RedirectView): """ If user is already logged in, send them to /openid/associations/ Otherwise, send them to the signup page """ permanent = False def get_redirect_url(self): if not self.request.openid: return reverse('index') if self.request.user.is_anonymous: # Have they logged in with an OpenID that matches an account? 
try: user_openid = UserOpenID.objects.get( openid=str(self.request.openid), ) except UserOpenID.DoesNotExist: return reverse('signup') # Log the user in user = user_openid.user user.backend = 'django.contrib.auth.backends.ModelBackend' auth.login(self.request, user) return reverse('user_profile', args=[user.username]) return reverse('openid_associations') openid_whatnext = OpenIDWhatNext.as_view() class SignupView(generic.FormView): form_class = SignupForm template_name = 'signup.html' def dispatch(self, request, *args, **kwargs): if not request.user.is_anonymous: return redirect(reverse('index')) return super().dispatch(request, *args, **kwargs) def form_valid(self, form): creation_args = { 'username': form.cleaned_data['username'], 'email': form.cleaned_data['email'], } user = User.objects.create(**creation_args) if form.cleaned_data.get('password1'): user.set_password(form.cleaned_data['password1']) user.first_name = form.cleaned_data['first_name'] user.last_name = form.cleaned_data['last_name'] user.save() if self.request.openid: associate_openid(user, str(self.request.openid)) region = None if form.cleaned_data['region']: region = Region.objects.get( country__iso_code=form.cleaned_data['country'], code=form.cleaned_data['region'], ) # Now create the DjangoPerson person = DjangoPerson.objects.create( user=user, bio=form.cleaned_data['bio'], country=Country.objects.get( iso_code=form.cleaned_data['country'], ), region=region, latitude=form.cleaned_data['latitude'], longitude=form.cleaned_data['longitude'], location_description=form.cleaned_data['location_description'], ) # Set up the various machine tags for fieldname, (namespace, predicate) in MACHINETAGS_FROM_FIELDS.items(): if ( fieldname in form.cleaned_data and form.cleaned_data[fieldname].strip() ): value = form.cleaned_data[fieldname].strip() person.add_machinetag(namespace, predicate, value) # Finally, set their skill tags person.skilltags = form.cleaned_data['skilltags'] # Log them in and redirect to their profile page user.backend = 'django.contrib.auth.backends.ModelBackend' auth.login(self.request, user) self.person = person return super().form_valid(form) def get_success_url(self): return self.person.get_absolute_url() def get_form_kwargs(self): kwargs = super().get_form_kwargs() if self.request.openid: kwargs['openid'] = self.request.openid return kwargs def get_initial(self): initial = super().get_initial() if self.request.openid and self.request.openid.sreg: sreg = self.request.openid.sreg first_name = '' last_name = '' username = '' if sreg.get('fullname'): bits = sreg['fullname'].split() first_name = bits[0] if len(bits) > 1: last_name = ' '.join(bits[1:]) # Find a not-taken username if sreg.get('nickname'): username = derive_username(sreg['nickname']) initial.update({ 'first_name': first_name, 'last_name': last_name, 'email': sreg.get('email', ''), 'username': username, }) return initial def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({ 'openid': self.request.openid, }) return ctx signup = SignupView.as_view() signup = transaction.atomic(signup) def derive_username(nickname): nickname = NOTALPHA_RE.sub('', nickname) if not nickname: return '' base_nickname = nickname to_add = 1 while True: try: DjangoPerson.objects.get(user__username=nickname) except DjangoPerson.DoesNotExist: break nickname = base_nickname + str(to_add) to_add += 1 return nickname class CleverPaginator(object): """ A paginator that triggers pagination only if the 2nd page is worth displaying. 
""" paginate_by = 100 def get_count(self): raise NotImplementedError def get_paginate_by(self, queryset): count = self.get_count() if count > self.paginate_by * 1.5: return self.paginate_by return count class CountryView(CleverPaginator, generic.ListView): template_name = 'country.html' context_object_name = 'people_list' def get_queryset(self): self.country = get_object_or_404( Country, iso_code=self.kwargs['country_code'].upper() ) self.all_people = self.country.djangoperson_set.select_related( 'country', 'user' ).order_by('user__first_name', 'user__last_name') return self.all_people def get_count(self): return self.country.num_people def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'regions': self.country.top_regions(), 'country': self.country, 'people_list': self.all_people, }) return context country = CountryView.as_view() class RegionView(CleverPaginator, generic.ListView): template_name = 'country.html' def get_queryset(self): self.region = get_object_or_404( Region, country__iso_code=self.kwargs['country_code'].upper(), code=self.kwargs['region_code'].upper(), ) self.all_people = self.region.djangoperson_set.select_related( 'user', 'country', ).order_by('user__first_name', 'user__last_name') return self.all_people def get_count(self): return self.region.num_people def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'country': self.region, 'people_list': self.all_people, }) return context region = RegionView.as_view() class CountrySitesView(generic.ListView): context_object_name = 'sites' template_name = 'country_sites.html' def get_queryset(self): self.country = get_object_or_404( Country, iso_code=self.kwargs['country_code'].upper(), ) return PortfolioSite.objects.select_related().filter( contributor__country=self.country, ).order_by('contributor') def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'country': self.country, }) return context country_sites = CountrySitesView.as_view() class ProfileView(generic.DetailView): context_object_name = 'person' template_name = 'profile.html' def get_object(self): person = get_object_or_404(DjangoPerson, user__username=self.kwargs['username']) DjangoPerson.objects.filter(pk=person.pk).update( profile_views=F('profile_views') + 1, ) return person def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) mtags = tagdict(self.object.machinetags.all()) # Set up convenient iterables for IM and services ims = [] for key, value in mtags.get('im', {}).items(): shortname, name, icon = IMPROVIDERS_DICT.get(key, ('', '', '')) if not shortname: continue # Bad machinetag ims.append({ 'shortname': shortname, 'name': name, 'value': value, }) ims.sort(key=lambda x: x['shortname']) services = [] for key, value in mtags.get('services', {}).items(): shortname, name, icon = SERVICES_DICT.get(key, ('', '', '')) if not shortname: continue # Bad machinetag services.append({ 'shortname': shortname, 'name': name, 'value': value, }) services.sort(key=lambda x: x['shortname']) # Set up vars that control privacy stuff privacy = { 'show_im': ( mtags['privacy']['im'] == 'public' or not self.request.user.is_anonymous ), 'show_email': ( mtags['privacy']['email'] == 'public' or (not self.request.user.is_anonymous and mtags['privacy']['email'] == 'private') ), 'hide_from_search': mtags['privacy']['search'] != 'public', 'show_last_irc_activity': bool(self.object.last_active_on_irc and 
self.object.irc_tracking_allowed()), } # Should we show the 'Finding X' section at all? show_finding = (services or privacy['show_email'] or (privacy['show_im'] and ims)) context.update({ 'is_owner': self.request.user.username == self.kwargs['username'], 'skills_form': SkillsForm(instance=self.object), 'mtags': mtags, 'ims': ims, 'services': services, 'privacy': privacy, 'show_finding': show_finding, 'people_list': self.object.get_nearest(), }) return context profile = ProfileView.as_view() class DjangoPersonEditViewBase(generic.UpdateView): def get_object(self): return get_object_or_404(DjangoPerson, user__username=self.kwargs['username']) def get_success_url(self): return reverse('user_profile', args=[self.kwargs['username']]) class EditFindingView(DjangoPersonEditViewBase): form_class = FindingForm template_name = 'edit_finding.html' def get_initial(self): mtags = tagdict(self.object.machinetags.all()) initial = { 'email': self.object.user.email, 'first_name': self.object.user.first_name, 'last_name': self.object.user.last_name, } # Fill in other initial fields from machinetags for fieldname, (namespace, predicate) in \ MACHINETAGS_FROM_FIELDS.items(): initial[fieldname] = mtags[namespace][predicate] return initial edit_finding = must_be_owner(EditFindingView.as_view()) class EditPortfolioView(DjangoPersonEditViewBase): form_class = PortfolioForm template_name = 'edit_portfolio.html' edit_portfolio = must_be_owner(EditPortfolioView.as_view()) class EditAccountView(DjangoPersonEditViewBase): form_class = AccountForm template_name = 'edit_account.html' edit_account = must_be_owner(EditAccountView.as_view()) class EditSkillsView(DjangoPersonEditViewBase): form_class = SkillsForm template_name = 'edit_skills.html' edit_skills = must_be_owner(EditSkillsView.as_view()) class EditPassword(generic.UpdateView): form_class = PasswordForm template_name = 'edit_password.html' def get_object(self): return get_object_or_404(User, username=self.kwargs['username']) def get_success_url(self): return reverse('user_profile', args=[self.kwargs['username']]) edit_password = must_be_owner(EditPassword.as_view()) class EditBioView(DjangoPersonEditViewBase): form_class = BioForm template_name = 'edit_bio.html' edit_bio = must_be_owner(EditBioView.as_view()) class EditLocationView(DjangoPersonEditViewBase): form_class = LocationForm template_name = 'edit_location.html' def get_initial(self): initial = super().get_initial() initial.update({ 'country': self.object.country.iso_code, }) return initial edit_location = must_be_owner(EditLocationView.as_view()) class SkillCloudView(generic.TemplateView): template_name = 'skills.html' def get_context_data(self, **kwargs): tags = DjangoPerson.skilltags.cloud(steps=5) calculate_cloud(tags, 5) context = super().get_context_data(**kwargs) context.update({ 'tags': tags, }) return context skill_cloud = SkillCloudView.as_view() class CountrySkillCloudView(generic.DetailView): context_object_name = 'country' template_name = 'skills.html' def get_object(self): return get_object_or_404(Country, iso_code=self.kwargs['country_code'].upper()) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tags = Tag.objects.cloud_for_model(DjangoPerson, steps=5, filters={ 'country': self.object, }) calculate_cloud(tags, 5) context.update({ 'tags': tags, }) return context country_skill_cloud = CountrySkillCloudView.as_view() class TaggedObjectList(generic.ListView): related_tags = False related_tag_counts = True select_related = False def get_queryset(self): 
self.tag_instance = get_tag(self.kwargs['tag']) if self.tag_instance is None: raise Http404( _('No Tag found matching "%s".') % self.kwargs['tag'] ) queryset = TaggedItem.objects.get_by_model(self.model, self.tag_instance) if self.select_related: queryset = queryset.select_related(*self.select_related) filter_args = self.get_extra_filter_args() if filter_args: queryset = queryset.filter(**filter_args) return queryset def get_extra_filter_args(self): return {} def get_context_data(self, **kwargs): kwargs.update({ 'tag': self.kwargs['tag'], }) if self.related_tags: kwargs['related_tags'] = Tag.objects.related_for_model( self.tag_instance, self.model, counts=self.related_tag_counts ) ctx = super().get_context_data(**kwargs) return ctx class Skill(TaggedObjectList): model = DjangoPerson related_tags = True template_name = 'skill.html' context_object_name = 'people_list' select_related = ['user', 'country'] skill = Skill.as_view() class CountrySkill(TaggedObjectList): model = DjangoPerson related_tags = True template_name = 'skill.html' context_object_name = 'people_list' def get_context_data(self, **kwargs): kwargs['country'] = Country.objects.get( iso_code=self.kwargs['country_code'].upper() ) return super().get_context_data(**kwargs) def get_extra_filter_args(self): filters = super().get_extra_filter_args() filters['country__iso_code'] = self.kwargs['country_code'].upper() return filters country_skill = CountrySkill.as_view() class CountryLookingForView(generic.ListView): context_object_name = 'people' template_name = 'country_looking_for.html' def get_queryset(self): self.country = get_object_or_404( Country, iso_code=self.kwargs['country_code'].upper(), ) ids = [ o['object_id'] for o in MachineTaggedItem.objects.filter( namespace='profile', predicate='looking_for_work', value=self.kwargs['looking_for'], ).values('object_id') ] return DjangoPerson.objects.filter(country=self.country, id__in=ids) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'country': self.country, 'looking_for': self.kwargs['looking_for'], }) return context country_looking_for = CountryLookingForView.as_view() class SearchView(generic.ListView): context_object_name = 'people_list' template_name = 'search.html' def get_queryset(self): self.q = self.request.GET.get('q', '') self.has_badwords = [ w.strip() for w in self.q.split() if len(w.strip()) in (1, 2) ] if self.q: return self.search_people() return [] def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'q': self.q, 'has_badwords': self.has_badwords }) return context def search_people(self): words = [w.strip() for w in self.q.split() if len(w.strip()) > 2] if not words: return [] terms = [] for word in words: terms.append(Q( user__username__icontains=word) | Q(user__first_name__icontains=word) | Q(user__last_name__icontains=word) ) combined = functools.reduce(operator.and_, terms) return DjangoPerson.objects.filter( combined, ).select_related().distinct() search = SearchView.as_view() class IRCActiveView(generic.ListView): context_object_name = 'people_list' template_name = 'irc_active.html' def get_queryset(self): results = DjangoPerson.objects.filter( last_active_on_irc__gt=(timezone.now() - datetime.timedelta(hours=1)) ).order_by('-last_active_on_irc') # Filter out the people who don't want to be tracked (inefficient) return [r for r in results if r.irc_tracking_allowed()] irc_active = IRCActiveView.as_view() class RequestFormMixin(object): def get_form_kwargs(self): 
kwargs = super().get_form_kwargs() kwargs['request'] = self.request return kwargs class DeletionRequestView(RequestFormMixin, generic.FormView): form_class = DeletionRequestForm template_name = 'delete_account_request.html' def form_valid(self, form): form.save() return redirect(reverse('delete_account_next', args=[self.request.user.username])) delete_account_request = must_be_owner(DeletionRequestView.as_view()) class DeletionNext(generic.TemplateView): template_name = 'delete_account_next.html' delete_account_next = must_be_owner(DeletionNext.as_view()) class AccountDeletionView(RequestFormMixin, generic.FormView): form_class = AccountDeletionForm template_name = 'delete_account.html' def dispatch(self, request, *args, **kwargs): try: self.key = signing.loads(kwargs['key'], max_age=3600, salt='delete_account') except signing.SignatureExpired: return redirect(reverse('delete_account_request', args=[request.user.username])) except signing.BadSignature: raise Http404 return super().dispatch(request, *args, **kwargs) def form_valid(self, form): form.save() return redirect(reverse('delete_account_done', args=[self.request.user.username])) def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx['key'] = self.kwargs['key'] return ctx delete_account = must_be_owner(AccountDeletionView.as_view()) class DeletionDone(generic.TemplateView): template_name = 'delete_account_done.html' def dispatch(self, request, *args, **kwargs): if User.objects.filter(username=kwargs['username']).exists(): raise Http404 return super().dispatch(request, *args, **kwargs) delete_account_done = DeletionDone.as_view() def geonames(request): params = dict(request.GET) params['username'] = settings.GEONAMES_USERNAME response = requests.get('http://ws.geonames.org/findNearbyPlaceNameJSON', params=params) return HttpResponse(json.dumps(response.json()), content_type='application/json')
the-stack_0_6012
import game, server, menu_utils, df_utils, items from srabuilder import rules import functools import dragonfly as df wrapper = menu_utils.InventoryMenuWrapper() async def get_shipping_menu(): menu = await menu_utils.get_active_menu(menu_type='itemsToGrabMenu') if not menu['shippingBin']: raise menu_utils.InvalidMenuOption() return menu async def focus_item(new_row, new_col): menu = await get_shipping_menu() submenu = menu['inventoryMenu'] await wrapper.focus_box(submenu, new_row, new_col) mapping = { "item <positive_index>": df_utils.async_action(focus_item, None, 'positive_index'), "row <positive_index>": df_utils.async_action(focus_item, 'positive_index', None), "ok": df_utils.async_action(menu_utils.click_menu_button, 'okButton', get_shipping_menu), "undo": df_utils.async_action(menu_utils.click_menu_button, 'lastShippedHolder', get_shipping_menu), } @menu_utils.valid_menu_test def is_active(): menu = game.get_context_menu('itemsToGrabMenu') return menu['shippingBin'] def load_grammar(): grammar = df.Grammar("shipping_bin_menu") main_rule = df.MappingRule( name="shipping_bin_menu_rule", mapping=mapping, extras=[rules.num, df_utils.positive_index], context=is_active ) grammar.add_rule(main_rule) grammar.load()
the-stack_0_6013
import pytest from django.urls import resolve, reverse from pinterest.users.models import User pytestmark = pytest.mark.django_db def test_detail(user: User): assert ( reverse("users:detail", kwargs={"username": user.username}) == f"/users/{user.username}/" ) assert resolve(f"/users/{user.username}/").view_name == "users:detail" def test_update(): assert reverse("users:update") == "/users/~update/" assert resolve("/users/~update/").view_name == "users:update" def test_redirect(): assert reverse("users:redirect") == "/users/~redirect/" assert resolve("/users/~redirect/").view_name == "users:redirect"
the-stack_0_6014
"""SIGMET""" # stdlib from collections import defaultdict # 3rd Party import pytest # this from pyiem.exceptions import SIGMETException from pyiem.nws.products.sigmet import parser, compute_esol from pyiem.util import utc, get_test_file def mydict(): """return dict.""" return dict(lon=-85.50, lat=42.79) NWSLI_PROVIDER = defaultdict(mydict) def test_opairs(): """Test that exception is raised.""" utcnow = utc(2021, 1, 9, 7, 58) with pytest.raises(SIGMETException): parser( get_test_file("SIGMETS/SIGAK3.txt"), utcnow, nwsli_provider=NWSLI_PROVIDER, ) def test_190503_badgeom(): """This SIGMET produced a traceback in prod.""" utcnow = utc(2019, 5, 3, 18, 25) tp = parser( get_test_file("SIGMETS/SIGC_badgeom.txt"), utcnow, nwsli_provider=NWSLI_PROVIDER, ) assert len(tp.sigmets) == 4 def test_170815_pywwa_issue3(): """This example was in pyWWA issues list, so lets test here""" utcnow = utc(2015, 9, 30, 16, 56) tp = parser( get_test_file("SIGMETS/SIGE.txt"), utcnow, nwsli_provider=NWSLI_PROVIDER, ) assert len(tp.sigmets) == 4 def test_150930_sigak2(): """Got an error with this product""" utcnow = utc(2015, 9, 30, 16, 56) tp = parser(get_test_file("SIGMETS/SIGAK2.txt"), utcnow) assert not tp.sigmets def test_150921_sigpas(): """Got an error with this product""" utcnow = utc(2015, 9, 21, 10, 57) tp = parser(get_test_file("SIGMETS/SIGPAS.txt"), utcnow) assert len(tp.sigmets) == 1 def test_150917_cancel(): """Don't error out on a CANCELs SIGMET""" utcnow = utc(2015, 9, 17, 0, 0) tp = parser(get_test_file("SIGMETS/SIGPAP_cancel.txt"), utcnow) assert not tp.sigmets def test_compute_esol(): """Test our algo on either side of a line""" pts = [[0, 0], [5, 0]] pts = compute_esol(pts, 111) print(pts) assert abs(pts[0][0] - 0.00) < 0.01 assert abs(pts[0][1] - 1.00) < 0.01 assert abs(pts[1][0] - 5.00) < 0.01 assert abs(pts[1][1] - 1.00) < 0.01 assert abs(pts[2][0] - 5.00) < 0.01 assert abs(pts[2][1] - -1.00) < 0.01 assert abs(pts[3][0] - 0.00) < 0.01 assert abs(pts[3][1] - -1.00) < 0.01 assert abs(pts[4][0] - 0.00) < 0.01 assert abs(pts[4][1] - 1.00) < 0.01 def test_150915_line(): """See about parsing a SIGMET LINE""" utcnow = utc(2015, 9, 15, 2, 55) ugc_provider = {} nwsli_provider = { "MSP": dict(lon=-83.39, lat=44.45), "MCW": dict(lon=-85.50, lat=42.79), } tp = parser( get_test_file("SIGMETS/SIGC_line.txt"), utcnow, ugc_provider, nwsli_provider, ) assert abs(tp.sigmets[0].geom.area - 0.47) < 0.01 def test_150915_isol(): """See about parsing a SIGMET ISOL""" utcnow = utc(2015, 9, 12, 23, 55) ugc_provider = {} nwsli_provider = { "FTI": dict(lon=-83.39, lat=44.45), "CME": dict(lon=-85.50, lat=42.79), } tp = parser( get_test_file("SIGMETS/SIGC_ISOL.txt"), utcnow, ugc_provider, nwsli_provider, ) assert abs(tp.sigmets[0].geom.area - 0.30) < 0.01 assert abs(tp.sigmets[1].geom.area - 0.30) < 0.01 def test_150915_nospace(): """See about parsing a SIGMET that has no spaces""" utcnow = utc(2015, 9, 15, 15, 41) tp = parser(get_test_file("SIGMETS/SIGAX.txt"), utcnow) assert abs(tp.sigmets[0].geom.area - 23.47) < 0.01 def test_140907_circle(): """See about parsing a SIGMET that is circle?""" utcnow = utc(2014, 9, 6, 22, 15) tp = parser(get_test_file("SIGMETS/SIGP0H.txt"), utcnow) assert abs(tp.sigmets[0].geom.area - 11.70) < 0.01 def test_140813_line(): """See about parsing a SIGMET that is a either side of line""" utcnow = utc(2014, 8, 12, 13, 15) tp = parser(get_test_file("SIGMETS/SIGP0A_line.txt"), utcnow) assert abs(tp.sigmets[0].geom.area - 4.32) < 0.01 def test_140815_cancel(): """See about parsing a SIGMET that is a 
either side of line""" utcnow = utc(2014, 8, 15, 23, 41) tp = parser(get_test_file("SIGMETS/SIG_cancel.txt"), utcnow) assert not tp.sigmets def test_sigaoa(): """SIGAOA""" utcnow = utc(2014, 8, 11, 19, 15) tp = parser(get_test_file("SIGMETS/SIGA0A.txt"), utcnow) assert abs(tp.sigmets[0].geom.area - 24.35) < 0.01 def test_sigaob(): """See about parsing 50E properly""" utcnow = utc(2014, 8, 11, 19, 15) tp = parser(get_test_file("SIGMETS/SIGA0B.txt"), utcnow) assert not tp.sigmets @pytest.mark.parametrize("database", ["postgis"]) def test_50e(dbcursor): """See about parsing 50E properly""" utcnow = utc(2014, 8, 11, 18, 55) ugc_provider = {} nwsli_provider = { "ASP": dict(lon=-83.39, lat=44.45), "ECK": dict(lon=-82.72, lat=43.26), "GRR": dict(lon=-85.50, lat=42.79), } tp = parser( get_test_file("SIGMETS/SIGE3.txt"), utcnow, ugc_provider, nwsli_provider, ) assert abs(tp.sigmets[0].geom.area - 2.15) < 0.01 tp.sql(dbcursor) def test_sigc(): """See about parsing SIGC""" utcnow = utc(2014, 8, 11, 16, 55) ugc_provider = {} nwsli_provider = {} for sid in ( "MSL,SJI,MLU,LIT,BTR,LEV,LCH,IAH,YQT,SAW,SAT,DYC,AXC," "ODI,DEN,TBE,ADM,JCT,INK,ELP" ).split(","): nwsli_provider[sid] = dict(lon=-99, lat=45) tp = parser( get_test_file("SIGMETS/SIGC.txt"), utcnow, ugc_provider, nwsli_provider ) j = tp.get_jabbers("http://localhost", "http://localhost") assert tp.sigmets[0].ets == utc(2014, 8, 11, 18, 55) ans = "KKCI issues SIGMET 62C for AL MS LA AR till 1855 UTC" assert j[0][0] == ans ans = ( "KKCI issues SIGMET 63C for LA TX AND MS LA TX CSTL WTRS till 1855 UTC" ) assert j[1][0] == ans def test_sigpat(): """Make sure we don't have another failure with geom parsing""" utcnow = utc(2014, 8, 11, 12, 34) tp = parser(get_test_file("SIGMETS/SIGPAT.txt"), utcnow) j = tp.get_jabbers("http://localhost", "http://localhost") assert abs(tp.sigmets[0].geom.area - 33.71) < 0.01 assert tp.sigmets[0].sts == utc(2014, 8, 11, 12, 35) assert tp.sigmets[0].ets == utc(2014, 8, 11, 16, 35) assert j[0][0] == "PHFO issues SIGMET TANGO 1 till 1635 UTC"
the-stack_0_6015
import hashlib import json from sanic import response from datasette.utils import ( CustomJSONEncoder, InterruptedError, detect_primary_keys, detect_fts, ) from datasette.version import __version__ from .base import HASH_LENGTH, RenderMixin class IndexView(RenderMixin): name = "index" def __init__(self, datasette): self.ds = datasette async def get(self, request, as_format): databases = [] for name, db in self.ds.databases.items(): table_counts = await db.table_counts(5) views = await db.view_names() tables = {} hidden_table_names = set(await db.hidden_table_names()) for table in table_counts: table_columns = await self.ds.table_columns(name, table) tables[table] = { "name": table, "columns": table_columns, "primary_keys": await self.ds.execute_against_connection_in_thread( name, lambda conn: detect_primary_keys(conn, table) ), "count": table_counts[table], "hidden": table in hidden_table_names, "fts_table": await self.ds.execute_against_connection_in_thread( name, lambda conn: detect_fts(conn, table) ), } hidden_tables = [t for t in tables.values() if t["hidden"]] databases.append( { "name": name, "hash": db.hash, "color": db.hash[:6] if db.hash else hashlib.md5(name.encode("utf8")).hexdigest()[:6], "path": self.database_url(name), "tables_truncated": sorted( tables.values(), key=lambda t: t["count"] or 0, reverse=True )[:5], "tables_count": len(tables), "tables_more": len(tables) > 5, "table_rows_sum": sum((t["count"] or 0) for t in tables.values()), "hidden_table_rows_sum": sum(t["count"] for t in hidden_tables), "hidden_tables_count": len(hidden_tables), "views_count": len(views), } ) if as_format: headers = {} if self.ds.cors: headers["Access-Control-Allow-Origin"] = "*" return response.HTTPResponse( json.dumps({db["name"]: db for db in databases}, cls=CustomJSONEncoder), content_type="application/json", headers=headers, ) else: return self.render( ["index.html"], databases=databases, metadata=self.ds.metadata(), datasette_version=__version__, )
the-stack_0_6018
# -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """pydoc widget""" # Standard library imports import os.path as osp import sys # Third party imports from qtpy.QtCore import Qt, QThread, QUrl, Signal from qtpy.QtGui import QCursor from qtpy.QtWidgets import QApplication # Local imports from spyder.config.base import _ from spyder.py3compat import PY3, to_text_string from spyder.utils.misc import select_port from spyder.widgets.browser import WebBrowser class PydocServer(QThread): """Pydoc server""" server_started = Signal() def __init__(self, port=7464): QThread.__init__(self) self.port = port self.server = None self.complete = False def run(self): import pydoc if PY3: # Python 3 try: self.callback(pydoc._start_server(pydoc._url_handler, port=self.port)) except TypeError: # Python 3.7 self.callback(pydoc._start_server(pydoc._url_handler, hostname='localhost', port=self.port)) else: # Python 2 pydoc.serve(self.port, self.callback, self.completer) def callback(self, server): self.server = server self.server_started.emit() def completer(self): self.complete = True def quit_server(self): if PY3: # Python 3 if self.server.serving: self.server.stop() else: # Python 2 self.server.quit = 1 class PydocBrowser(WebBrowser): """ pydoc widget """ DEFAULT_PORT = 30128 def __init__(self, parent, options_button=None): WebBrowser.__init__(self, parent, options_button=options_button) self.server = None self.port = None def initialize(self): """Start pydoc server""" QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) QApplication.processEvents() self.start_server() # Initializing continues in `initialize_continued` method... def initialize_continued(self): """Load home page""" self.go_home() QApplication.restoreOverrideCursor() def is_server_running(self): """Return True if pydoc server is already running""" return self.server is not None def closeEvent(self, event): self.server.quit_server() # while not self.server.complete: #XXX Is it really necessary? # pass event.accept() #------ Public API ----------------------------------------------------- def start_server(self): """Start pydoc server""" if self.server is None: self.port = select_port(default_port=self.DEFAULT_PORT) self.set_home_url('http://localhost:%d/' % self.port) elif self.server.isRunning(): self.server.server_started.disconnect(self.initialize_continued) self.server.quit() self.server = PydocServer(port=self.port) self.server.server_started.connect(self.initialize_continued) self.server.start() #------ WebBrowser API ----------------------------------------------------- def get_label(self): """Return address label text""" return _("Module or package:") def reload(self): """Reload page""" self.start_server() WebBrowser.reload(self) def text_to_url(self, text): """Convert text address into QUrl object""" if text.startswith('/'): text = text[1:] return QUrl(self.home_url.toString()+text+'.html') def url_to_text(self, url): """Convert QUrl object to displayed text in combo box""" return osp.splitext(to_text_string(url.path()))[0][1:] def test(): """Run web browser""" from spyder.utils.qthelpers import qapplication app = qapplication(test_time=8) widget = PydocBrowser(None) widget.show() widget.initialize() sys.exit(app.exec_()) if __name__ == '__main__': test()
the-stack_0_6026
"""Definition of the Element Summation Component.""" import collections import numpy as np from scipy import sparse as sp from six import string_types from openmdao.core.explicitcomponent import ExplicitComponent class SumComp(ExplicitComponent): r""" Compute a vectorized summation. Use the add_equation method to define any number of summations User defines the names of the input and output variables using add_equation(output_name='my_output', input_name='my_input') Use option axis = None to sum over all array elements. Default behavior sums along the columns. .. math:: \textrm{result}_j = \sum_{i=1} ^\text{vec_size} a_{ij} * \textrm{scaling factor} where - a is shape (vec_size, n) - b is of shape (vec_size, n) - c is of shape (vec_size, n) Result is of shape (1, n) or (1, ) Attributes ---------- _add_systems : list List of equation systems to be initialized with the system. """ def __init__(self, output_name=None, input_name=None, vec_size=1, length=1, val=1.0, scaling_factor=1, **kwargs): """ Allow user to create an multiplication system with one-liner. Parameters ---------- output_name : str (required) name of the result variable in this component's namespace. input_name : str (required) name of the input variable for this system vec_size : int Length of the first dimension of the input and output vectors (i.e number of rows, or vector length for a 1D vector) Default is 1 length : int Length of the second dimension of the input and ouptut vectors (i.e. number of columns) Default is 1 which results in input/output vectors of size (vec_size,) scaling_factor : numeric Scaling factor to apply to the whole system Default is 1 val : float or list or tuple or ndarray The initial value of the variable being added in user-defined units. Default is 1.0. **kwargs : str Any other arguments to pass to the addition system (same as add_output method for ExplicitComponent) Examples include units (str or None), desc (str) """ axis = kwargs.pop('axis', 0) super(SumComp, self).__init__(axis=axis) self._add_systems = [] if isinstance(output_name, string_types): self._add_systems.append((output_name, input_name, vec_size, length, val, scaling_factor, kwargs)) elif isinstance(output_name, collections.Iterable): raise NotImplementedError('Declaring multiple systems ' 'on initiation is not implemented.' 'Use a string to name a single addition relationship or use ' 'multiple add_equation calls') elif output_name is None: pass else: raise ValueError( "first argument to init must be either of type " "`str' or 'None'") def initialize(self): """ Declare options. Parameters ---------- axis : int or None Sum along this axis. Default 0 sums along first dimension. None sums all elements into a scalar. 1 sums along rows. """ self.options.declare('axis', default=0, desc="Axis along which to sum") def add_equation(self, output_name, input_name, vec_size=1, length=1, val=1.0, units=None, res_units=None, desc='', lower=None, upper=None, ref=1.0, ref0=0.0, res_ref=None, scaling_factor=1): """ Add a multiplication relation. Parameters ---------- output_name : str (required) name of the result variable in this component's namespace. input_name : iterable of str (required) names of the input variables for this system vec_size : int Length of the first dimension of the input and output vectors (i.e number of rows, or vector length for a 1D vector) Default is 1 length : int Length of the second dimension of the input and ouptut vectors (i.e. 
number of columns) Default is 1 which results in input/output vectors of size (vec_size,) scaling_factor : numeric Scaling factor to apply to the whole system Default is 1 val : float or list or tuple or ndarray The initial value of the variable being added in user-defined units. Default is 1.0. units : str or None Units in which the output variables will be provided to the component during execution. Default is None, which means it has no units. res_units : str or None Units in which the residuals of this output will be given to the user when requested. Default is None, which means it has no units. desc : str description of the variable. lower : float or list or tuple or ndarray or Iterable or None lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like consistent with the shape arg (if given), or (3) an array_like matching the shape of val, if val is array_like. A value of None means this output has no lower bound. Default is None. upper : float or list or tuple or ndarray or or Iterable None upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like consistent with the shape arg (if given), or (3) an array_like matching the shape of val, if val is array_like. A value of None means this output has no upper bound. Default is None. ref : float or ndarray Scaling parameter. The value in the user-defined units of this output variable when the scaled value is 1. Default is 1. ref0 : float or ndarray Scaling parameter. The value in the user-defined units of this output variable when the scaled value is 0. Default is 0. res_ref : float or ndarray Scaling parameter. The value in the user-defined res_units of this output's residual when the scaled value is 1. Default is 1. """ kwargs = {'units': units, 'res_units': res_units, 'desc': desc, 'lower': lower, 'upper': upper, 'ref': ref, 'ref0': ref0, 'res_ref': res_ref} self._add_systems.append((output_name, input_name, vec_size, length, val, scaling_factor, kwargs)) def add_output(self): """ Use add_equation instead of add_output to define equation systems. """ raise NotImplementedError('Use add_equation method, not add_output method' 'to create an multliplication/division relation') def setup(self): """ Set up the addition/subtraction system at run time. """ axis = self.options['axis'] for (output_name, input_name, vec_size, length, val, scaling_factor, kwargs) in self._add_systems: units = kwargs.get('units', None) desc = kwargs.get('desc', '') if length == 1: shape = (vec_size,) else: shape = (vec_size, length) self.add_input(input_name, shape=shape, units=units, desc=desc + '_inp_' + input_name) if axis is None: rowidx = np.zeros(vec_size * length) output_shape = (1,) elif axis == 0: output_arange = np.arange(0, length) rowidx = np.tile(output_arange, vec_size) if length == 1: output_shape = (1,) else: output_shape = (1, length) elif axis == 1: output_arange = np.arange(0, vec_size) rowidx = np.repeat(output_arange, length) output_shape = (vec_size,) else: raise ValueError('Summation is allowed only over axis=0, 1 or None') colidx = np.arange(0, vec_size * length) self.declare_partials([output_name], [input_name], rows=rowidx, cols=colidx, val=scaling_factor * np.ones(vec_size * length)) super(SumComp, self).add_output(output_name, val, shape=output_shape, **kwargs) def compute(self, inputs, outputs): """ Compute the summation using numpy. 
Parameters ---------- inputs : Vector unscaled, dimensional input variables read via inputs[key] outputs : Vector unscaled, dimensional output variables read via outputs[key] """ axis = self.options['axis'] for (output_name, input_name, vec_size, length, val, scaling_factor, kwargs) in self._add_systems: if axis is None: output_shape = (1,) elif axis == 0: if length == 1: output_shape = (1,) else: output_shape = (1, length) elif axis == 1: output_shape = (vec_size,) result = np.sum(inputs[input_name], axis=axis) * scaling_factor outputs[output_name] = result.reshape(output_shape)
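

# --- Editor's illustration (not part of the original file) ---
# A minimal usage sketch for SumComp, assuming OpenMDAO is installed; the
# subsystem name, variable names and values below are made up for the example.
if __name__ == "__main__":
    from openmdao.api import Problem

    prob = Problem()
    summer = SumComp(output_name='total', input_name='forces',
                     vec_size=4, length=3, scaling_factor=2.0, units='N')
    prob.model.add_subsystem('sum_forces', summer, promotes=['*'])
    prob.setup()
    prob['forces'] = np.ones((4, 3))
    prob.run_model()
    # With the default axis=0 the output has shape (1, 3); each entry is 2.0 * 4 = 8.0.
    print(prob['total'])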
the-stack_0_6028
# Don't you feel uneasy when a script has no `import` lines at the top?
# Run this with PyPy if you can, for extra speed.
result = set()
with open('words_alpha.txt', encoding='utf-8') as f:
    for word in f.read().splitlines():
        result.add(word)

with open('out.txt', 'wb') as f:
    for word in sorted(result):
        if len(word) >= 5:  # filter the words!
            try:
                f.write(word.encode('ascii'))
                f.write(b'\n')
            except Exception as e:
                print(e, word)
exit()
the-stack_0_6030
# Copyright (c) 2020, NVIDIA CORPORATION. import itertools import warnings import numpy as np import pandas as pd import cudf import cudf._lib as libcudf from cudf._lib.join import compute_result_col_names from cudf.core.dtypes import CategoricalDtype class Merge(object): def __init__( self, lhs, rhs, on, left_on, right_on, left_index, right_index, how, sort, lsuffix, rsuffix, method, indicator, suffixes, ): """ Manage the merging of two Frames. Parameters ---------- lhs : Series or DataFrame The left operand of the merge rhs : Series or DataFrame The right operand of the merge on : string or list like A set of key columns in the left and right operands elements must be common to both frames left_on : string or list like A set of key columns in the left operand. Must be specified with right_on or right_index concurrently right_on : string or list like A set of key columns in the right operand. Must be specified with left_on or left_index concurrently left_index : bool Boolean flag indicating the left index column or columns are to be used as join keys in order. right_index : bool Boolean flag indicating the right index column or coumns are to be used as join keys in order. how : string The type of join. Possible values are 'inner', 'outer', 'left', 'leftsemi' and 'leftanti' sort : bool Boolean flag indicating if the output Frame is to be sorted on the output's join keys, in left to right order. lsuffix : string The suffix to be appended to left hand column names that are found to exist in the right frame, but are not specified as join keys themselves. rsuffix : string The suffix to be appended to right hand column names that are found to exist in the left frame, but are not specified as join keys themselves. suffixes : list like Left and right suffixes specified together, unpacked into lsuffix and rsuffix. """ self.lhs = lhs self.rhs = rhs self.left_index = left_index self.right_index = right_index self.method = method self.sort = sort # check that the merge is valid self.validate_merge_cfg( lhs, rhs, on, left_on, right_on, left_index, right_index, how, lsuffix, rsuffix, suffixes, ) self.how = how self.preprocess_merge_params( on, left_on, right_on, lsuffix, rsuffix, suffixes ) def perform_merge(self): """ Call libcudf to perform a merge between the operands. If necessary, cast the input key columns to compatible types. Potentially also cast the output back to categorical. """ output_dtypes = self.compute_output_dtypes() self.typecast_input_to_libcudf() libcudf_result = libcudf.join.join( self.lhs, self.rhs, self.how, self.method, left_on=self.left_on, right_on=self.right_on, left_index=self.left_index, right_index=self.right_index, ) result = self.out_class._from_table(libcudf_result) result = self.typecast_libcudf_to_output(result, output_dtypes) return result[compute_result_col_names(self.lhs, self.rhs, self.how)] def preprocess_merge_params( self, on, left_on, right_on, lsuffix, rsuffix, suffixes ): """ Translate a valid configuration of user input parameters into the subset of input configurations handled by the cython layer. Apply suffixes to columns. 
""" self.out_class = cudf.DataFrame if on: on = [on] if isinstance(on, str) else list(on) left_on = right_on = on else: if left_on: left_on = ( [left_on] if isinstance(left_on, str) else list(left_on) ) if right_on: right_on = ( [right_on] if isinstance(right_on, str) else list(right_on) ) same_named_columns = set(self.lhs._data.keys()) & set( self.rhs._data.keys() ) if not (left_on or right_on) and not ( self.left_index and self.right_index ): left_on = right_on = list(same_named_columns) no_suffix_cols = [] if left_on and right_on: no_suffix_cols = [ left_name for left_name, right_name in zip(left_on, right_on) if left_name == right_name and left_name in same_named_columns ] if suffixes: lsuffix, rsuffix = suffixes for name in same_named_columns: if name not in no_suffix_cols: self.lhs.rename({name: f"{name}{lsuffix}"}, inplace=True) self.rhs.rename({name: f"{name}{rsuffix}"}, inplace=True) if left_on and name in left_on: left_on[left_on.index(name)] = f"{name}{lsuffix}" if right_on and name in right_on: right_on[right_on.index(name)] = f"{name}{rsuffix}" self.left_on = left_on if left_on is not None else [] self.right_on = right_on if right_on is not None else [] self.lsuffix = lsuffix self.rsuffix = rsuffix @staticmethod def validate_merge_cfg( lhs, rhs, on, left_on, right_on, left_index, right_index, how, lsuffix, rsuffix, suffixes, ): """ Error for various invalid combinations of merge input parameters """ # must actually support the requested merge type if how not in {"left", "inner", "outer", "leftanti", "leftsemi"}: raise NotImplementedError(f"{how} merge not supported yet") # Passing 'on' with 'left_on' or 'right_on' is ambiguous if on and (left_on or right_on): raise ValueError( 'Can only pass argument "on" OR "left_on" ' 'and "right_on", not a combination of both.' 
) # Can't merge on unnamed Series if (isinstance(lhs, cudf.Series) and not lhs.name) or ( isinstance(rhs, cudf.Series) and not rhs.name ): raise ValueError("Can not merge on unnamed Series") # Keys need to be in their corresponding operands if on: if isinstance(on, str): on_keys = [on] elif isinstance(on, tuple): on_keys = list(on) else: on_keys = on for key in on_keys: if not (key in lhs._data.keys() and key in rhs._data.keys()): raise KeyError(f"on key {on} not in both operands") elif left_on and right_on: left_on_keys = ( [left_on] if not isinstance(left_on, list) else left_on ) right_on_keys = ( [right_on] if not isinstance(right_on, list) else right_on ) for key in left_on_keys: if key not in lhs._data.keys(): raise KeyError(f'Key "{key}" not in left operand') for key in right_on_keys: if key not in rhs._data.keys(): raise KeyError(f'Key "{key}" not in right operand') # Require same total number of columns to join on in both operands len_left_on = 0 len_right_on = 0 if left_on: len_left_on += ( len(left_on) if pd.api.types.is_list_like(left_on) else 1 ) if right_on: len_right_on += ( len(right_on) if pd.api.types.is_list_like(right_on) else 1 ) if not (len_left_on + left_index * lhs._num_indices) == ( len_right_on + right_index * rhs._num_indices ): raise ValueError( "Merge operands must have same number of join key columns" ) # If nothing specified, must have common cols to use implicitly same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys()) if ( not (left_index or right_index) and not (left_on or right_on) and len(same_named_columns) == 0 ): raise ValueError("No common columns to perform merge on") if suffixes: lsuffix, rsuffix = suffixes for name in same_named_columns: if name == left_on == right_on: continue elif left_on and right_on: if (name in left_on and name in right_on) and ( left_on.index(name) == right_on.index(name) ): continue else: if not (lsuffix or rsuffix): raise ValueError( "there are overlapping columns but " "lsuffix and rsuffix are not defined" ) def typecast_input_to_libcudf(self): """ Check each pair of join keys in the left and right hand operands and apply casting rules to match their types before passing the result to libcudf. """ lhs_keys, rhs_keys, lhs_cols, rhs_cols = [], [], [], [] if self.left_index: lhs_keys.append(self.lhs.index._data.keys()) lhs_cols.append(self.lhs.index) if self.right_index: rhs_keys.append(self.rhs.index._data.keys()) rhs_cols.append(self.rhs.index) if self.left_on: lhs_keys.append(self.left_on) lhs_cols.append(self.lhs) if self.right_on: rhs_keys.append(self.right_on) rhs_cols.append(self.rhs) for l_key_grp, r_key_grp, l_col_grp, r_col_grp in zip( lhs_keys, rhs_keys, lhs_cols, rhs_cols ): for l_key, r_key in zip(l_key_grp, r_key_grp): to_dtype = self.input_to_libcudf_casting_rules( l_col_grp._data[l_key], r_col_grp._data[r_key], self.how ) l_col_grp._data[l_key] = l_col_grp._data[l_key].astype( to_dtype ) r_col_grp._data[r_key] = r_col_grp._data[r_key].astype( to_dtype ) def input_to_libcudf_casting_rules(self, lcol, rcol, how): """ Determine what dtype the left and right hand input columns must be cast to for a libcudf join to proceed. 
""" cast_warn = ( "can't safely cast column from {} with type" " {} to {}, upcasting to {}" ) ctgry_err = ( "can't implicitly cast column {0} to categories" " from {1} during {1} join" ) dtype_l = lcol.dtype dtype_r = rcol.dtype libcudf_join_type = None if pd.api.types.is_dtype_equal(dtype_l, dtype_r): # if categorical and equal, children passed to libcudf libcudf_join_type = dtype_l elif isinstance(dtype_l, CategoricalDtype) and isinstance( dtype_r, CategoricalDtype ): # categories are not equal libcudf_join_type = np.dtype("O") elif how == "left": check_col = rcol.fillna(0) if not check_col.can_cast_safely(dtype_l): libcudf_join_type = self.input_to_libcudf_casting_rules( lcol, rcol, "inner" ) warnings.warn( cast_warn.format( "right", dtype_r, dtype_l, libcudf_join_type ) ) else: libcudf_join_type = dtype_l elif how == "right": check_col = lcol.fillna(0) if not check_col.can_cast_safely(dtype_r): libcudf_join_type = self.input_to_libcudf_casting_rules( lcol, rcol, "inner" ) warnings.warn( cast_warn.format( "left", dtype_l, dtype_r, libcudf_join_type ) ) else: libcudf_join_type = dtype_r elif isinstance(dtype_l, CategoricalDtype): if how == "right": raise ValueError(ctgry_err.format(rcol, "right")) libcudf_join_type = lcol.cat().categories.dtype elif isinstance(dtype_r, CategoricalDtype): if how == "left": raise ValueError(ctgry_err.format(lcol, "left")) libcudf_join_type = rcol.cat().categories.dtype elif how in {"inner", "outer"}: if (np.issubdtype(dtype_l, np.number)) and ( np.issubdtype(dtype_r, np.number) ): if dtype_l.kind == dtype_r.kind: # both ints or both floats libcudf_join_type = max(dtype_l, dtype_r) else: libcudf_join_type = np.find_common_type( [], [dtype_l, dtype_r] ) elif np.issubdtype(dtype_l, np.datetime64) and np.issubdtype( dtype_r, np.datetime64 ): libcudf_join_type = max(dtype_l, dtype_r) return libcudf_join_type def libcudf_to_output_casting_rules(self, lcol, rcol, how): """ Determine what dtype an output merge key column should be cast to after it has been processed by libcudf. Determine if a column should be promoted to a categorical datatype. """ dtype_l = lcol.dtype dtype_r = rcol.dtype merge_return_type = None # we currently only need to do this for categorical variables if isinstance(dtype_l, CategoricalDtype) and isinstance( dtype_r, CategoricalDtype ): if pd.api.types.is_dtype_equal(dtype_l, dtype_r): if how in {"inner", "left"}: merge_return_type = dtype_l elif how == "outer" and not ( dtype_l.ordered or dtype_r.ordered ): new_cats = cudf.concat( dtype_l.categories, dtype_r.categories ).unique() merge_return_type = cudf.core.dtypes.CategoricalDtype( categories=new_cats ) else: merge_return_type = "category" return merge_return_type def compute_output_dtypes(self): """ Determine what datatypes should be applied to the result of a libcudf join, baesd on the original left and right frames. 
""" index_dtypes = {} l_data_join_cols = {} r_data_join_cols = {} data_dtypes = { name: col.dtype for name, col in itertools.chain( self.lhs._data.items(), self.rhs._data.items() ) } if self.left_index and self.right_index: l_idx_join_cols = list(self.lhs.index._data.values()) r_idx_join_cols = list(self.rhs.index._data.values()) elif self.left_on and self.right_index: # Keep the orignal dtypes in the LEFT index if possible # should trigger a bunch of no-ops l_idx_join_cols = list(self.lhs.index._data.values()) r_idx_join_cols = list(self.lhs.index._data.values()) for i, name in enumerate(self.left_on): l_data_join_cols[name] = self.lhs._data[name] r_data_join_cols[name] = list(self.rhs.index._data.values())[i] elif self.left_index and self.right_on: # see above l_idx_join_cols = list(self.rhs.index._data.values()) r_idx_join_cols = list(self.rhs.index._data.values()) for i, name in enumerate(self.right_on): l_data_join_cols[name] = list(self.lhs.index._data.values())[i] r_data_join_cols[name] = self.rhs._data[name] if self.left_on and self.right_on: l_data_join_cols = self.lhs._data r_data_join_cols = self.rhs._data for i in range( (self.left_index or self.right_index) * len(self.lhs.index._data.items()) ): index_dtypes[i] = self.libcudf_to_output_casting_rules( l_idx_join_cols[i], r_idx_join_cols[i], self.how ) for name in itertools.chain(self.left_on, self.right_on): if name in self.left_on and name in self.right_on: data_dtypes[name] = self.libcudf_to_output_casting_rules( l_data_join_cols[name], r_data_join_cols[name], self.how ) return (index_dtypes, data_dtypes) def typecast_libcudf_to_output(self, output, output_dtypes): """ Apply precomputed output index and data column data types to the output of a libcudf join. """ index_dtypes, data_dtypes = output_dtypes if output._index and len(index_dtypes) > 0: for index_dtype, index_col_lbl, index_col in zip( index_dtypes.values(), output._index._data.keys(), output._index._data.values(), ): if index_dtype: output._index._data[ index_col_lbl ] = self._build_output_col(index_col, index_dtype) for data_col_lbl, data_col in output._data.items(): data_dtype = data_dtypes[data_col_lbl] if data_dtype: output._data[data_col_lbl] = self._build_output_col( data_col, data_dtype ) return output def _build_output_col(self, col, dtype): if isinstance( dtype, (cudf.core.dtypes.CategoricalDtype, pd.CategoricalDtype) ): outcol = cudf.core.column.build_categorical_column( categories=dtype.categories, codes=col.set_mask(None), mask=col.base_mask, ) else: outcol = col.astype(dtype) return outcol
the-stack_0_6031
""" Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0 """ import re from cfnlint.rules import CloudFormationLintRule from cfnlint.rules import RuleMatch class SubNeeded(CloudFormationLintRule): """Check if a substitution string exists without a substitution function""" id = 'E1029' shortdesc = 'Sub is required if a variable is used in a string' description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.' source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' tags = ['functions', 'sub'] # Free-form text properties to exclude from this rule # content is part of AWS::CloudFormation::Init excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init', 'CloudWatchAlarmDefinition', 'TopicRulePayload'] api_excludes = ['Uri', 'Body'] # IAM Policy has special variables that don't require !Sub, Check for these # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}', '${aws:TokenIssueTime}', '${aws:principaltype}', '${aws:SecureTransport}', '${aws:SourceIp}', '${aws:UserAgent}', '${aws:userid}', '${aws:username}', '${ec2:SourceInstanceARN}', '${iot:Connection.Thing.ThingName}', '${iot:Connection.Thing.ThingTypeName}', '${iot:Connection.Thing.IsAttached}', '${iot:ClientId}', '${transfer:HomeBucket}', '${transfer:HomeDirectory}', '${transfer:HomeFolder}', '${transfer:UserName}', '${redshift:DbUser}', '${cognito-identity.amazonaws.com:aud}', '${cognito-identity.amazonaws.com:sub}', '${cognito-identity.amazonaws.com:amr}'] # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html condition_excludes = [ '${redshift:DbUser}', ] def _match_values(self, searchRegex, cfnelem, path): """Recursively search for values matching the searchRegex""" values = [] if isinstance(cfnelem, dict): for key in cfnelem: pathprop = path[:] pathprop.append(key) values.extend(self._match_values(searchRegex, cfnelem[key], pathprop)) elif isinstance(cfnelem, list): for index, item in enumerate(cfnelem): pathprop = path[:] pathprop.append(index) values.extend(self._match_values(searchRegex, item, pathprop)) else: # Leaf node if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem): # Get all variables as seperate paths regex = re.compile(r'(\$\{.*?\.?.*?})') for variable in re.findall(regex, cfnelem): values.append(path + [variable]) return values def match_values(self, searchRegex, cfn): """ Search for values in all parts of the templates that match the searchRegex """ results = [] results.extend(self._match_values(searchRegex, cfn.template, [])) # Globals are removed during a transform. 
They need to be checked manually results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), [])) return results def _api_exceptions(self, value): """ Key value exceptions """ parameter_search = re.compile(r'^\$\{stageVariables\..*\}$') return re.match(parameter_search, value) def match(self, cfn): """Basic Rule Matching""" matches = [] # Generic regex to match a string containing at least one ${parameter} parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$') # Get a list of paths to every leaf node string containing at least one ${parameter} parameter_string_paths = self.match_values(parameter_search, cfn) # We want to search all of the paths to check if each one contains an 'Fn::Sub' for parameter_string_path in parameter_string_paths: # Exxclude the special IAM variables variable = parameter_string_path[-1] if 'Resource' in parameter_string_path: if variable in self.resource_excludes: continue if 'Condition' in parameter_string_path: if variable in self.condition_excludes: continue # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html) if variable.startswith('${!'): continue found_sub = False # Does the path contain an 'Fn::Sub'? for step in parameter_string_path: if step in self.api_excludes: if self._api_exceptions(parameter_string_path[-1]): found_sub = True elif step == 'Fn::Sub' or step in self.excludes: found_sub = True # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly if not found_sub: # Remove the last item (the variable) to prevent multiple errors on 1 line errors path = parameter_string_path[:-1] message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format( '/'.join(map(str, path))) matches.append(RuleMatch(path, message)) return matches
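

# --- Editor's illustration (not part of the original file) ---
# A standalone demonstration of the two regular expressions used by this rule,
# applied to a made-up policy-style string.
if __name__ == "__main__":
    demo_parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
    demo_variable_search = re.compile(r'(\$\{.*?\.?.*?})')
    sample = 'arn:aws:s3:::${BucketName}/logs/${AWS::Region}'
    if re.match(demo_parameter_search, sample):
        # Outside of an Fn::Sub, each of these variables would be reported by E1029.
        print(re.findall(demo_variable_search, sample))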
the-stack_0_6033
import os

import clara
import requests as r
from flask import Flask, request

app = Flask(__name__)

telegram_key = ''  # set to your bot token path, e.g. 'bot<token>' (left empty here)

# `brain` is assumed to be a clara chatbot instance created elsewhere; it is
# used below but never constructed in this script.


@app.route("/")
def main():
    return "Personal Clara instance."


@app.route("/new-message", methods=['POST'])
def handle_message():
    message = request.json
    # reply is assumed to be a plain string returned by the clara chatbot
    reply = brain.get_response(message['text'].lower())
    data = {
        'chat_id': message['chat']['id'],
        'text': reply,
    }
    url = 'https://api.telegram.org/' + telegram_key + '/sendMessage'
    r.post(url, data=data)
    return 'Thanks!'


if __name__ == "__main__":
    port = int(os.environ.get("PORT", 2525))
    app.run(host='0.0.0.0', port=port, debug=True)
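

# --- Editor's note (not part of the original file) ---
# For handle_message to receive updates, the bot's webhook must point at the
# /new-message route; a hedged sketch, with token and host made up:
#   r.post('https://api.telegram.org/bot<token>/setWebhook',
#          data={'url': 'https://your-host/new-message'})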
the-stack_0_6034
# coding=utf-8 # This is a sample Python script. from aliyunIoT import Device import ujson import network import utime as time from driver import GPIO from driver import UART t1 = 30 gas_threshold = 5.0 liq_mdcn_alarm = False gas_alarm = False version = 'v0.0.1' uart1 = UART('serail1') liq_level = GPIO() gpio = GPIO() '''0 1 means cloud ctrl,2 local ctrl''' cloud_ctrl = 2 g_connect_status = False ini_file_name = '/user/cfg.txt' def on_4g_cb(args): global g_connect_status pdp = args[0] netwk_sta = args[1] if netwk_sta == 1: g_connect_status = True else: g_connect_status = False def connect_network(): global on_4g_cb,g_connect_status net = network.NetWorkClient() g_register_network = False if net._stagecode is not None and net._stagecode == 3 and net._subcode == 1: g_register_network = True else: g_register_network = False if g_register_network: net.on(1,on_4g_cb) net.connect(None) else: print('connect network failed') for i in range(30): if g_connect_status: print('connect network success') return True time.sleep(1) return False def read_cfg_file(): global t1,gas_threshold,ini_file_name try: f = open(ini_file_name,'r') except OSError: cfg_dict = {'gasstr':1.0,'t1':60} print('write',cfg_dict) f = open(ini_file_name,'w+') print(f) f.write(ujson.dumps(cfg_dict)) else: cfg_txt = f.read() cfg_dict = ujson.loads(cfg_txt) if isinstance(cfg_dict,dict) == False: print('cfg_dict not a dict') return print('read',cfg_dict) gas_threshold = cfg_dict['gasstr'] t1 = cfg_dict['t1'] print('gas',gas_threshold,'t1',t1) finally: f.close() print('close') return 0 def write_cfg_file(cloudstr): global t1,gas_threshold,ini_file_name if isinstance(cloudstr,str) == False: return try: f = open(ini_file_name,'r') except OSError: pass else: cfg_txt = f.read() f.close() finally: pass try: f = open(ini_file_name,'w+') except OSError: pass else: cfg_dict = ujson.loads(cfg_txt) cloud_dict = ujson.loads(cloudstr) if isinstance(cfg_dict,dict) == False: print('cfg_dict not a dict') return if isinstance(cloud_dict,dict) == False: print('cloud_dict not a dict') return for key in cloud_dict.keys(): if cfg_dict.get(key) != None: cfg_dict[key] = cloud_dict[key] if key == 'gasstr': gas_threshold = cfg_dict[key] if key == 't1': t1 = cfg_dict[key] f.seek(0) f.write(ujson.dumps(cfg_dict)) print(cfg_dict) pass finally: f.close() print('cloud cfg file close') return def on_connect(): print('linkkit is connected') def on_disconnect(): print('linkkit is disconnected') def on_props(request): print('clound req data is {}'.format(request)) global gpio global cloud_ctrl cloudmsg = ujson.loads(request) if 'powerstate' in cloudmsg: if cloudmsg['powerstate'] == 0: gpio.write(0) #pass cloud_ctrl = 0 print('led state {}'.format(cloudmsg['powerstate'])) else: cloud_ctrl = 1 gpio.write(1) #pass print('led state {}'.format(cloudmsg['powerstate'])) else: write_cfg_file(request) def on_service(id,request): print('clound req id is {} , req is {}'.format(id,request)) def on_error(err): print('err msg is {} '.format(err)) def gas_detec(): gas_val = 0.0 dotnum = 0 global uart1 len1 = 0 #sign = 0 uart1.open('serial1') readbuf1 = bytearray(9) writebuf1 = bytearray([0xd7]) readbuf2 = bytearray(13) writebuf2 = bytearray([0xff,0x01,0x87,0x00,0x00,0x00,0x00,0x00,0x78]) uart1.write(writebuf1) len1 = uart1.read(readbuf1) print('dotnum:',end='') print(readbuf1) if len1 != len(readbuf1): print('read dotnum err') uart1.close() return gas_val uart1.write(writebuf2) len1 = uart1.read(readbuf2) print('readlen:',len1,'dotnum:',end='') print(readbuf2) if len1 != 
len(readbuf2): print('read gas err') uart1.close() return gas_val uart1.close() dotnum = (readbuf1[6]&0xf0)>> 4 #sign = readbuf1[6]&0x0f gas_val = (readbuf2[2]*256.000 + readbuf2[3])*1.000/10**dotnum print('gasvalue:',end='') print(gas_val) return gas_val def liquid_level_detec(): lowval = liq_level.read() print('lowval',lowval) liq_meicn_remain = False if lowval == 1: liq_meicn_remain = True else: liq_meicn_remain = False return liq_meicn_remain def main(): global liq_level,cloud_ctrl,t1,liq_mdcn_alarm,gas_alarm ret = connect_network() print('network register sta {}'.format(ret)) productKey = 'xxx' productSecret = '' deviceName = 'haas505_demo_sn1' deviceSecret = 'xxx' key_info = { 'region' : 'cn-shanghai', 'productKey' : productKey, 'deviceName' : deviceName, 'deviceSecret' : deviceSecret, 'productSecret' : productSecret, 'keepaliveSec': 60 } device = Device() device.on(device.ON_CONNECT,on_connect) device.on(device.ON_DISCONNECT,on_disconnect) device.on(device.ON_PROPS,on_props) device.on(device.ON_SERVICE,on_service) device.on(device.ON_ERROR,on_error) device.connect(key_info) send_info = {'ver':version,'name':key_info['deviceName']} post_data = {'params':ujson.dumps(send_info)} device.postProps(post_data) read_cfg_file() time.sleep(2) led1 = GPIO() pump = GPIO() '''liqid level detec prompt led''' led1.open('led1') '''liquid level detec io''' liq_level.open('liq_level') '''control pump relay''' pump.open('pump') pump.write(1) '''cloud_flg is cloud down data led''' gpio.open('cloud_flg') time_cnt = 0 gas_value = 0.00 liq_mdcn_re_flg_chg = False need_send = False while True: time.sleep_ms(1000) time_cnt += 1 liq_mdcn_re_flg = liquid_level_detec() if liq_mdcn_re_flg == False: led1.write(0) if liq_mdcn_re_flg_chg == True: liq_mdcn_re_flg_chg = False need_send = True pass else: led1.write(1) need_send = True liq_mdcn_re_flg_chg = True print('need send') '''need send data to cloud''' pass if time_cnt%10 == 0: gas_value = gas_detec() if gas_value > gas_threshold: '''need send data to cloud''' gas_alarm = True need_send = True print('need send') else: gas_alarm = False pass if liq_mdcn_re_flg == True: need_send = False pump.write(1) cloud_ctrl = 2 print('close pump') post_data = {'params':{'liq_mdcn_re':0,'gasval':100,'gasalarm':0,'powerstate':0}} post_data['params']['liq_mdcn_re'] = 0 gas_value = gas_detec() post_data['params']['gasval'] = int(gas_value*100) if gas_alarm == True: post_data['params']['gasalarm'] = 1 post_data['params']['powerstate'] = gpio.read() post_data_dict = {'params':ujson.dumps(post_data['params'])} device.postProps(post_data_dict) continue if gas_alarm == False: if time_cnt%t1 == 0: if pump.read() == 1 : pump.write(0) print('open pump') else: pump.write(1) print('close pump') else: pass if cloud_ctrl == 0: pump.write(1) cloud_ctrl = 2 time_cnt = 0 print('cloud close pump') elif cloud_ctrl == 1: pump.write(0) cloud_ctrl = 2 time_cnt = 0 print('cloud open pump') elif gas_alarm == True: pump.write(1) print('gas alarm close pump') if need_send == True: need_send = False post_data1 = {'params':{'liq_mdcn_re':0,'gasval':100,'gasalarm':0,'powerstate':0}} if liq_mdcn_re_flg == True: post_data1['params']['liq_mdcn_re'] = 0 else: post_data1['params']['liq_mdcn_re'] = 1 post_data1['params']['gasval'] = int(gas_value*100) if gas_alarm == True: post_data1['params']['gasalarm'] = 1 post_data1['params']['powerstate'] = gpio.read() post_data1_dict = {'params':ujson.dumps(post_data1['params'])} device.postProps(post_data1_dict) if __name__ == '__main__': main()
the-stack_0_6035
# (C) Datadog, Inc. 2018 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from .mixins import PrometheusScraperMixin from ..base import AgentCheck from ...errors import CheckException from six import string_types class PrometheusScraper(PrometheusScraperMixin): """ This class scrapes a prometheus endpoint and submits the metrics on behalf of a check. This class is used by checks that scrape more than one prometheus endpoint. """ def __init__(self, check): super(PrometheusScraper, self).__init__() self.check = check def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None): """ Submit a metric as a rate, additional tags provided will be added to the ones from the label provided via the metrics object. `custom_tags` is an array of 'tag:value' that will be added to the metric when sending the rate to Datadog. """ _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None): """ Submit a metric as a gauge, additional tags provided will be added to the ones from the label provided via the metrics object. `custom_tags` is an array of 'tag:value' that will be added to the metric when sending the gauge to Datadog. """ _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None): """ Submit a metric as a monotonic count, additional tags provided will be added to the ones from the label provided via the metrics object. `custom_tags` is an array of 'tag:value' that will be added to the metric when sending the monotonic count to Datadog. """ _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) def _metric_tags(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = [] if custom_tags is not None: _tags += custom_tags for label in metric.label: if self.exclude_labels is None or label.name not in self.exclude_labels: tag_name = label.name if self.labels_mapper is not None and label.name in self.labels_mapper: tag_name = self.labels_mapper[label.name] _tags.append('{}:{}'.format(tag_name, label.value)) return self._finalize_tags_to_submit( _tags, metric_name, val, metric, custom_tags=custom_tags, hostname=hostname ) def _submit_service_check(self, *args, **kwargs): self.check.service_check(*args, **kwargs) class GenericPrometheusCheck(AgentCheck): """ GenericPrometheusCheck is a class that helps instantiating PrometheusCheck only with YAML configurations. 
As each check has it own states it maintains a map of all checks so that the one corresponding to the instance is executed Minimal example configuration: instances: - prometheus_url: http://foobar/endpoint namespace: "foobar" metrics: - bar - foo """ def __init__(self, name, init_config, agentConfig, instances=None, default_instances=None, default_namespace=""): super(GenericPrometheusCheck, self).__init__(name, init_config, agentConfig, instances) self.scrapers_map = {} self.default_instances = default_instances if default_instances is not None else {} self.default_namespace = default_namespace for instance in instances: self.get_scraper(instance) def check(self, instance): endpoint = instance["prometheus_url"] scraper = self.get_scraper(instance) if not scraper.metrics_mapper: raise CheckException("You have to collect at least one metric from the endpoint: " + endpoint) scraper.process( endpoint, send_histograms_buckets=instance.get('send_histograms_buckets', True), send_monotonic_counter=instance.get('send_monotonic_counter', True), instance=instance, ignore_unmapped=True ) def _extract_rate_metrics(self, type_overrides): rate_metrics = [] for metric in type_overrides: if type_overrides[metric] == "rate": rate_metrics.append(metric) type_overrides[metric] = "gauge" return rate_metrics def get_scraper(self, instance): namespace = instance.get("namespace", "") # Check if we have a namespace if namespace == "": if self.default_namespace == "": raise CheckException("You have to define a namespace for each prometheus check") namespace = self.default_namespace # Retrieve potential default instance settings for the namespace default_instance = self.default_instances.get(namespace, {}) endpoint = instance.get("prometheus_url", default_instance.get("prometheus_url", "")) if endpoint == "": raise CheckException("Unable to find prometheus URL in config file.") # If we already created the corresponding scraper, return it if endpoint in self.scrapers_map: return self.scrapers_map[endpoint] # Otherwise we create the scraper scraper = PrometheusScraper(self) scraper.NAMESPACE = namespace # Metrics are preprocessed if no mapping metrics_mapper = {} # We merge list and dictionnaries from optional defaults & instance settings metrics = default_instance.get("metrics", []) + instance.get("metrics", []) for metric in metrics: if isinstance(metric, string_types): metrics_mapper[metric] = metric else: metrics_mapper.update(metric) scraper.metrics_mapper = metrics_mapper scraper.labels_mapper = default_instance.get("labels_mapper", {}) scraper.labels_mapper.update(instance.get("labels_mapper", {})) scraper.label_joins = default_instance.get("label_joins", {}) scraper.label_joins.update(instance.get("label_joins", {})) scraper.rate_metrics = self._extract_rate_metrics(default_instance.get("type_overrides", {})) scraper.rate_metrics.extend(self._extract_rate_metrics(instance.get("type_overrides", {}))) scraper.type_overrides = default_instance.get("type_overrides", {}) scraper.type_overrides.update(instance.get("type_overrides", {})) scraper.exclude_labels = default_instance.get("exclude_labels", []) + instance.get("exclude_labels", []) scraper.extra_headers = default_instance.get("extra_headers", {}) scraper.extra_headers.update(instance.get("extra_headers", {})) # For simple values instance settings overrides optional defaults scraper.prometheus_metrics_prefix = instance.get("prometheus_metrics_prefix", default_instance.get("prometheus_metrics_prefix", '')) scraper.label_to_hostname = 
instance.get("label_to_hostname", default_instance.get("label_to_hostname", None)) scraper.health_service_check = instance.get("health_service_check", default_instance.get("health_service_check", True)) scraper.ssl_cert = instance.get("ssl_cert", default_instance.get("ssl_cert", None)) scraper.ssl_private_key = instance.get("ssl_private_key", default_instance.get("ssl_private_key", None)) scraper.ssl_ca_cert = instance.get("ssl_ca_cert", default_instance.get("ssl_ca_cert", None)) scraper.set_prometheus_timeout(instance, default_instance.get("prometheus_timeout", 10)) self.scrapers_map[endpoint] = scraper return scraper
the-stack_0_6041
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import logging
import threading
from collections import deque
from watchdog.utils import DaemonThread
from .inotify_c import Inotify


class _Worker(DaemonThread):
    """
    Thread that reads events from `inotify` and writes to `queue`.
    """

    def __init__(self, inotify, queue):
        DaemonThread.__init__(self)
        self._read_events = inotify.read_events
        self._queue = queue

    def run(self):
        while self.should_keep_running():
            inotify_events = self._read_events()
            for inotify_event in inotify_events:
                logging.debug("worker: in event %s", inotify_event)
                if inotify_event.is_moved_to:
                    from_event = self._queue._catch(inotify_event.cookie)
                    if from_event:
                        self._queue._put((from_event, inotify_event))
                    else:
                        logging.debug("worker: could not find matching move_from event")
                        self._queue._put(inotify_event)
                else:
                    self._queue._put(inotify_event)


class InotifyBuffer(object):
    """
    A wrapper for `Inotify` that keeps events in memory for `delay` seconds.
    IN_MOVED_FROM and IN_MOVED_TO events are paired during this time.
    """

    def __init__(self, path, recursive=False):
        self.delay = 0.5
        self._lock = threading.Lock()
        self._not_empty = threading.Condition(self._lock)
        self._queue = deque()
        self._inotify = Inotify(path, recursive)
        self._worker = _Worker(self._inotify, self)
        self._worker.start()

    def read_event(self):
        """
        Returns a single event or a tuple of from/to events in case of a
        paired move event.
        """
        while True:
            # wait for queue
            self._not_empty.acquire()
            while len(self._queue) == 0:
                self._not_empty.wait()
            head, insert_time = self._queue[0]
            self._not_empty.release()

            # wait for delay
            time_left = insert_time + self.delay - time.time()
            while time_left > 0:
                time.sleep(time_left)
                time_left = insert_time + self.delay - time.time()

            # return if event is still here
            self._lock.acquire()
            try:
                if len(self._queue) > 0 and self._queue[0][0] is head:
                    self._queue.popleft()
                    return head
            finally:
                self._lock.release()

    def close(self):
        self._worker.stop()
        self._inotify.close()
        self._worker.join()

    def _put(self, elem):
        self._lock.acquire()
        self._queue.append((elem, time.time()))
        self._not_empty.notify()
        self._lock.release()

    def _catch(self, cookie):
        self._lock.acquire()
        ret = None
        for i, elem in enumerate(self._queue):
            event, _ = elem
            try:
                if event.is_moved_from and event.cookie == cookie:
                    ret = event
                    del self._queue[i]
                    break
            except AttributeError:
                pass
        self._lock.release()
        return ret
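

# --- Editor's illustration (not part of the original module) ---
# A minimal, Linux-only usage sketch; the watched path is arbitrary, and it is
# passed as bytes on the assumption that the low-level Inotify wrapper expects
# byte-string paths.
if __name__ == "__main__":
    buf = InotifyBuffer(b"/tmp", recursive=False)
    try:
        # Blocks until an event is at least `delay` seconds old; a paired move
        # arrives as a (moved_from, moved_to) tuple.
        print(buf.read_event())
    finally:
        buf.close()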
the-stack_0_6042
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from concurrent import futures import datetime import functools import inspect import logging import sys import threading import time from typing import ( Any, Callable, Dict, List, Iterable, Optional, Sequence, Tuple, Type, Union, ) import proto from google.api_core import operation from google.auth import credentials as auth_credentials from google.cloud.aiplatform import initializer from google.cloud.aiplatform import utils from google.cloud.aiplatform.compat.types import encryption_spec as gca_encryption_spec logging.basicConfig(level=logging.INFO, stream=sys.stdout) class Logger: """Logging wrapper class with high level helper methods.""" def __init__(self, name: str = ""): """Initializes logger with name. Args: name (str): Name to associate with logger. """ self._logger = logging.getLogger(name) def log_create_with_lro( self, cls: Type["VertexAiResourceNoun"], lro: Optional[operation.Operation] = None, ): """Logs create event with LRO. Args: cls (VertexAiResourceNoun): Vertex AI Resource Noun class that is being created. lro (operation.Operation): Optional. Backing LRO for creation. """ self._logger.info(f"Creating {cls.__name__}") if lro: self._logger.info( f"Create {cls.__name__} backing LRO: {lro.operation.name}" ) def log_create_complete( self, cls: Type["VertexAiResourceNoun"], resource: proto.Message, variable_name: str, ): """Logs create event is complete. Will also include code snippet to instantiate resource in SDK. Args: cls (VertexAiResourceNoun): Vertex AI Resource Noun class that is being created. resource (proto.Message): Vertex AI Resourc proto.Message variable_name (str): Name of variable to use for code snippet """ self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}") self._logger.info(f"To use this {cls.__name__} in another session:") self._logger.info( f"{variable_name} = aiplatform.{cls.__name__}('{resource.name}')" ) def log_create_complete_with_getter( self, cls: Type["VertexAiResourceNoun"], resource: proto.Message, variable_name: str, ): """Logs create event is complete. Will also include code snippet to instantiate resource in SDK. Args: cls (VertexAiResourceNoun): Vertex AI Resource Noun class that is being created. resource (proto.Message): Vertex AI Resourc proto.Message variable_name (str): Name of variable to use for code snippet """ self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}") self._logger.info(f"To use this {cls.__name__} in another session:") self._logger.info( f"{variable_name} = aiplatform.{cls.__name__}.get('{resource.name}')" ) def log_action_start_against_resource( self, action: str, noun: str, resource_noun_obj: "VertexAiResourceNoun" ): """Logs intention to start an action against a resource. Args: action (str): Action to complete against the resource ie: "Deploying". Can be empty string. noun (str): Noun the action acts on against the resource. Can be empty string. 
resource_noun_obj (VertexAiResourceNoun): Resource noun object the action is acting against. """ self._logger.info( f"{action} {resource_noun_obj.__class__.__name__} {noun}: {resource_noun_obj.resource_name}" ) def log_action_started_against_resource_with_lro( self, action: str, noun: str, cls: Type["VertexAiResourceNoun"], lro: operation.Operation, ): """Logs an action started against a resource with lro. Args: action (str): Action started against resource. ie: "Deploy". Can be empty string. noun (str): Noun the action acts on against the resource. Can be empty string. cls (VertexAiResourceNoun): Resource noun object the action is acting against. lro (operation.Operation): Backing LRO for action. """ self._logger.info( f"{action} {cls.__name__} {noun} backing LRO: {lro.operation.name}" ) def log_action_completed_against_resource( self, noun: str, action: str, resource_noun_obj: "VertexAiResourceNoun" ): """Logs action completed against resource. Args: noun (str): Noun the action acts on against the resource. Can be empty string. action (str): Action started against resource. ie: "Deployed". Can be empty string. resource_noun_obj (VertexAiResourceNoun): Resource noun object the action is acting against """ self._logger.info( f"{resource_noun_obj.__class__.__name__} {noun} {action}. Resource name: {resource_noun_obj.resource_name}" ) def __getattr__(self, attr: str): """Forward remainder of logging to underlying logger.""" return getattr(self._logger, attr) _LOGGER = Logger(__name__) class FutureManager(metaclass=abc.ABCMeta): """Tracks concurrent futures against this object.""" def __init__(self): self.__latest_future_lock = threading.Lock() # Always points to the latest future. All submitted futures will always # form a dependency on the latest future. self.__latest_future = None # Caches Exception of any executed future. Once one exception occurs # all additional futures should fail and any additional invocations will block. self._exception = None def _raise_future_exception(self): """Raises exception if one of the object's futures has raised.""" with self.__latest_future_lock: if self._exception: raise self._exception def _complete_future(self, future: futures.Future): """Checks for exception of future and removes the pointer if it's still latest. Args: future (futures.Future): Required. A future to complete. """ with self.__latest_future_lock: try: future.result() # raises except Exception as e: self._exception = e if self.__latest_future is future: self.__latest_future = None def _are_futures_done(self) -> bool: """Helper method to check to all futures are complete. Returns: True if no latest future. 
""" with self.__latest_future_lock: return self.__latest_future is None def wait(self): """Helper method to that blocks until all futures are complete.""" future = self.__latest_future if future: futures.wait([future], return_when=futures.FIRST_EXCEPTION) self._raise_future_exception() @property def _latest_future(self) -> Optional[futures.Future]: """Get the latest future if it exists.""" with self.__latest_future_lock: return self.__latest_future @_latest_future.setter def _latest_future(self, future: Optional[futures.Future]): """Optionally set the latest future and add a complete_future callback.""" with self.__latest_future_lock: self.__latest_future = future if future: future.add_done_callback(self._complete_future) def _submit( self, method: Callable[..., Any], args: Sequence[Any], kwargs: Dict[str, Any], additional_dependencies: Optional[Sequence[futures.Future]] = None, callbacks: Optional[Sequence[Callable[[futures.Future], Any]]] = None, internal_callbacks: Iterable[Callable[[Any], Any]] = None, ) -> futures.Future: """Submit a method as a future against this object. Args: method (Callable): Required. The method to submit. args (Sequence): Required. The arguments to call the method with. kwargs (dict): Required. The keyword arguments to call the method with. additional_dependencies (Optional[Sequence[futures.Future]]): Optional. Additional dependent futures to wait on before executing method. Note: No validation is done on the dependencies. callbacks (Optional[Sequence[Callable[[futures.Future], Any]]]): Optional. Additional Future callbacks to execute once this created Future is complete. Returns: future (Future): Future of the submitted method call. """ def wait_for_dependencies_and_invoke( deps: Sequence[futures.Future], method: Callable[..., Any], args: Sequence[Any], kwargs: Dict[str, Any], internal_callbacks: Iterable[Callable[[Any], Any]], ) -> Any: """Wrapper method to wait on any dependencies before submitting method. Args: deps (Sequence[futures.Future]): Required. Dependent futures to wait on before executing method. Note: No validation is done on the dependencies. method (Callable): Required. The method to submit. args (Sequence[Any]): Required. The arguments to call the method with. kwargs (Dict[str, Any]): Required. The keyword arguments to call the method with. internal_callbacks: (Callable[[Any], Any]): Callbacks that take the result of method. """ for future in set(deps): future.result() result = method(*args, **kwargs) # call callbacks from within future if internal_callbacks: for callback in internal_callbacks: callback(result) return result # Retrieves any dependencies from arguments. 
deps = [ arg._latest_future for arg in list(args) + list(kwargs.values()) if isinstance(arg, FutureManager) ] # Retrieves exceptions and raises # if any upstream dependency has an exception exceptions = [ arg._exception for arg in list(args) + list(kwargs.values()) if isinstance(arg, FutureManager) and arg._exception ] if exceptions: raise exceptions[0] # filter out objects that do not have pending tasks deps = [dep for dep in deps if dep] if additional_dependencies: deps.extend(additional_dependencies) with self.__latest_future_lock: # form a dependency on the latest future of this object if self.__latest_future: deps.append(self.__latest_future) self.__latest_future = initializer.global_pool.submit( wait_for_dependencies_and_invoke, deps=deps, method=method, args=args, kwargs=kwargs, internal_callbacks=internal_callbacks, ) future = self.__latest_future # Clean up callback captures exception as well as removes future. # May execute immediately and take lock. future.add_done_callback(self._complete_future) if callbacks: for c in callbacks: future.add_done_callback(c) return future @classmethod @abc.abstractmethod def _empty_constructor(cls) -> "FutureManager": """Should construct object with all non FutureManager attributes as None.""" pass @abc.abstractmethod def _sync_object_with_future_result(self, result: "FutureManager"): """Should sync the object from _empty_constructor with result of future.""" def __repr__(self) -> str: if self._exception: return f"{object.__repr__(self)} failed with {str(self._exception)}" if self.__latest_future: return f"{object.__repr__(self)} is waiting for upstream dependencies to complete." return object.__repr__(self) class VertexAiResourceNoun(metaclass=abc.ABCMeta): """Base class the Vertex AI resource nouns. Subclasses require two class attributes: client_class: The client to instantiate to interact with this resource noun. _is_client_prediction_client: Flag to indicate if the client requires a prediction endpoint. Subclass is required to populate private attribute _gca_resource which is the service representation of the resource noun. """ @property @classmethod @abc.abstractmethod def client_class(cls) -> Type[utils.VertexAiServiceClientWithOverride]: """Client class required to interact with resource with optional overrides.""" pass @property @classmethod @abc.abstractmethod def _is_client_prediction_client(cls) -> bool: """Flag to indicate whether to use prediction endpoint with client.""" pass @property @abc.abstractmethod def _getter_method(cls) -> str: """Name of getter method of client class for retrieving the resource.""" pass @property @abc.abstractmethod def _delete_method(cls) -> str: """Name of delete method of client class for deleting the resource.""" pass @property @abc.abstractmethod def _resource_noun(cls) -> str: """Resource noun.""" pass def __init__( self, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, resource_name: Optional[str] = None, ): """Initializes class with project, location, and api_client. Args: project(str): Project of the resource noun. location(str): The location of the resource noun. credentials(google.auth.crendentials.Crendentials): Optional custom credentials to use when accessing interacting with resource noun. resource_name(str): A fully-qualified resource name or ID. 
""" if resource_name: project, location = self._get_and_validate_project_location( resource_name=resource_name, project=project, location=location ) self.project = project or initializer.global_config.project self.location = location or initializer.global_config.location self.credentials = credentials or initializer.global_config.credentials self.api_client = self._instantiate_client(self.location, self.credentials) @classmethod def _instantiate_client( cls, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ) -> utils.VertexAiServiceClientWithOverride: """Helper method to instantiate service client for resource noun. Args: location (str): The location of the resource noun. credentials (google.auth.credentials.Credentials): Optional custom credentials to use when accessing interacting with resource noun. Returns: client (utils.VertexAiServiceClientWithOverride): Initialized service client for this service noun with optional overrides. """ return initializer.global_config.create_client( client_class=cls.client_class, credentials=credentials, location_override=location, prediction_client=cls._is_client_prediction_client, ) def _get_and_validate_project_location( self, resource_name: str, project: Optional[str] = None, location: Optional[str] = None, ) -> Tuple: """Validate the project and location for the resource. Args: resource_name(str): Required. A fully-qualified resource name or ID. project(str): Project of the resource noun. location(str): The location of the resource noun. Raises: RuntimeError if location is different from resource location """ fields = utils.extract_fields_from_resource_name( resource_name, self._resource_noun ) if not fields: return project, location if location and fields.location != location: raise RuntimeError( f"location {location} is provided, but different from " f"the resource location {fields.location}" ) return fields.project, fields.location def _get_gca_resource(self, resource_name: str) -> proto.Message: """Returns GAPIC service representation of client class resource.""" """ Args: resource_name (str): Required. A fully-qualified resource name or ID. """ resource_name = utils.full_resource_name( resource_name=resource_name, resource_noun=self._resource_noun, project=self.project, location=self.location, ) return getattr(self.api_client, self._getter_method)(name=resource_name) def _sync_gca_resource(self): """Sync GAPIC service representation of client class resource.""" self._gca_resource = self._get_gca_resource(resource_name=self.resource_name) @property def name(self) -> str: """Name of this resource.""" self._assert_gca_resource_is_available() return self._gca_resource.name.split("/")[-1] @property def resource_name(self) -> str: """Full qualified resource name.""" self._assert_gca_resource_is_available() return self._gca_resource.name @property def display_name(self) -> str: """Display name of this resource.""" self._assert_gca_resource_is_available() return self._gca_resource.display_name @property def create_time(self) -> datetime.datetime: """Time this resource was created.""" self._assert_gca_resource_is_available() return self._gca_resource.create_time @property def update_time(self) -> datetime.datetime: """Time this resource was last updated.""" self._sync_gca_resource() return self._gca_resource.update_time @property def encryption_spec(self) -> Optional[gca_encryption_spec.EncryptionSpec]: """Customer-managed encryption key options for this Vertex AI resource. 
If this is set, then all resources created by this Vertex AI resource will be encrypted with the provided encryption key. """ self._assert_gca_resource_is_available() return getattr(self._gca_resource, "encryption_spec") @property def labels(self) -> Dict[str, str]: """User-defined labels containing metadata about this resource. Read more about labels at https://goo.gl/xmQnxf """ self._assert_gca_resource_is_available() return self._gca_resource.labels @property def gca_resource(self) -> proto.Message: """The underlying resource proto representation.""" self._assert_gca_resource_is_available() return self._gca_resource def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when property is not accessible. Raises: RuntimeError if _gca_resource is has not been created. """ if self._gca_resource is None: raise RuntimeError( f"{self.__class__.__name__} resource has not been created" ) def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" def optional_sync( construct_object_on_arg: Optional[str] = None, return_input_arg: Optional[str] = None, bind_future_to_self: bool = True, ): """Decorator for VertexAiResourceNounWithFutureManager with optional sync support. Methods with this decorator should include a "sync" argument that defaults to True. If called with sync=False this decorator will launch the method as a concurrent Future in a separate Thread. Note that this is only robust enough to support our current end to end patterns and may not be suitable for new patterns. Args: construct_object_on_arg (str): Optional. If provided, will only construct output object if arg is present. Example: If custom training does not produce a model. return_input_arg (str): Optional. If provided will return passed in argument instead of constructing. Example: Model.deploy(Endpoint) returns the passed in Endpoint bind_future_to_self (bool): Whether to add this future to the calling object. Example: Model.deploy(Endpoint) would be set to False because we only want the deployment Future to be associated with Endpoint. """ def optional_run_in_thread(method: Callable[..., Any]): """Optionally run this method concurrently in separate Thread. Args: method (Callable[..., Any]): Method to optionally run in separate Thread. 
""" @functools.wraps(method) def wrapper(*args, **kwargs): """Wraps method.""" sync = kwargs.pop("sync", True) bound_args = inspect.signature(method).bind(*args, **kwargs) self = bound_args.arguments.get("self") calling_object_latest_future = None # check to see if this object has any exceptions if self: calling_object_latest_future = self._latest_future self._raise_future_exception() # if sync then wait for any Futures to complete and execute if sync: if self: self.wait() return method(*args, **kwargs) # callbacks to call within the Future (in same Thread) internal_callbacks = [] # callbacks to add to the Future (may or may not be in same Thread) callbacks = [] # additional Future dependencies to capture dependencies = [] # all methods should have type signatures return_type = get_annotation_class( inspect.getfullargspec(method).annotations["return"] ) # is a classmethod that creates the object and returns it if args and inspect.isclass(args[0]): # assumes classmethod is our resource noun returned_object = args[0]._empty_constructor() self = returned_object else: # instance method # object produced by the method returned_object = bound_args.arguments.get(return_input_arg) # if we're returning an input object if returned_object and returned_object is not self: # make sure the input object doesn't have any exceptions # from previous futures returned_object._raise_future_exception() # if the future will be associated with both the returned object # and calling object then we need to add additional callback # to remove the future from the returned object # if we need to construct a new empty returned object should_construct = not returned_object and bound_args.arguments.get( construct_object_on_arg, not construct_object_on_arg ) if should_construct: if return_type is not None: returned_object = return_type._empty_constructor() # if the future will be associated with both the returned object # and calling object then we need to add additional callback # to remove the future from the returned object if returned_object and bind_future_to_self: callbacks.append(returned_object._complete_future) if returned_object: # sync objects after future completes internal_callbacks.append( returned_object._sync_object_with_future_result ) # If the future is not associated with the calling object # then the return object future needs to form a dependency on the # the latest future in the calling object. if not bind_future_to_self: if calling_object_latest_future: dependencies.append(calling_object_latest_future) self = returned_object future = self._submit( method=method, callbacks=callbacks, internal_callbacks=internal_callbacks, additional_dependencies=dependencies, args=[], kwargs=bound_args.arguments, ) # if the calling object is the one that submitted then add it's future # to the returned object if returned_object and returned_object is not self: returned_object._latest_future = future return returned_object return wrapper return optional_run_in_thread class VertexAiResourceNounWithFutureManager(VertexAiResourceNoun, FutureManager): """Allows optional asynchronous calls to this Vertex AI Resource Nouns.""" def __init__( self, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, resource_name: Optional[str] = None, ): """Initializes class with project, location, and api_client. Args: project (str): Optional. Project of the resource noun. location (str): Optional. The location of the resource noun. 
credentials(google.auth.crendentials.Crendentials): Optional. custom credentials to use when accessing interacting with resource noun. resource_name(str): A fully-qualified resource name or ID. """ VertexAiResourceNoun.__init__( self, project=project, location=location, credentials=credentials, resource_name=resource_name, ) FutureManager.__init__(self) @classmethod def _empty_constructor( cls, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, resource_name: Optional[str] = None, ) -> "VertexAiResourceNounWithFutureManager": """Initializes with all attributes set to None. The attributes should be populated after a future is complete. This allows scheduling of additional API calls before the resource is created. Args: project (str): Optional. Project of the resource noun. location (str): Optional. The location of the resource noun. credentials(google.auth.crendentials.Crendentials): Optional. custom credentials to use when accessing interacting with resource noun. resource_name(str): A fully-qualified resource name or ID. Returns: An instance of this class with attributes set to None. """ self = cls.__new__(cls) VertexAiResourceNoun.__init__( self, project=project, location=location, credentials=credentials, resource_name=resource_name, ) FutureManager.__init__(self) self._gca_resource = None return self def _sync_object_with_future_result( self, result: "VertexAiResourceNounWithFutureManager" ): """Populates attributes from a Future result to this object. Args: result: VertexAiResourceNounWithFutureManager Required. Result of future with same type as this object. """ sync_attributes = [ "project", "location", "api_client", "_gca_resource", "credentials", ] optional_sync_attributes = ["_prediction_client"] for attribute in sync_attributes: setattr(self, attribute, getattr(result, attribute)) for attribute in optional_sync_attributes: value = getattr(result, attribute, None) if value: setattr(self, attribute, value) @classmethod def _construct_sdk_resource_from_gapic( cls, gapic_resource: proto.Message, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ) -> VertexAiResourceNoun: """Given a GAPIC resource object, return the SDK representation. Args: gapic_resource (proto.Message): A GAPIC representation of a Vertex AI resource, usually retrieved by a get_* or in a list_* API call. project (str): Optional. Project to construct SDK object from. If not set, project set in aiplatform.init will be used. location (str): Optional. Location to construct SDK object from. If not set, location set in aiplatform.init will be used. credentials (auth_credentials.Credentials): Optional. Custom credentials to use to construct SDK object. Overrides credentials set in aiplatform.init. Returns: VertexAiResourceNoun: An initialized SDK object that represents GAPIC type. 
""" sdk_resource = cls._empty_constructor( project=project, location=location, credentials=credentials ) sdk_resource._gca_resource = gapic_resource return sdk_resource # TODO(b/144545165): Improve documentation for list filtering once available # TODO(b/184910159): Expose `page_size` field in list method @classmethod def _list( cls, cls_filter: Callable[[proto.Message], bool] = lambda _: True, filter: Optional[str] = None, order_by: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ) -> List[VertexAiResourceNoun]: """Private method to list all instances of this Vertex AI Resource, takes a `cls_filter` arg to filter to a particular SDK resource subclass. Args: cls_filter (Callable[[proto.Message], bool]): A function that takes one argument, a GAPIC resource, and returns a bool. If the function returns False, that resource will be excluded from the returned list. Example usage: cls_filter = lambda obj: obj.metadata in cls.valid_metadatas filter (str): Optional. An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. order_by (str): Optional. A comma-separated list of fields to order by, sorted in ascending order. Use "desc" after a field name for descending. Supported fields: `display_name`, `create_time`, `update_time` project (str): Optional. Project to retrieve list from. If not set, project set in aiplatform.init will be used. location (str): Optional. Location to retrieve list from. If not set, location set in aiplatform.init will be used. credentials (auth_credentials.Credentials): Optional. Custom credentials to use to retrieve list. Overrides credentials set in aiplatform.init. Returns: List[VertexAiResourceNoun] - A list of SDK resource objects """ resource = cls._empty_constructor( project=project, location=location, credentials=credentials ) # Fetch credentials once and re-use for all `_empty_constructor()` calls creds = initializer.global_config.credentials resource_list_method = getattr(resource.api_client, resource._list_method) list_request = { "parent": initializer.global_config.common_location_path( project=project, location=location ), "filter": filter, } if order_by: list_request["order_by"] = order_by resource_list = resource_list_method(request=list_request) or [] return [ cls._construct_sdk_resource_from_gapic( gapic_resource, project=project, location=location, credentials=creds ) for gapic_resource in resource_list if cls_filter(gapic_resource) ] @classmethod def _list_with_local_order( cls, cls_filter: Callable[[proto.Message], bool] = lambda _: True, filter: Optional[str] = None, order_by: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ) -> List[VertexAiResourceNoun]: """Private method to list all instances of this Vertex AI Resource, takes a `cls_filter` arg to filter to a particular SDK resource subclass. Provides client-side sorting when a list API doesn't support `order_by`. Args: cls_filter (Callable[[proto.Message], bool]): A function that takes one argument, a GAPIC resource, and returns a bool. If the function returns False, that resource will be excluded from the returned list. Example usage: cls_filter = lambda obj: obj.metadata in cls.valid_metadatas filter (str): Optional. An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. 
order_by (str): Optional. A comma-separated list of fields to order by, sorted in ascending order. Use "desc" after a field name for descending. Supported fields: `display_name`, `create_time`, `update_time` project (str): Optional. Project to retrieve list from. If not set, project set in aiplatform.init will be used. location (str): Optional. Location to retrieve list from. If not set, location set in aiplatform.init will be used. credentials (auth_credentials.Credentials): Optional. Custom credentials to use to retrieve list. Overrides credentials set in aiplatform.init. Returns: List[VertexAiResourceNoun] - A list of SDK resource objects """ li = cls._list( cls_filter=cls_filter, filter=filter, order_by=None, # This method will handle the ordering locally project=project, location=location, credentials=credentials, ) if order_by: desc = "desc" in order_by order_by = order_by.replace("desc", "") order_by = order_by.split(",") li.sort( key=lambda x: tuple(getattr(x, field.strip()) for field in order_by), reverse=desc, ) return li @classmethod def list( cls, filter: Optional[str] = None, order_by: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ) -> List[VertexAiResourceNoun]: """List all instances of this Vertex AI Resource. Example Usage: aiplatform.BatchPredictionJobs.list( filter='state="JOB_STATE_SUCCEEDED" AND display_name="my_job"', ) aiplatform.Model.list(order_by="create_time desc, display_name") Args: filter (str): Optional. An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. order_by (str): Optional. A comma-separated list of fields to order by, sorted in ascending order. Use "desc" after a field name for descending. Supported fields: `display_name`, `create_time`, `update_time` project (str): Optional. Project to retrieve list from. If not set, project set in aiplatform.init will be used. location (str): Optional. Location to retrieve list from. If not set, location set in aiplatform.init will be used. credentials (auth_credentials.Credentials): Optional. Custom credentials to use to retrieve list. Overrides credentials set in aiplatform.init. Returns: List[VertexAiResourceNoun] - A list of SDK resource objects """ return cls._list( filter=filter, order_by=order_by, project=project, location=location, credentials=credentials, ) @optional_sync() def delete(self, sync: bool = True) -> None: """Deletes this Vertex AI resource. WARNING: This deletion is permanent. Args: sync (bool): Whether to execute this deletion synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. """ _LOGGER.log_action_start_against_resource("Deleting", "", self) lro = getattr(self.api_client, self._delete_method)(name=self.resource_name) _LOGGER.log_action_started_against_resource_with_lro( "Delete", "", self.__class__, lro ) lro.result() _LOGGER.log_action_completed_against_resource("deleted.", "", self) def __repr__(self) -> str: if self._gca_resource: return VertexAiResourceNoun.__repr__(self) return FutureManager.__repr__(self) def _wait_for_resource_creation(self) -> None: """Wait until underlying resource is created. 
Currently this should only be used on subclasses that implement the construct then `run` pattern because the underlying sync=False implementation will not update downstream resource noun object's _gca_resource until the entire invoked method is complete. Ex: job = CustomTrainingJob() job.run(sync=False, ...) job._wait_for_resource_creation() Raises: RuntimeError if the resource has not been scheduled to be created. """ # If the user calls this but didn't actually invoke an API to create if self._are_futures_done() and not getattr(self._gca_resource, "name", None): self._raise_future_exception() raise RuntimeError( f"{self.__class__.__name__} resource is not scheduled to be created." ) while not getattr(self._gca_resource, "name", None): # breaks out of loop if creation has failed async if self._are_futures_done() and not getattr( self._gca_resource, "name", None ): self._raise_future_exception() time.sleep(1) def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when accessing properties that do not exist. Overrides VertexAiResourceNoun to provide a more informative exception if resource creation has failed asynchronously. Raises: RuntimeError when resource has not been created. """ if not getattr(self._gca_resource, "name", None): raise RuntimeError( f"{self.__class__.__name__} resource has not been created." + ( f" Resource failed with: {self._exception}" if self._exception else "" ) ) def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation. Args: annotation (type): Type hint """ # typing.Optional if getattr(annotation, "__origin__", None) is Union: return annotation.__args__[0] else: return annotation
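

# Illustrative sketch, not part of the SDK: how `optional_sync` and
# `get_annotation_class` fit together. The resource class and method names
# below are hypothetical and only demonstrate the expected "sync" contract.
#
#   class MyResource(VertexAiResourceNounWithFutureManager):
#       @optional_sync()
#       def create_thing(self, sync: bool = True) -> "MyResource":
#           ...  # long-running API call; runs in a Future when sync=False
#
#   resource = my_resource.create_thing(sync=False)  # returns immediately
#   resource.wait()                                  # block until the Future completes
#
# `get_annotation_class` unwraps Optional[...] return annotations so the
# decorator can construct an empty object of the annotated type, e.g.
# get_annotation_class(Optional[int]) is int, and get_annotation_class(int) is int.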
the-stack_0_6043
import logging import os import sys import click from rich.logging import RichHandler from labfunctions.conf.server_settings import settings # from labfunctions.control_plane import rqscheduler from labfunctions.types.agent import AgentConfig from labfunctions.utils import get_external_ip, get_hostname hostname = get_hostname() @click.group(name="agent") def agentcli(): """ Execute agent related actions """ pass @agentcli.command(name="run") @click.option("--workers", "-w", default=1, help="How many workers spawn") @click.option("--redis", "-r", default=settings.QUEUE_REDIS, help="Redis full dsn") @click.option( "--qnames", "-q", default="default", help="Comma separated list of queues to listen to", ) @click.option( "--cluster", "-C", default="default", help="Cluster name, it will be added as qname", ) @click.option( "--ip-address", "-i", default=None, help="IP address of the host", ) @click.option( "--agent-name", "-a", default=None, help="Agent Name", ) @click.option( "--debug", "-D", is_flag=True, default=False, help="Debug log", ) @click.option("--machine-id", "-m", default=f"localhost/ba/{hostname}") def runcli(redis, workers, qnames, cluster, ip_address, agent_name, machine_id, debug): """Run the agent""" # pylint: disable=import-outside-toplevel # from labfunctions.control_plane import agent from labfunctions.control import agent level = "INFO" if debug: level = "DEBUG" FORMAT = "%(message)s" logging.basicConfig( level=level, format=FORMAT, datefmt="[%X]", handlers=[RichHandler(rich_tracebacks=True)], ) agent.set_env(settings) ip_address = ip_address or get_external_ip(settings.DNS_IP_ADDRESS) queues = qnames.split(",") conf = AgentConfig( redis_dsn=redis, cluster=cluster, qnames=queues, ip_address=ip_address, machine_id=machine_id, heartbeat_ttl=settings.AGENT_HEARTBEAT_TTL, heartbeat_check_every=settings.AGENT_HEARTBEAT_CHECK, agent_name=agent_name, workers_n=workers, ) agent.run(conf)
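

if __name__ == "__main__":
    # Illustrative entry point (an assumption; the packaged CLI normally wires
    # this group in through the project's console scripts). It allows running
    # the command group directly, e.g.
    #   python agent_cli.py run -w 2 -q default,gpu -C my-cluster --debug
    # The queue and cluster names above are placeholders.
    agentcli()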
the-stack_0_6044
from .ad_hoc import * from nltk import WordNetLemmatizer from config import cfg def extract_tokens(sentence, str_list=None): """ Extract tokens among a sentences, meanwhile picking out the proper nouns and numbers. :param str sentence: The sentence to tokenize. :param list str_list: Proper nouns. :rtype: [list, list] """ str_list = str_list or list() if len(sentence.split()) < cfg.len_threshold: return None tokens = [] slot = [] sentence += ' ' sentence.replace(' ', ' ') while len(sentence) > 1: to_continue = False for template_str in str_list: template_str += ' ' if sentence.startswith(template_str): slot.append(template_str[:-1]) tokens.append('<STR>') sentence = sentence[len(template_str):] to_continue = True break if to_continue: continue space_idx = sentence.index(' ') next_word = sentence[:space_idx] sentence = sentence[space_idx+1:] if next_word.isdigit(): slot.append(int(next_word)) tokens.append('<NUM>') continue if next_word.lower() in num_list: slot.append(num_list.index(next_word.lower())) tokens.append('<NUM>') continue if len(next_word) > 0: tokens.append(next_word) slot.append(None) return tokens, slot def collide(l1, l2): """ Detect whether l1 and l2 have common elements. :param list l1: List 1. :param list l2: List 2. :rtype: bool """ return len(set(l1).intersection(l2)) > 0 wnl = WordNetLemmatizer() def lemmatize(word): """ Helper function of convert. :param str word: word to convert. :rtype: str """ if word.endswith('ly'): word = word[:-2] word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 's') return word
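

# Hedged illustration of extract_tokens (the result depends on cfg.len_threshold
# and on num_list imported from ad_hoc, so treat the output as indicative only):
#
#   >>> extract_tokens('send 3 boxes to New York', str_list=['New York'])
#   (['send', '<NUM>', 'boxes', 'to', '<STR>'], [None, 3, None, None, 'New York'])
#
# Proper nouns from str_list collapse to the '<STR>' token and digits to '<NUM>',
# with the original values kept position-for-position in the slot list.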
the-stack_0_6046
import time from multiprocessing import Pool, cpu_count import click from lib.lsun_room_api.lsun_room.item import DataItems def worker(item): #item.remap_layout() item.save_layout() @click.command() @click.option('--dataset_root', default='../data/lsun_room/') def main(dataset_root): for phase in ['train', 'val']: print('==> re-label for data in %s phase' % phase) s = time.time() dataset = DataItems(root=dataset_root, phase=phase) with Pool(cpu_count()) as pool: pool.map(worker, dataset.items) print('==> Done in %.4f sec.' % (time.time() - s)) if __name__ == '__main__': main()
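

# Hedged usage note: the script is meant to be invoked from the command line,
# e.g.
#   python relabel.py --dataset_root ../data/lsun_room/
# which re-saves the layout for every item in the train/val splits using one
# worker process per CPU core. The script name above is illustrative.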
the-stack_0_6047
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import unittest import tempfile from test_dist_fleet_base import TestFleetBase class TestDistMnistAsync2x2(TestFleetBase): def _setup_config(self): self._mode = "async" self._reader = "pyreader" def check_with_place(self, model_file, delta=1e-3, check_error_log=False, need_envs={}): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "FLAGS_rpc_deadline": "5000", # 5sec to fail fast "http_proxy": "", "CPU_NUM": "2" } required_envs.update(need_envs) if check_error_log: required_envs["GLOG_v"] = "3" required_envs["GLOG_logtostderr"] = "1" tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): self.check_with_place( "dist_fleet_ctr.py", delta=1e-5, check_error_log=False) class TestDistCtrHalfAsync2x2(TestFleetBase): def _setup_config(self): self._mode = "async" self._reader = "pyreader" def check_with_place(self, model_file, delta=1e-3, check_error_log=False, need_envs={}): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "FLAGS_rpc_deadline": "30000", # 5sec to fail fast "http_proxy": "", "FLAGS_communicator_send_queue_size": "2", "FLAGS_communicator_max_merge_var_num": "2", "CPU_NUM": "2", "SAVE_MODEL": "0" } required_envs.update(need_envs) if check_error_log: required_envs["GLOG_v"] = "3" required_envs["GLOG_logtostderr"] = "1" tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): self.check_with_place( "dist_fleet_ctr.py", delta=1e-5, check_error_log=False) if __name__ == "__main__": unittest.main()
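

# Hedged usage note: individual cases can be run with the standard unittest
# runner, e.g.
#   python -m unittest <this_module>.TestDistCtrHalfAsync2x2.test_dist_train -v
# check_with_place assembles the environment (CPU_NUM, FLAGS_rpc_deadline, ...)
# that _run_cluster needs before launching the trainers.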
the-stack_0_6048
from __future__ import print_function, absolute_import import sys import time from collections import OrderedDict import torch import numpy as np from .evaluation_metrics import cmc, mean_ap from .tlift import TLift def pre_tlift(gallery, query): gal_cam_id = np.array([cam for _, _, cam, _ in gallery]) gal_time = np.array([frame_time for _, _, _, frame_time in gallery]) prob_cam_id = np.array([cam for _, _, cam, _ in query]) prob_time = np.array([frame_time for _, _, _, frame_time in query]) return {'gal_cam_id': gal_cam_id, 'gal_time': gal_time, 'prob_cam_id': prob_cam_id, 'prob_time': prob_time, 'num_cams': gal_cam_id.max() + 1} def extract_cnn_feature(model, inputs): model = model.cuda().eval() with torch.no_grad(): outputs = model(inputs) outputs = outputs.cpu() return outputs def extract_features(model, data_loader, verbose=False): fea_time = 0 data_time = 0 features = OrderedDict() labels = OrderedDict() end = time.time() if verbose: print('Extract Features...', end='\t') for i, (imgs, fnames, pids, _) in enumerate(data_loader): data_time += time.time() - end end = time.time() outputs = extract_cnn_feature(model, imgs) for fname, output, pid in zip(fnames, outputs, pids): features[fname] = output labels[fname] = pid fea_time += time.time() - end end = time.time() if verbose: print('Feature time: {:.3f} seconds. Data time: {:.3f} seconds.'.format(fea_time, data_time)) return features, labels def pairwise_distance(matcher, prob_fea, gal_fea, gal_batch_size=4, prob_batch_size=4096): with torch.no_grad(): num_gals = gal_fea.size(0) num_probs = prob_fea.size(0) score = torch.zeros(num_probs, num_gals, device=prob_fea.device) matcher.eval() for i in range(0, num_probs, prob_batch_size): j = min(i + prob_batch_size, num_probs) matcher.make_kernel(prob_fea[i: j, :, :, :].cuda()) for k in range(0, num_gals, gal_batch_size): k2 = min(k + gal_batch_size, num_gals) score[i: j, k: k2] = matcher(gal_fea[k: k2, :, :, :].cuda()) # scale matching scores to make them visually more recognizable score = torch.sigmoid(score / 10) return (1. 
- score).cpu() # [p, g] def evaluate_all(distmat, query=None, gallery=None, query_ids=None, gallery_ids=None, query_cams=None, gallery_cams=None, cmc_topk=(1, 5, 10, 20)): if query is not None and gallery is not None: query_ids = [pid for _, pid, _, _ in query] gallery_ids = [pid for _, pid, _, _ in gallery] query_cams = [cam for _, _, cam, _ in query] gallery_cams = [cam for _, _, cam, _ in gallery] else: assert (query_ids is not None and gallery_ids is not None and query_cams is not None and gallery_cams is not None) # Compute mean AP mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams) print('Mean AP: {:4.1%}'.format(mAP)) # Compute CMC scores cmc_configs = { 'market1501': dict(separate_camera_set=False, single_gallery_shot=False, first_match_break=True)} cmc_scores = {name: cmc(distmat, query_ids, gallery_ids, query_cams, gallery_cams, **params) for name, params in cmc_configs.items()} print('CMC Scores') for k in cmc_topk: print(' top-{:<4}{:12.1%}' .format(k, cmc_scores['market1501'][k - 1])) return cmc_scores['market1501'][0], mAP def reranking(dist, query_num, k1=20, k2=6, lamda_value=0.3, verbose=False): original_dist = dist.numpy() all_num = original_dist.shape[0] original_dist = np.transpose(original_dist / np.max(original_dist, axis=0)) V = np.zeros_like(original_dist).astype(np.float16) initial_rank = np.argsort(original_dist).astype(np.int32) if verbose: print('starting re_ranking...', end='\t') for i in range(all_num): # k-reciprocal neighbors forward_k_neigh_index = initial_rank[i, :k1 + 1] backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1] fi = np.where(backward_k_neigh_index == i)[0] k_reciprocal_index = forward_k_neigh_index[fi] k_reciprocal_expansion_index = k_reciprocal_index for j in range(len(k_reciprocal_index)): candidate = k_reciprocal_index[j] candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2)) + 1] candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index, :int(np.around(k1 / 2)) + 1] fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0] candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate] if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len( candidate_k_reciprocal_index): k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index) k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) weight = np.exp(-original_dist[i, k_reciprocal_expansion_index]) V[i, k_reciprocal_expansion_index] = weight / np.sum(weight) original_dist = original_dist[:query_num, ] if k2 != 1: V_qe = np.zeros_like(V, dtype=np.float16) for i in range(all_num): V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0) V = V_qe del V_qe del initial_rank invIndex = [] for i in range(all_num): invIndex.append(np.where(V[:, i] != 0)[0]) jaccard_dist = np.zeros_like(original_dist, dtype=np.float16) for i in range(query_num): temp_min = np.zeros(shape=[1, all_num], dtype=np.float16) indNonZero = np.where(V[i, :] != 0)[0] indImages = [] indImages = [invIndex[ind] for ind in indNonZero] for j in range(len(indNonZero)): temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]]) jaccard_dist[i] = 1 - temp_min / (2 - temp_min) final_dist = jaccard_dist * (1 - lamda_value) + original_dist * lamda_value del original_dist del V del jaccard_dist final_dist = final_dist[:query_num, query_num:] return final_dist class 
Evaluator(object): def __init__(self, model): super(Evaluator, self).__init__() self.model = model def evaluate(self, matcher, testset, query_loader, gallery_loader, gal_batch_size=4, prob_batch_size=4096, tau=100, sigma=200, K=10, alpha=0.2): query = testset.query gallery = testset.gallery print('Compute similarity ...', end='\t') start = time.time() prob_fea, _ = extract_features(self.model, query_loader) prob_fea = torch.cat([prob_fea[f].unsqueeze(0) for f, _, _, _ in query], 0) num_prob = len(query) num_gal = len(gallery) batch_size = gallery_loader.batch_size dist = torch.zeros(num_prob, num_gal) for i, (imgs, fnames, pids, _) in enumerate(gallery_loader): print('Compute similarity %d / %d. \t' % (i + 1, len(gallery_loader)), end='\r', file=sys.stdout.console) gal_fea = extract_cnn_feature(self.model, imgs) g0 = i * batch_size g1 = min(num_gal, (i + 1) * batch_size) dist[:, g0:g1] = pairwise_distance(matcher, prob_fea, gal_fea, batch_size, prob_batch_size) # [p, g] print('Time: %.3f seconds.' % (time.time() - start)) rank1, mAP = evaluate_all(dist, query=query, gallery=gallery) if testset.has_time_info: num_gal = gal_fea.size(0) num_prob = prob_fea.size(0) num_all = num_gal + num_prob dist_rerank = torch.zeros(num_all, num_all) print('Compute similarity for rerank...', end='\t') start = time.time() with torch.no_grad(): dist_rerank[:num_prob, num_prob:] = dist dist_rerank[num_prob:, :num_prob] = dist.t() dist_rerank[:num_prob, :num_prob] = pairwise_distance(matcher, prob_fea, prob_fea, gal_batch_size, prob_batch_size) gal_fea, _ = extract_features(self.model, gallery_loader, verbose=True) gal_fea = torch.cat([gal_fea[f].unsqueeze(0) for f, _, _, _ in gallery], 0) dist_rerank[num_prob:, num_prob:] = pairwise_distance(matcher, gal_fea, gal_fea, gal_batch_size, prob_batch_size) dist_rerank = reranking(dist_rerank, num_prob, verbose=True) print('Time: %.3f seconds.' % (time.time() - start)) rank1_rerank, mAP_rerank = evaluate_all(dist_rerank, query=query, gallery=gallery) score_rerank = 1 - dist_rerank print('Compute TLift...', end='\t') start = time.time() pre_tlift_dict = pre_tlift(gallery, query) score_tlift = TLift(score_rerank, tau=tau, sigma=sigma, K=K, alpha=alpha, **pre_tlift_dict) print('Time: %.3f seconds.' % (time.time() - start)) dist_tlift = 1 - score_tlift rank1_tlift, mAP_tlift = evaluate_all(dist_tlift, query=query, gallery=gallery) else: pre_tlift_dict = {'gal_time': 0, 'prob_time': 0} dist_tlift = 0 dist_rerank = 0 rank1_rerank = 0 mAP_rerank = 0 rank1_tlift = 0 mAP_tlift = 0 return rank1, mAP, rank1_rerank, mAP_rerank, rank1_tlift, mAP_tlift, dist.numpy(), dist_rerank, \ dist_tlift, pre_tlift_dict
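

# Hedged usage sketch (the model, matcher, dataset and loader objects are
# assumptions supplied by the surrounding project, not defined in this module):
#
#   evaluator = Evaluator(model)
#   rank1, mAP, rank1_rr, mAP_rr, rank1_tlift, mAP_tlift, dist, dist_rr, \
#       dist_tlift, tlift_dict = evaluator.evaluate(
#           matcher, testset, query_loader, gallery_loader)
#
# The three (rank-1, mAP) pairs correspond to the raw matcher scores, the
# k-reciprocal re-ranked scores, and the TLift-adjusted scores; the re-ranked
# and TLift numbers are only computed when testset.has_time_info is True.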
the-stack_0_6050
# Copyright (c) 2013, TeamPRO and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from datetime import datetime from calendar import monthrange from frappe import _, msgprint from frappe.utils import flt def execute(filters=None): if not filters: filters = {} columns = get_columns() data = [] row = [] conditions, filters = get_conditions(filters) total = 0 salary_slips = get_salary_slips(conditions, filters) for ss in salary_slips: row = [] if ss.client: row = [ss.client] else: row = [""] if ss.site: row += [ss.site] else: row = [""] basic = frappe.db.get_value("Salary Detail", {'abbr': 'B', 'parent': ss.name}, ['amount']) if basic: row += [basic] else: row += [0] pf = frappe.db.get_value("Salary Detail", {'abbr': 'PF', 'parent': ss.name}, ['amount']) if basic: row += [pf] else: row += [0] esi = frappe.db.get_value("Salary Detail", {'abbr': 'ESI', 'parent': ss.name}, ['amount']) if esi: row += [esi] else: row += [0] pt = frappe.db.get_value("Salary Detail", {'abbr': 'PT', 'parent': ss.name}, ['amount']) if pt: row += [pt] else: row += [0] ctc = frappe.db.get_value("Salary Detail", {'abbr': 'CTC', 'parent': ss.name}, ['amount']) if ctc: row += [ctc] else: row += [0] data.append(row) return columns, data def get_columns(): columns = [ _("Client") + ":Data:300", _("Site") + ":Data:150", _("Basic") + ":Currency:120", _("PF") + ":Currency:120", _("ESI") + ":Currency:120", _("PT") + ":Currency:120", _("CTC") + ":Currency:120" ] return columns def get_salary_slips(conditions, filters): # salary_slips = frappe.db.sql("""select sum(`tabSalary Detail`.amount), ss.client_name as client,ss.site as site,ss.name as name from `tabSalary Slip` ss # left join `tabSalary Detail` on ss.name = `tabSalary Detail`.parent # where `tabSalary Detail`.salary %s order by site""" % conditions, filters, as_dict=1) salary_slips = frappe.db.sql("""select ss.client_name as client,ss.site as site,ss.name as name from `tabSalary Slip` ss where %s order by site""" % conditions, filters, as_dict=1) return salary_slips def get_conditions(filters): conditions = "" if filters.get("from_date"): conditions += "start_date >= %(from_date)s" if filters.get("to_date"): conditions += " and end_date >= %(to_date)s" if filters.get("client"): conditions += " and client_name = %(client)s" if filters.get("site"): conditions += " and site = %(site)s" return conditions, filters
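

# Hedged usage sketch: Frappe script reports call `execute` with the filters
# chosen in the report UI; an equivalent direct call, using the keys consumed
# by get_conditions above, would look like this (values are placeholders):
#
#   columns, data = execute({
#       "from_date": "2021-04-01",
#       "to_date": "2021-04-30",
#       "client": "Example Client",   # optional
#       "site": "Example Site",       # optional
#   })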
the-stack_0_6054
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'

# DryRun operation: the request would have succeeded, but the DryRun parameter was passed in addition.
DRYRUNOPERATION = 'DryRunOperation'

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Internal error.
INTERNALERROR = 'InternalError'

# Database exception.
INTERNALERROR_DBERROR = 'InternalError.DBError'

# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Name conflict.
INVALIDPARAMETERVALUE_DUPLICATENAME = 'InvalidParameterValue.DuplicateName'

# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'

# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'

# Operation denied.
OPERATIONDENIED = 'OperationDenied'

# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'

# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'

# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'

# Resources are sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'

# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'

# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'

# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
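
# Hedged usage sketch: these constants mirror the error `code` strings returned
# by the Tencent Cloud API and are typically compared against the code carried
# by a caught TencentCloudSDKException (the client call below is hypothetical):
#
#   try:
#       resp = client.DescribeSomething(req)
#   except TencentCloudSDKException as err:
#       if err.code == RESOURCENOTFOUND:
#           ...  # handle the missing resource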
the-stack_0_6057
import os import numpy as np import xarray as xr import netCDF4 as nc from glob import glob from functools import partial from os import makedirs as mkdir from multiprocessing import get_context from datetime import datetime, timedelta os.environ['OMP_NUM_THREAD'] = '1' # Set up init to use sys.argv later init = datetime(2020, 10, 28, 0) nlat, xlat = 30, 50 nlon, xlon = -130, -100 tmpdir = '/scratch/general/lustre/u1070830/nomads_nbm/tmp/'; mkdir(tmpdir, exist_ok=True) datadir = '/scratch/general/lustre/u1070830/nomads_nbm/'; mkdir(datadir, exist_ok=True) def download_grib(url, subset_str, tmp): from subprocess import Popen, call import requests filename = url.split('file=')[1].split('&')[0] filename = filename.replace('.co.', '.%s.'%subset_str) if not os.path.isfile(tmp + filename): print('Downloading %s'%filename) r = requests.get(url, allow_redirects=True) open(tmp + filename, 'wb').write(r.content) cmd = 'wget -O "%s" "%s"'%(tmp + filename, url) Popen(cmd, shell=True) return filename def repack_nbm_grib2(f): import pygrib import gc print(f.split('/')[-1]) if not os.path.isfile(f+'.nc'): try: grb = pygrib.open(f) msgs = grb.read() init = str(msgs[0]).split(':')[-2].split(' ')[-1] init = datetime.strptime(init, '%Y%m%d%H%M') fhr = msgs[0]['endStep'] valid = np.datetime64(init + timedelta(hours=fhr)) lons, lats = msgs[0].data()[2], msgs[0].data()[1] except: raise return None else: probability, probability_labels = [], [] percentile, percentile_labels = [], [] deterministic, deterministic_labels = [], [] got_deterministic = {i:False for i in [1, 6, 12, 24]} for msg in msgs: interval = msg['stepRange'].split('-') interval = int(interval[1]) - int(interval[0]) if 'Probability of event' in str(msg): # Probability of event above upper limit (> 0.254) NOT inclusive threshold = round(msg['upperLimit']/25.4, 2) probability.append([msg.values]) probability_labels.append([interval, threshold]) elif 'percentileValue' in msg.keys(): percentile.append([msg.values]) percentile_labels.append([interval, msg['percentileValue']]) else: if got_deterministic[interval] == False: deterministic_labels.append(interval) deterministic.append(msg.values) got_deterministic[interval] = True else: pass grb.close() gc.collect() deterministic_labels = np.array(deterministic_labels) deterministic_labels = deterministic_labels[np.argsort(deterministic_labels)] deterministic = np.array(deterministic)[np.argsort(deterministic_labels)] probability = np.array(probability, dtype=object).reshape(-1, lats.shape[0], lats.shape[1]) probability_labels = np.array(probability_labels) percentile = np.array(percentile, dtype=object).reshape(-1, 99, lats.shape[0], lats.shape[1]) percentile_labels = np.array(percentile_labels) deterministic = xr.DataArray(deterministic.astype(np.float32), name='pop', dims=('interval', 'y', 'x'), coords={'interval':('interval', deterministic_labels), 'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)}) pop = xr.DataArray(probability[:3].astype(np.float32), name='pop', dims=('interval', 'y', 'x'), coords={'interval':('interval', probability_labels[:3, 0]), 'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)}) probability = xr.DataArray([probability[2:].astype(np.float32)], name='probability', dims=('interval', 'threshold', 'y', 'x'), coords={'interval':('interval', [24]), 'threshold':('threshold', probability_labels[2:,1]), 'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)}) percentile = xr.DataArray(percentile.astype(np.float32), name='percentile', dims=('interval', 'percentile', 'y', 'x'), 
coords={'interval':('interval', np.unique(percentile_labels[:, 0])), 'percentile':('percentile', range(1, 100)), 'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)}) ds = xr.Dataset() # ds['fhr'] = fhr ds['time'] = valid ds.attrs['InitTime'] = str(init) ds['qpf'] = deterministic ds['pop'] = pop ds['probx'] = probability ds['pqpf'] = percentile ds.to_netcdf(f+'.nc') del ds gc.collect() return None else: print('Found: %s, skipping'%f.split('/')[-1]) def write_output(output, ncfilename): lat, lon = output['lats'], output['lons'] os.makedirs(tmpdir, exist_ok=True) with nc.Dataset(tmpdir + ncfilename, 'w', format='NETCDF4') as ncfile: ncfile.nx = str(lon.shape[1]) ncfile.ny = str(lon.shape[0]) ncfile.InitTime = output.attrs['InitTime'] # Lat Lon dimensions and data ncfile.createDimension('lon', lon.shape[1]) ncfile.createDimension('lat', lon.shape[0]) ncfile.createDimension('time', None) ncfile.createDimension('interval', output['interval'].size) ncfile.createDimension('percentile', output['percentile'].size) ncfile.createDimension('threshold', output['threshold'].size) lon_nc = ncfile.createVariable('lon', 'f4', ('lat', 'lon')) lon_nc.long_name = 'longitude' lon_nc.units = 'degrees_east' lon_nc.standard_name = 'longitude' lon_nc._CoordinateAxisType = 'Lon' lat_nc = ncfile.createVariable('lat', 'f4', ('lat', 'lon')) lat_nc.long_name = 'latitude' lat_nc.units = 'degrees_north' lat_nc.standard_name = 'latitude' lat_nc._CoordinateAxisType = 'Lat' lon_nc[:] = output.lons.values lat_nc[:] = output.lats.values interval = ncfile.createVariable('interval', 'short', ('interval')) interval.long_name = 'accumulation interval' interval.units = 'hours' interval.standard_name = 'interval' interval[:] = output['interval'].values.astype(int) percentile = ncfile.createVariable('percentile', 'short', ('percentile'), zlib=True, complevel=9) percentile.long_name = 'accumulation percentile' percentile.units = 'none' percentile.standard_name = 'percentile' percentile[:] = output['percentile'].values.astype(int) threshold = ncfile.createVariable('threshold', 'f4', ('threshold'), zlib=True, complevel=9) threshold.long_name = 'probabiity of exceedence threshold' threshold.units = 'in' threshold.standard_name = 'threshold' threshold[:] = output['threshold'].values # Write variable data qpf_nc = ncfile.createVariable('qpf', 'f4', ('time', 'interval', 'lat', 'lon'), fill_value=-9999.0, zlib=True, complevel=9) qpf_nc.long_name = 'Deterministic QPF' qpf_nc.level = '0' qpf_nc.units = 'in' qpf_nc[:] = output['qpf'].values # pop_nc = ncfile.createVariable('pop', 'f4', ('time', 'interval', 'lat', 'lon'), # fill_value=-9999.0, zlib=True, complevel=9) # pop_nc.long_name = 'Probability of Precipitation (> 0.01")' # pop_nc.level = '0' # pop_nc.units = 'in' # pop_nc[:] = output['pop'].values pqpf_nc = ncfile.createVariable('pqpf', 'f4', ('time', 'interval', 'percentile', 'lat', 'lon'), fill_value=-9999.0, zlib=True, complevel=9) pqpf_nc.long_name = 'Probabilistic QPF' pqpf_nc.level = '0' pqpf_nc.units = 'in' pqpf_nc[:] = output['pqpf'].values probx_nc = ncfile.createVariable('probx', 'f4', ('time', 'interval', 'threshold', 'lat', 'lon'), fill_value=-9999.0, zlib=True, complevel=9) probx_nc.long_name = 'Probability of Exceedence' probx_nc.level = '0' probx_nc.units = '%' probx_nc[:] = output['probx'].values print(ncfile) if __name__ == '__main__': yyyy, mm, dd, hh = init.year, init.month, init.day, init.hour base = 'https://nomads.ncep.noaa.gov/cgi-bin/filter_blend.pl?' 
var = '&var_APCP=on' region = '&subregion=&leftlon={:.2f}&rightlon={:.2f}&toplat={:.2f}&bottomlat={:.2f}'.format(nlon, xlon, xlat, nlat) mdir = '&dir=%2Fblend.{:04d}{:02d}{:02d}%2F{:02d}%2Fqmd'.format(yyyy, mm, dd, hh) url_list = [] # Need to fix the data processing below to allow for sub24 leads for fhr in np.arange(24, 180+1, 6): file = 'file=blend.t{:02d}z.qmd.f{:03d}.co.grib2'.format(hh, fhr) url_list.append(base + file + var + region + mdir) download_grib_mp = partial(download_grib, subset_str='WR', tmp=tmpdir) with get_context('forkserver').Pool(len(url_list)) as p: flist = p.imap_unordered(download_grib_mp, url_list, chunksize=1) p.close() p.join() flist = sorted(flist) filelist = sorted(glob(tmpdir + '*.grib2')) print(filelist[0]) repack_nbm_grib2(filelist[0]) # with get_context('forkserver').Pool(6) as p: # output = p.imap_unordered(repack_nbm_grib2, filelist, chunksize=1) # p.close() # p.join() # output = [xr.open_dataset(f+'.nc') for f in filelist] # output = xr.concat([i for i in output if i is not None], dim='time') # write_output(output) # compress = {'compression':'gzip', 'compression_opts':9} # encoding = {var:compress for var in output.data_vars if var != 'time'} # output.to_netcdf(tmpdir + './test_output.nc', engine='h5netcdf', encoding=encoding)
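

# Hedged usage note: each repacked .nc file can be inspected with xarray, e.g.
#
#   ds = xr.open_dataset(tmpdir + 'blend.t00z.qmd.f024.WR.grib2.nc')
#   print(ds['pqpf'].sel(percentile=90))   # 90th-percentile QPF per interval
#
# The filename above is illustrative; real names follow the pattern built in
# download_grib from the init hour and forecast hour.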
the-stack_0_6058
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> from bpy.types import Header, Menu, Panel from bl_ui.space_dopesheet import ( DopesheetFilterPopoverBase, dopesheet_filter, ) class GRAPH_HT_header(Header): bl_space_type = 'GRAPH_EDITOR' def draw(self, context): layout = self.layout tool_settings = context.tool_settings st = context.space_data layout.template_header() # Now a exposed as a sub-space type # layout.prop(st, "mode", text="") GRAPH_MT_editor_menus.draw_collapsible(context, layout) row = layout.row(align=True) row.prop(st, "use_normalization", icon='NORMALIZE_FCURVES', text="Normalize", toggle=True) sub = row.row(align=True) sub.active = st.use_normalization sub.prop(st, "use_auto_normalization", icon='FILE_REFRESH', text="", toggle=True) layout.separator_spacer() dopesheet_filter(layout, context) row = layout.row(align=True) if st.has_ghost_curves: row.operator("graph.ghost_curves_clear", text="", icon='X') else: row.operator("graph.ghost_curves_create", text="", icon='FCURVE_SNAPSHOT') layout.popover( panel="GRAPH_PT_filters", text="", icon='FILTER', ) layout.prop(st, "pivot_point", icon_only=True) layout.prop(st, "auto_snap", text="") row = layout.row(align=True) row.prop(tool_settings, "use_proportional_fcurve", text="", icon_only=True) sub = row.row(align=True) sub.active = tool_settings.use_proportional_fcurve sub.prop(tool_settings, "proportional_edit_falloff", text="", icon_only=True) class GRAPH_PT_filters(DopesheetFilterPopoverBase, Panel): bl_space_type = 'GRAPH_EDITOR' bl_region_type = 'HEADER' bl_label = "Filters" def draw(self, context): layout = self.layout DopesheetFilterPopoverBase.draw_generic_filters(context, layout) layout.separator() DopesheetFilterPopoverBase.draw_search_filters(context, layout) layout.separator() DopesheetFilterPopoverBase.draw_standard_filters(context, layout) class GRAPH_MT_editor_menus(Menu): bl_idname = "GRAPH_MT_editor_menus" bl_label = "" def draw(self, _context): layout = self.layout layout.menu("GRAPH_MT_view") layout.menu("GRAPH_MT_select") layout.menu("GRAPH_MT_marker") layout.menu("GRAPH_MT_channel") layout.menu("GRAPH_MT_key") class GRAPH_MT_view(Menu): bl_label = "View" def draw(self, context): layout = self.layout st = context.space_data layout.prop(st, "show_region_ui") layout.separator() layout.prop(st, "use_realtime_update") layout.prop(st, "show_cursor") layout.prop(st, "show_sliders") layout.prop(st, "show_group_colors") layout.prop(st, "show_marker_lines") layout.prop(st, "use_auto_merge_keyframes") layout.separator() layout.prop(st, "use_beauty_drawing") layout.separator() layout.prop(st, "show_handles") layout.prop(st, "use_only_selected_curves_handles") layout.prop(st, "use_only_selected_keyframe_handles") layout.prop(st, "show_seconds") layout.prop(st, "show_locked_time") 
layout.separator() layout.operator("anim.previewrange_set") layout.operator("anim.previewrange_clear") layout.operator("graph.previewrange_set") layout.separator() layout.operator("graph.view_all") layout.operator("graph.view_selected") layout.operator("graph.view_frame") # Add this to show key-binding (reverse action in dope-sheet). layout.separator() props = layout.operator("wm.context_set_enum", text="Toggle Dope Sheet") props.data_path = "area.type" props.value = 'DOPESHEET_EDITOR' layout.separator() layout.menu("INFO_MT_area") class GRAPH_MT_select(Menu): bl_label = "Select" def draw(self, _context): layout = self.layout layout.operator("graph.select_all", text="All").action = 'SELECT' layout.operator("graph.select_all", text="None").action = 'DESELECT' layout.operator("graph.select_all", text="Invert").action = 'INVERT' layout.separator() props = layout.operator("graph.select_box") props.axis_range = False props.include_handles = False props = layout.operator("graph.select_box", text="Box Select (Axis Range)") props.axis_range = True props.include_handles = False props = layout.operator("graph.select_box", text="Box Select (Include Handles)") props.axis_range = False props.include_handles = True layout.operator("graph.select_circle") layout.separator() layout.operator("graph.select_column", text="Columns on Selected Keys").mode = 'KEYS' layout.operator("graph.select_column", text="Column on Current Frame").mode = 'CFRA' layout.operator("graph.select_column", text="Columns on Selected Markers").mode = 'MARKERS_COLUMN' layout.operator("graph.select_column", text="Between Selected Markers").mode = 'MARKERS_BETWEEN' layout.separator() props = layout.operator("graph.select_leftright", text="Before Current Frame") props.extend = False props.mode = 'LEFT' props = layout.operator("graph.select_leftright", text="After Current Frame") props.extend = False props.mode = 'RIGHT' layout.separator() layout.operator("graph.select_more") layout.operator("graph.select_less") layout.separator() layout.operator("graph.select_linked") class GRAPH_MT_marker(Menu): bl_label = "Marker" def draw(self, context): layout = self.layout from bl_ui.space_time import marker_menu_generic marker_menu_generic(layout, context) # TODO: pose markers for action edit mode only? 
class GRAPH_MT_channel(Menu): bl_label = "Channel" def draw(self, context): layout = self.layout layout.operator_context = 'INVOKE_REGION_CHANNELS' layout.operator("anim.channels_delete") if context.space_data.mode == 'DRIVERS': layout.operator("graph.driver_delete_invalid") layout.separator() layout.operator("anim.channels_group") layout.operator("anim.channels_ungroup") layout.separator() layout.operator_menu_enum("anim.channels_setting_toggle", "type") layout.operator_menu_enum("anim.channels_setting_enable", "type") layout.operator_menu_enum("anim.channels_setting_disable", "type") layout.separator() layout.operator("anim.channels_editable_toggle") layout.operator_menu_enum("graph.extrapolation_type", "type", text="Extrapolation Mode") layout.separator() layout.operator("graph.hide", text="Hide Selected Curves").unselected = False layout.operator("graph.hide", text="Hide Unselected Curves").unselected = True layout.operator("graph.reveal") layout.separator() layout.operator("anim.channels_expand") layout.operator("anim.channels_collapse") layout.separator() layout.operator_menu_enum("anim.channels_move", "direction", text="Move...") layout.separator() layout.operator("anim.channels_fcurves_enable") class GRAPH_MT_key(Menu): bl_label = "Key" def draw(self, _context): layout = self.layout layout.menu("GRAPH_MT_key_transform", text="Transform") layout.operator_menu_enum("graph.snap", "type", text="Snap") layout.operator_menu_enum("graph.mirror", "type", text="Mirror") layout.separator() layout.operator_menu_enum("graph.keyframe_insert", "type") layout.operator_menu_enum("graph.fmodifier_add", "type") layout.operator("graph.sound_bake") layout.separator() layout.operator("graph.frame_jump") layout.separator() layout.operator("graph.copy") layout.operator("graph.paste") layout.operator("graph.paste", text="Paste Flipped").flipped = True layout.operator("graph.duplicate_move") layout.operator("graph.delete") layout.separator() layout.operator_menu_enum("graph.handle_type", "type", text="Handle Type") layout.operator_menu_enum("graph.interpolation_type", "type", text="Interpolation Mode") layout.operator_menu_enum("graph.easing_type", "type", text="Easing Type") layout.separator() layout.operator("graph.clean").channels = False layout.operator("graph.clean", text="Clean Channels").channels = True layout.operator("graph.smooth") layout.operator("graph.sample") layout.operator("graph.bake") layout.separator() layout.operator("graph.euler_filter", text="Discontinuity (Euler) Filter") class GRAPH_MT_key_transform(Menu): bl_label = "Transform" def draw(self, _context): layout = self.layout layout.operator("transform.translate", text="Move") layout.operator("transform.transform", text="Extend").mode = 'TIME_EXTEND' layout.operator("transform.rotate", text="Rotate") layout.operator("transform.resize", text="Scale") class GRAPH_MT_delete(Menu): bl_label = "Delete" def draw(self, _context): layout = self.layout layout.operator("graph.delete") layout.separator() layout.operator("graph.clean").channels = False layout.operator("graph.clean", text="Clean Channels").channels = True class GRAPH_MT_context_menu(Menu): bl_label = "F-Curve Context Menu" def draw(self, _context): layout = self.layout layout.operator_context = 'INVOKE_DEFAULT' layout.operator("graph.copy", text="Copy", icon='COPYDOWN') layout.operator("graph.paste", text="Paste", icon='PASTEDOWN') layout.operator("graph.paste", text="Paste Flipped", icon='PASTEFLIPDOWN').flipped = True layout.separator() 
layout.operator_menu_enum("graph.handle_type", "type", text="Handle Type") layout.operator_menu_enum("graph.interpolation_type", "type", text="Interpolation Mode") layout.operator_menu_enum("graph.easing_type", "type", text="Easing Type") layout.separator() layout.operator("graph.keyframe_insert").type = 'SEL' layout.operator("graph.duplicate_move") layout.operator_context = 'EXEC_REGION_WIN' layout.operator("graph.delete") layout.separator() layout.operator_menu_enum("graph.mirror", "type", text="Mirror") layout.operator_menu_enum("graph.snap", "type", text="Snap") class GRAPH_MT_pivot_pie(Menu): bl_label = "Pivot Point" def draw(self, context): layout = self.layout pie = layout.menu_pie() pie.prop_enum(context.space_data, "pivot_point", value='BOUNDING_BOX_CENTER') pie.prop_enum(context.space_data, "pivot_point", value='CURSOR') pie.prop_enum(context.space_data, "pivot_point", value='INDIVIDUAL_ORIGINS') class GRAPH_MT_snap_pie(Menu): bl_label = "Snap" def draw(self, _context): layout = self.layout pie = layout.menu_pie() pie.operator("graph.snap", text="Current Frame").type = 'CFRA' pie.operator("graph.snap", text="Cursor Value").type = 'VALUE' pie.operator("graph.snap", text="Nearest Frame").type = 'NEAREST_FRAME' pie.operator("graph.snap", text="Nearest Second").type = 'NEAREST_SECOND' pie.operator("graph.snap", text="Nearest Marker").type = 'NEAREST_MARKER' pie.operator("graph.snap", text="Flatten Handles").type = 'HORIZONTAL' class GRAPH_MT_channel_context_menu(Menu): bl_label = "F-Curve Channel Context Menu" def draw(self, context): layout = self.layout st = context.space_data layout.separator() layout.operator("anim.channels_setting_enable", text="Mute Channels").type = 'MUTE' layout.operator("anim.channels_setting_disable", text="Unmute Channels").type = 'MUTE' layout.separator() layout.operator("anim.channels_setting_enable", text="Protect Channels").type = 'PROTECT' layout.operator("anim.channels_setting_disable", text="Unprotect Channels").type = 'PROTECT' layout.separator() layout.operator("anim.channels_group") layout.operator("anim.channels_ungroup") layout.separator() layout.operator("anim.channels_editable_toggle") layout.operator_menu_enum("graph.extrapolation_type", "type", text="Extrapolation Mode") layout.separator() layout.operator("graph.hide", text="Hide Selected Curves").unselected = False layout.operator("graph.hide", text="Hide Unselected Curves").unselected = True layout.operator("graph.reveal") layout.separator() layout.operator("anim.channels_expand") layout.operator("anim.channels_collapse") layout.separator() layout.operator_menu_enum("anim.channels_move", "direction", text="Move...") layout.separator() layout.operator("anim.channels_delete") if st.mode == 'DRIVERS': layout.operator("graph.driver_delete_invalid") classes = ( GRAPH_HT_header, GRAPH_MT_editor_menus, GRAPH_MT_view, GRAPH_MT_select, GRAPH_MT_marker, GRAPH_MT_channel, GRAPH_MT_key, GRAPH_MT_key_transform, GRAPH_MT_delete, GRAPH_MT_context_menu, GRAPH_MT_channel_context_menu, GRAPH_MT_pivot_pie, GRAPH_MT_snap_pie, GRAPH_PT_filters, ) if __name__ == "__main__": # only for live edit. from bpy.utils import register_class for cls in classes: register_class(cls)
the-stack_0_6060
""" Copyright 2016 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import time from cloudcafe.networking.networks.common.proxy_mgr.proxy_mgr \ import NetworkProxyMgr from cloudroast.networking.networks.fixtures import NetworkingComputeFixture from cloudcafe.networking.networks.personas import ServerPersona from cloudroast.networking.networks.topologies.topology_routines \ import TopologyFixtureRoutines class SpokeAndHubFixture(NetworkingComputeFixture, TopologyFixtureRoutines): NUM_OF_SPOKES = 5 IP_VERSION = 4 SSH_PROVISION_DELAY = 20 @classmethod def setUpClass(cls): super(SpokeAndHubFixture, cls).setUpClass() cls.iso_nets = [] cls.iso_subnets = [] # dictionary key = server_id # Sub-dictionary key constants are defined in the # TopologyFixtureRoutine class # Dictionary Structure: # <server_id>: {PERSONA: <persona_obj>, # SERVER: <server_model>, # PROXY: <proxy_model>} cls.servers = {} # Hub is a single server model representing the hub of the wheel # (Every network gateway interface is defined on the hub, and all # traffic goes through the hub). Dictionary is same as sub-dictionary # in servers. (KEYS = PERSONA, SERVER, PROXY) cls.hub = {} cls.last_connectivity_check = {} cls.base_iso_net = cls.net.subnets.config.ipv4_prefix.replace('*', '0') cls.base_iso_subnet_mask = cls.determine_octet_mask(cls.base_iso_net) def setUp(self): super(SpokeAndHubFixture, self).setUp() # If the hub is not defined, build the topology if not self.hub.keys(): self.fixture_log.debug("NUMBER OF SPOKES ON HUB: {0}".format( self.NUM_OF_SPOKES)) self.log_action('Build spoke and hub topology') self.servers, self.iso_nets, self.iso_subnets = \ self._build_spokes() self.hub = self._build_hub_router(network_spokes=self.iso_subnets) self.log_action('Verify Spoke and Hub Connectivity') connectivity = self.verify_ping_connectivity() # If DEBUGGING or STAGING FOR MANUAL DEBUGGING # (NUM_OF_SPOKES=1), then execute cmd else make sure connectivity # is working... if self.DEBUG and (not connectivity or self.NUM_OF_SPOKES == 1): self.debug_topology_routine() # If NUM_OF_SPOKES == 1, recommend using flat network fixture elif self.NUM_OF_SPOKES > 1: self.assertTrue(connectivity, self.connectivity_error()) def _build_spokes(self): """ Builds each spoke network (isolated network) and adds a host at the end of each spoke. Each network gateway will be registered on the hub router. 
:return: (tuple), servers [Dict: end of spoke hosts], iso_nets [List: isolated networks created] iso_subnets [list: isolated network subnets created] """ # NOTE: Each spoke is a subnet on its own isolated network # Check to see if any spokes are needed if len(self.servers) >= self.NUM_OF_SPOKES: return self.servers, self.iso_nets, self.iso_subnets # Determine the network needed for the static route network_for_static_route = '{net}/{snm}'.format( net=self.base_iso_net, snm=self.base_iso_subnet_mask) self.fixture_log.info('Network for static route: {0}'.format( network_for_static_route)) # Determine how many spokes are needed and build the spokes num_of_spokes = self.NUM_OF_SPOKES - len(self.servers) for spoke in xrange(num_of_spokes): svr_num = '{run_id!s}_{index!s}'.format( index=spoke + len(self.servers), run_id=self.RUN_ID) iso_net, iso_subnet, _ = self._build_isolated_network( ip_version=self.IP_VERSION) # Store ISOLATED Network/Subnet information self.iso_nets.append(iso_net) self.iso_subnets.append(iso_subnet) # Build "End of the spoke" (non-hub) hosts self._build_and_register_iso_net_server( svr_id_num=svr_num, iso_network=iso_net) # Wait for final spoke server to stabilize time.sleep(self.SSH_PROVISION_DELAY) # Add the generalized isolated network static route (so any isolated # subnets are routed out the local isolated network interface and not # the standard default route (public network interface). self.fixture_log.debug('\n\n**** Add Static Routes **** \n\n') addressing_details = '' for server_dict in self.servers.itervalues(): # Add a generalized static route for the general isolated networks persona = server_dict[TopologyFixtureRoutines.PERSONA] addressing_details += '{0!s}\n'.format(persona) interface_to_use = self.get_vm_network_interface_for_ip( server_dict=server_dict, ip_address=persona.inet_fix_ipv4[0]) self.add_static_default_route( svr_dict=server_dict, network_to_add=network_for_static_route, interface=interface_to_use) self.fixture_log.debug('\n\n**** SPOKE ADDRESSING DETAILS **** \n\n') self.fixture_log.debug(addressing_details) return self.servers, self.iso_nets, self.iso_subnets def _build_hub_router(self, network_spokes): """ Build the hub router (host) with each spoke's gateway configured as a network interface on the router. :param network_spokes: [List] List of iso_subnets that define each spoke :return: VM server model representing the hub router """ port_ids = [] hub_name = 'HUB_{spokes}_{run_id}'.format( spokes=len(network_spokes), run_id=self.RUN_ID) hub = {} # Iterate across spoke (subnets), and configure each GW IP as an # interface on the 'hub' router. 
for spoke in network_spokes: network_id = spoke.network_id fixed_ips = [{'ip_address': spoke.gateway_ip, 'subnet_id': spoke.id}] port_resp = self.net.ports.behaviors.create_port( network_id=network_id, admin_state_up=True, fixed_ips=fixed_ips) self.delete_ports.append(port_resp.response.entity.id) port_ids.append(port_resp.response.entity.id) # Add public and service networks to the hub router attached_networks = [self.public_network_id, self.service_network_id] hub_svr = self.net.behaviors.create_networking_server( name=hub_name, admin_pass=self.ADMIN_PASS, network_ids=attached_networks, port_ids=port_ids) self.delete_servers.append(hub_svr.entity.id) # Store HUB server information hub[TopologyFixtureRoutines.SERVER] = hub_svr.entity hub_persona = ServerPersona(server=hub_svr.entity) hub[TopologyFixtureRoutines.PERSONA] = hub_persona proxy = NetworkProxyMgr(use_proxy=False, debug=True) proxy.set_proxy_server(hub_svr.entity) hub[TopologyFixtureRoutines.PROXY] = proxy self.fixture_log.debug("HUB INTERFACE INFO (PARTIAL)\n{0}".format( hub_persona)) # Wait for VM's public network to come online by pinging the server's # public interface attempt = 0 max_attempts = 10 hub_available = False while not hub_available and attempt < max_attempts: attempt += 1 self.fixture_log.debug( 'Verifying hub router is online. Attempt: {0} of {1}'.format( attempt, max_attempts)) try: hub_available = proxy.ping(hub_persona.pnet_fix_ipv4[0]) except Exception as err: self.fixture_log.info('PING EXCEPTION: {0}'.format(err)) hub_available = False if not hub_available: time.sleep(5) if attempt >= max_attempts: self.assertClassSetupFailure( 'Hub router (hub & spoke topology) never came online. Unable ' 'to proceed.') # Give the SSH daemon time to start up. The network is active, # but SSH is unstable at this point in the hub's provisioning. time.sleep(self.SSH_PROVISION_DELAY) # Enable the hub to do basic routing self.enable_ip_forwarding(hub) return hub
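
# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original fixture). A concrete
# test class would typically inherit from SpokeAndHubFixture and override
# NUM_OF_SPOKES; setUp() then builds the isolated spoke networks and the hub
# router and verifies ping connectivity before the test body runs. The class
# and test method names below are hypothetical.
#
# class TestThreeSpokeConnectivity(SpokeAndHubFixture):
#     NUM_OF_SPOKES = 3
#
#     def test_spokes_reach_each_other(self):
#         # Connectivity is already checked in setUp(); re-verify explicitly.
#         self.assertTrue(self.verify_ping_connectivity(),
#                         self.connectivity_error())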
the-stack_0_6064
import textwrap import unittest from stone.backends.js_client import JavascriptClientBackend from test.backend_test_util import _mock_output from stone.ir import Api, ApiNamespace, ApiRoute, Void, Int32 from stone.ir.data_types import Struct MYPY = False if MYPY: import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression class TestGeneratedJSClient(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestGeneratedJSClient, self).__init__(*args, **kwargs) def _get_api(self): # type () -> Api api = Api(version='0.1b1') api.route_schema = Struct(u'Route', 'stone_cfg', None) route1 = ApiRoute('get_metadata', 1, None) route1.set_attributes(None, ':route:`get_metadata`', Void(), Void(), Void(), {}) route2 = ApiRoute('get_metadata', 2, None) route2.set_attributes(None, ':route:`get_metadata:2`', Void(), Int32(), Void(), {}) route3 = ApiRoute('get_metadata', 3, None) route3.set_attributes(None, ':route:`get_metadata:3`', Int32(), Int32(), Void(), {}) ns = ApiNamespace('files') ns.add_route(route1) ns.add_route(route2) ns.add_route(route3) api.namespaces[ns.name] = ns return api, ns def test_route_versions(self): # type: () -> None api, _ = self._get_api() backend = JavascriptClientBackend( target_folder_path='output', args=['files', '-c', 'DropboxBase']) get_result = _mock_output(backend) backend.generate(api) result = get_result() expected = textwrap.dedent('''\ // Auto-generated by Stone, do not modify. var routes = {}; /** * get_metadata * @function DropboxBase#filesGetMetadata * @returns {Promise.<void, Error.<void>>} */ routes.filesGetMetadata = function () { return this.request("files/get_metadata", null); }; /** * get_metadata_v2 * @function DropboxBase#filesGetMetadataV2 * @returns {Promise.<number, Error.<void>>} */ routes.filesGetMetadataV2 = function () { return this.request("files/get_metadata_v2", null); }; /** * get_metadata_v3 * @function DropboxBase#filesGetMetadataV3 * @arg {number} arg - The request parameters. * @returns {Promise.<number, Error.<void>>} */ routes.filesGetMetadataV3 = function (arg) { return this.request("files/get_metadata_v3", arg); }; export { routes }; ''') assert result == expected def test_wrap_response_in_flag(self): # type: () -> None api, _ = self._get_api() backend = JavascriptClientBackend( target_folder_path='output', args=['files', '-c', 'DropboxBase', '--wrap-response-in', 'DropboxResponse']) get_result = _mock_output(backend) backend.generate(api) result = get_result() expected = textwrap.dedent('''\ // Auto-generated by Stone, do not modify. var routes = {}; /** * get_metadata * @function DropboxBase#filesGetMetadata * @returns {Promise.<DropboxResponse<void>, Error.<void>>} */ routes.filesGetMetadata = function () { return this.request("files/get_metadata", null); }; /** * get_metadata_v2 * @function DropboxBase#filesGetMetadataV2 * @returns {Promise.<DropboxResponse<number>, Error.<void>>} */ routes.filesGetMetadataV2 = function () { return this.request("files/get_metadata_v2", null); }; /** * get_metadata_v3 * @function DropboxBase#filesGetMetadataV3 * @arg {number} arg - The request parameters. 
* @returns {Promise.<DropboxResponse<number>, Error.<void>>} */ routes.filesGetMetadataV3 = function (arg) { return this.request("files/get_metadata_v3", arg); }; export { routes }; ''') assert result == expected def test_route_with_version_number_conflict(self): # type: () -> None api, ns = self._get_api() # Add a conflicting route route3 = ApiRoute('get_metadata_v2', 1, None) route3.set_attributes(None, None, Void(), Int32(), Void(), {}) ns.add_route(route3) backend = JavascriptClientBackend( target_folder_path='output', args=['files', '-c', 'DropboxBase']) with self.assertRaises(RuntimeError) as cm: backend.generate(api) self.assertTrue(str(cm.exception).startswith( 'There is a name conflict between'))
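
# ----------------------------------------------------------------------------
# Illustrative sketch (assumption: it mirrors the setup used by the tests
# above; the function name and output directory are hypothetical). Outside of
# unit tests the backend is driven the same way: build an Api object,
# instantiate JavascriptClientBackend with a target folder and an args list of
# the form ['<output filename>', '-c', '<class name>'], then call generate().
#
# def generate_js_client(api, output_dir='output'):
#     backend = JavascriptClientBackend(
#         target_folder_path=output_dir,
#         args=['files', '-c', 'DropboxBase'])
#     backend.generate(api)  # emits the generated routes module into output_dir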
the-stack_0_6065
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """lbaas version 2 api Revision ID: lbaasv2 Revises: start_neutron_lbaas Create Date: 2014-06-18 10:50:15.606420 """ # revision identifiers, used by Alembic. revision = 'lbaasv2' down_revision = 'start_neutron_lbaas' from alembic import op import sqlalchemy as sa listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", name="listener_protocolsv2") pool_protocols = sa.Enum("HTTP", "HTTPS", "TCP", name="pool_protocolsv2") sesssionpersistences_type = sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE", name="sesssionpersistences_typev2") lb_algorithms = sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP", name="lb_algorithmsv2") healthmonitors_type = sa.Enum("PING", "TCP", "HTTP", "HTTPS", name="healthmonitors_typev2") def upgrade(): op.create_table( u'lbaas_healthmonitors', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'type', healthmonitors_type, nullable=False), sa.Column(u'delay', sa.Integer(), nullable=False), sa.Column(u'timeout', sa.Integer(), nullable=False), sa.Column(u'max_retries', sa.Integer(), nullable=False), sa.Column(u'http_method', sa.String(16), nullable=True), sa.Column(u'url_path', sa.String(255), nullable=True), sa.Column(u'expected_codes', sa.String(64), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_pools', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'protocol', pool_protocols, nullable=False), sa.Column(u'lb_algorithm', lb_algorithms, nullable=False), sa.Column(u'healthmonitor_id', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.UniqueConstraint(u'healthmonitor_id'), sa.ForeignKeyConstraint([u'healthmonitor_id'], [u'lbaas_healthmonitors.id']) ) op.create_table( u'lbaas_sessionpersistences', sa.Column(u'pool_id', sa.String(36), nullable=False), sa.Column(u'type', sesssionpersistences_type, nullable=False), sa.Column(u'cookie_name', sa.String(1024), nullable=True), sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']), sa.PrimaryKeyConstraint(u'pool_id') ) op.create_table( u'lbaas_members', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'pool_id', sa.String(36), nullable=False), sa.Column(u'subnet_id', sa.String(36), nullable=True), sa.Column(u'address', sa.String(64), nullable=False), sa.Column(u'protocol_port', sa.Integer(), nullable=False), sa.Column(u'weight', sa.Integer(), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), 
sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']), sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port', name=u'uniq_pool_address_port_v2') ) op.create_table( u'lbaas_loadbalancers', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'vip_port_id', sa.String(36), nullable=True), sa.Column(u'vip_subnet_id', sa.String(36), nullable=False), sa.Column(u'vip_address', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint([u'vip_port_id'], [u'ports.id'], name=u'fk_lbaas_loadbalancers_ports_id'), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_listeners', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'protocol', listener_protocols, nullable=False), sa.Column(u'protocol_port', sa.Integer(), nullable=False), sa.Column(u'connection_limit', sa.Integer(), nullable=True), sa.Column(u'loadbalancer_id', sa.String(36), nullable=True), sa.Column(u'default_pool_id', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint([u'loadbalancer_id'], [u'lbaas_loadbalancers.id']), sa.ForeignKeyConstraint([u'default_pool_id'], [u'lbaas_pools.id']), sa.UniqueConstraint(u'default_pool_id'), sa.UniqueConstraint(u'loadbalancer_id', u'protocol_port', name=u'uniq_loadbalancer_listener_port'), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_loadbalancer_statistics', sa.Column(u'loadbalancer_id', sa.String(36), nullable=False), sa.Column(u'bytes_in', sa.BigInteger(), nullable=False), sa.Column(u'bytes_out', sa.BigInteger(), nullable=False), sa.Column(u'active_connections', sa.BigInteger(), nullable=False), sa.Column(u'total_connections', sa.BigInteger(), nullable=False), sa.PrimaryKeyConstraint(u'loadbalancer_id'), sa.ForeignKeyConstraint([u'loadbalancer_id'], [u'lbaas_loadbalancers.id']) ) def downgrade(): op.drop_table(u'lbaas_loadbalancer_statistics') op.drop_table(u'lbaas_listeners') listener_protocols.drop(op.get_bind(), checkfirst=False) op.drop_table(u'lbaas_loadbalancers') op.drop_table(u'lbaas_members') op.drop_table(u'lbaas_sessionpersistences') sesssionpersistences_type.drop(op.get_bind(), checkfirst=False) op.drop_table(u'lbaas_pools') pool_protocols.drop(op.get_bind(), checkfirst=False) lb_algorithms.drop(op.get_bind(), checkfirst=False) op.drop_table(u'lbaas_healthmonitors') healthmonitors_type.drop(op.get_bind(), checkfirst=False)
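
# ----------------------------------------------------------------------------
# Illustrative note (not part of the original migration): with an Alembic
# environment configured for this tree, the revision above is applied or
# rolled back with the standard Alembic CLI, using the revision identifiers
# declared at the top of this file, e.g.:
#
#   alembic upgrade lbaasv2
#   alembic downgrade start_neutron_lbaas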
the-stack_0_6067
import json import os import sys import argparse import shutil import logging import re from zipfile import ZipFile from google.cloud.storage import Blob, Bucket from Tests.scripts.utils.log_util import install_logging from Tests.Marketplace.marketplace_services import init_storage_client, Pack, \ load_json, store_successful_and_failed_packs_in_ci_artifacts, \ get_upload_data from Tests.Marketplace.marketplace_constants import PackStatus, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \ PACKS_FULL_PATH, IGNORED_FILES from Tests.Marketplace.upload_packs import extract_packs_artifacts, print_packs_summary, get_packs_summary LATEST_ZIP_REGEX = re.compile(fr'^{GCPConfig.GCS_PUBLIC_URL}/[\w./-]+/content/packs/([A-Za-z0-9-_.]+/\d+\.\d+\.\d+/' r'[A-Za-z0-9-_.]+\.zip$)') def get_pack_names(target_packs: str) -> set: """ Retrieves the paths of all relevant packs (that aren't ignored) Args: target_packs (str): csv packs names or `All` for all available packs in content. Returns: The list of paths of the packs """ if target_packs.lower() == "all": if os.path.exists(PACKS_FULL_PATH): all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES} logging.info(f"Number of selected packs to upload is: {len(all_packs)}") # return all available packs names return all_packs else: logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}") sys.exit(1) elif target_packs and isinstance(target_packs, str): modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES} logging.info(f"Number of selected packs to upload is: {len(modified_packs)}") # return only packs from csv list return modified_packs else: logging.error("Not correct usage of flag -p. Please check help section of upload packs script.") sys.exit(1) def copy_index(index_folder_path: str, build_index_blob: Blob, build_index_generation: str, production_bucket: Bucket, build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str): """ Copies the build bucket index to the production bucket index path. Args: index_folder_path (str): index folder full path. build_index_blob (Blob): google cloud storage object that represents build index.zip blob. build_index_generation (str): downloaded build index generation. production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where index is copied to. build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where index is copied from. storage_base_path (str): the path to upload the index to. build_bucket_base_path (str): the path in the build bucket of the index. 
""" try: build_index_blob.reload() build_current_index_generation = build_index_blob.generation # disabling caching for prod index blob prod_index_storage_path = os.path.join(storage_base_path, f"{GCPConfig.INDEX_NAME}.zip") prod_index_blob = production_bucket.blob(prod_index_storage_path) prod_index_blob.cache_control = "no-cache,max-age=0" prod_index_json_storage_path = os.path.join(storage_base_path, f"{GCPConfig.INDEX_NAME}.json") prod_index_json_blob = production_bucket.blob(prod_index_json_storage_path) prod_index_json_blob.cache_control = "no-cache,max-age=0" if build_current_index_generation == build_index_generation: copied_index = build_bucket.copy_blob( blob=build_index_blob, destination_bucket=production_bucket, new_name=prod_index_storage_path ) if copied_index.exists(): logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.") else: logging.error("Failed copying index.zip from build index - blob does not exist.") sys.exit(1) copied_index_json_blob = build_bucket.blob( os.path.join(build_bucket_base_path, f"{GCPConfig.INDEX_NAME}.json") ) copied_index_json = build_bucket.copy_blob( blob=copied_index_json_blob, destination_bucket=production_bucket, new_name=prod_index_json_storage_path ) if copied_index_json.exists(): logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.json to storage.") else: logging.error("Failed copying index.json from build index - blob does not exist.") sys.exit(1) else: logging.error(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation") logging.error(f"Downloaded build index generation: {build_index_generation}") logging.error(f"Current build index generation: {build_current_index_generation}") sys.exit(1) except Exception as e: logging.exception(f"Failed copying {GCPConfig.INDEX_NAME}. Additional Info: {str(e)}") sys.exit(1) finally: shutil.rmtree(index_folder_path) def upload_core_packs_config(production_bucket: Bucket, build_number: str, extract_destination_path: str, build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str): """Uploads the corepacks.json file to the target bucket. This files contains all of the server's core packs, under the key corepacks, and specifies which core packs should be upgraded upon XSOAR upgrade, under the key upgradeCorePacks. Args: production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded. build_number (str): CircleCI build number. extract_destination_path (str): Full path of folder to extract the corepacks file build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is downloaded from. storage_base_path (str): the path to upload the corepacks.json to. build_bucket_base_path (str): the path in the build bucket of the corepacks.json. 
""" # download the corepacks.json stored in the build bucket to temp dir build_corepacks_file_path = os.path.join(build_bucket_base_path, GCPConfig.CORE_PACK_FILE_NAME) build_corepacks_blob = build_bucket.blob(build_corepacks_file_path) if not build_corepacks_blob.exists(): logging.critical(f"{GCPConfig.CORE_PACK_FILE_NAME} is missing in {build_bucket.name} bucket, exiting...") sys.exit(1) temp_corepacks_file_path = os.path.join(extract_destination_path, GCPConfig.CORE_PACK_FILE_NAME) build_corepacks_blob.download_to_filename(temp_corepacks_file_path) corepacks_file = load_json(temp_corepacks_file_path) # change the storage paths to the prod bucket corepacks_list = corepacks_file.get('corePacks', []) try: corepacks_list = [os.path.join(GCPConfig.GCS_PUBLIC_URL, production_bucket.name, storage_base_path, LATEST_ZIP_REGEX.findall(corepack_path)[0]) for corepack_path in corepacks_list] except IndexError: corepacks_list_str = '\n'.join(corepacks_list) logging.exception(f"GCS paths in build bucket corepacks.json file are not of format: " f"{GCPConfig.GCS_PUBLIC_URL}/<BUCKET_NAME>/.../content/packs/...\n" f"List of build bucket corepacks paths:\n{corepacks_list_str}") sys.exit(1) # construct core pack data with public gcs urls core_packs_data = { 'corePacks': corepacks_list, 'upgradeCorePacks': corepacks_file.get('upgradeCorePacks', []), 'buildNumber': build_number } # upload core pack json file to gcs prod_corepacks_file_path = os.path.join(storage_base_path, GCPConfig.CORE_PACK_FILE_NAME) prod_corepacks_blob = production_bucket.blob(prod_corepacks_file_path) prod_corepacks_blob.upload_from_string(json.dumps(core_packs_data, indent=4)) logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.") def download_and_extract_index(build_bucket: Bucket, extract_destination_path: str, build_bucket_base_path: str): """Downloads and extracts production and build indexes zip from cloud storage. Args: build_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where build index.zip is stored. extract_destination_path (str): the full path of extract folder. build_bucket_base_path (str): the path in the build bucket of the index. Returns: str: extracted build index folder full path. Blob: google cloud storage object that represents prod index.zip blob. Blob: google cloud storage object that represents build index.zip blob. str: downloaded prod index generation. str: downloaded build index generation. 
""" build_index_storage_path = os.path.join(build_bucket_base_path, f"{GCPConfig.INDEX_NAME}.zip") download_build_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip") build_index_blob = build_bucket.blob(build_index_storage_path) build_index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME) if not os.path.exists(extract_destination_path): os.mkdir(extract_destination_path) if not build_index_blob.exists(): logging.error(f"No build index was found in path: {build_index_storage_path}") sys.exit(1) build_index_blob.reload() build_index_generation = build_index_blob.generation build_index_blob.download_to_filename(download_build_index_path, if_generation_match=build_index_generation) if os.path.exists(download_build_index_path): with ZipFile(download_build_index_path, 'r') as index_zip: index_zip.extractall(extract_destination_path) if not os.path.exists(build_index_folder_path): logging.error(f"Failed creating build {GCPConfig.INDEX_NAME} folder with extracted data.") sys.exit(1) os.remove(download_build_index_path) logging.success(f"Finished downloading and extracting build {GCPConfig.INDEX_NAME} file to " f"{extract_destination_path}") return build_index_folder_path, build_index_blob, build_index_generation else: logging.error(f"Failed to download build {GCPConfig.INDEX_NAME}.zip file from cloud storage.") sys.exit(1) def copy_id_set(production_bucket: Bucket, build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str): """ Copies the id_set.json artifact from the build bucket to the production bucket. Args: production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where id_set is copied to. build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where id_set is copied from. storage_base_path (str): the path to upload the id_set.json to. build_bucket_base_path (str): the path in the build bucket of the id_set.json. """ build_id_set_path = os.path.join(os.path.dirname(build_bucket_base_path), 'id_set.json') build_id_set_blob = build_bucket.blob(build_id_set_path) if not build_id_set_blob.exists(): logging.error(f"id_set.json file does not exists in build bucket in path: {build_id_set_path}") sys.exit(1) prod_id_set_path = os.path.join(os.path.dirname(storage_base_path), 'id_set.json') try: copied_blob = build_bucket.copy_blob( blob=build_id_set_blob, destination_bucket=production_bucket, new_name=prod_id_set_path ) if not copied_blob.exists(): logging.error(f"Failed to upload id_set.json to {prod_id_set_path}") sys.exit(1) else: logging.success("Finished uploading id_set.json to storage.") except Exception as e: logging.exception(f"Failed copying ID Set. 
Additional Info: {str(e)}") sys.exit(1) def verify_copy(successful_packs: list, pc_successful_packs_dict: dict): """ Verify that all uploaded packs from Prepare were copied & verify that no packs were mistakenly copied Args: successful_packs: The packs that were copied successfully pc_successful_packs_dict: The pack that were uploaded successfully in Prepare Content """ pc_successful_packs_names = {*pc_successful_packs_dict} successful_packs_names = {pack.name for pack in successful_packs} not_uploaded = [pack for pack in pc_successful_packs_names if pack not in successful_packs_names] mistakenly_uploaded = [pack for pack in successful_packs_names if pack not in pc_successful_packs_names] error_str = "Mismatch in Prepare Content successful packs and Upload successful packs\n" error_str += f"Packs not copied: {', '.join(not_uploaded)}\n" if not_uploaded else "" error_str += f"Packs mistakenly copied: {', '.join(mistakenly_uploaded)}\n" if mistakenly_uploaded else "" assert not not_uploaded and not mistakenly_uploaded, error_str def check_if_need_to_upload(pc_successful_packs_dict: dict, pc_failed_packs_dict: dict, pc_successful_private_packs_dict: dict, pc_uploaded_images: dict): """ If the three dicts are empty then no upload was done in Prepare Content step, so we need to skip uploading Args: pc_successful_packs_dict: The successful packs dict pc_failed_packs_dict: The failed packs dict pc_successful_private_packs_dict : The successful private packs dict pc_uploaded_images: The image data dict """ if not pc_successful_packs_dict and not pc_failed_packs_dict and not pc_successful_private_packs_dict and not \ pc_uploaded_images: logging.warning("Production bucket is updated with origin/master.") logging.warning("Skipping Upload To Marketplace Storage Step.") sys.exit(0) def options_handler(): """ Validates and parses script arguments. Returns: Namespace: Parsed arguments object. """ parser = argparse.ArgumentParser(description="Store packs in cloud storage.") # disable-secrets-detection-start parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True) parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True) parser.add_argument('-pb', '--production_bucket_name', help="Production bucket name", required=True) parser.add_argument('-bb', '--build_bucket_name', help="CircleCI Build bucket name", required=True) parser.add_argument('-s', '--service_account', help=("Path to gcloud service account, is for circleCI usage. " "For local development use your personal account and " "authenticate using Google Cloud SDK by running: " "`gcloud auth application-default login` and leave this parameter blank. " "For more information go to: " "https://googleapis.dev/python/google-api-core/latest/auth.html"), required=False) parser.add_argument('-p', '--pack_names', help=("Target packs to upload to gcs. 
Optional values are: `All`" " or csv list of packs " "Default is set to `All`"), required=False, default="All") parser.add_argument('-n', '--ci_build_number', help="CircleCi build number (will be used as hash revision at index file)", required=True) parser.add_argument('-c', '--circle_branch', help="CircleCi branch of current build", required=True) parser.add_argument('-pbp', '--production_base_path', help="Production base path of the directory to upload to.", required=False) # disable-secrets-detection-end return parser.parse_args() def main(): install_logging('Copy_and_Upload_Packs.log') options = options_handler() packs_artifacts_path = options.artifacts_path extract_destination_path = options.extract_path production_bucket_name = options.production_bucket_name build_bucket_name = options.build_bucket_name service_account = options.service_account build_number = options.ci_build_number circle_branch = options.circle_branch production_base_path = options.production_base_path target_packs = options.pack_names # Google cloud storage client initialized storage_client = init_storage_client(service_account) production_bucket = storage_client.bucket(production_bucket_name) build_bucket = storage_client.bucket(build_bucket_name) # Initialize build and prod base paths build_bucket_path = os.path.join(GCPConfig.BUILD_PATH_PREFIX, circle_branch, build_number) build_bucket_base_path = os.path.join(build_bucket_path, GCPConfig.CONTENT_PACKS_PATH) # Relevant when triggering test upload flow if production_bucket_name: GCPConfig.PRODUCTION_BUCKET = production_bucket_name # Download and extract build index from build and prod buckets build_index_folder_path, build_index_blob, build_index_generation = \ download_and_extract_index(build_bucket, extract_destination_path, build_bucket_base_path) # Get the successful and failed packs file from Prepare Content step in Create Instances job if there are packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE) pc_successful_packs_dict, pc_failed_packs_dict, pc_successful_private_packs_dict, \ pc_uploaded_images = get_upload_data(packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING) logging.debug(f"Successful packs from Prepare Content: {pc_successful_packs_dict}") logging.debug(f"Failed packs from Prepare Content: {pc_failed_packs_dict}") logging.debug(f"Successful private packs from Prepare Content: {pc_successful_private_packs_dict}") logging.debug(f"Images from Prepare Content: {pc_uploaded_images}") # Check if needs to upload or not check_if_need_to_upload(pc_successful_packs_dict, pc_failed_packs_dict, pc_successful_private_packs_dict, pc_uploaded_images) # Detect packs to upload pack_names = get_pack_names(target_packs) extract_packs_artifacts(packs_artifacts_path, extract_destination_path) packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names if os.path.exists(os.path.join(extract_destination_path, pack_name))] # Starting iteration over packs for pack in packs_list: # Indicates whether a pack has failed to upload on Prepare Content step task_status, pack_status = pack.is_failed_to_upload(pc_failed_packs_dict) if task_status: pack.status = pack_status pack.cleanup() continue task_status = pack.load_user_metadata() if not task_status: pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name pack.cleanup() continue task_status = pack.copy_integration_images( production_bucket, build_bucket, pc_uploaded_images, 
production_base_path, build_bucket_base_path) if not task_status: pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name pack.cleanup() continue task_status = pack.copy_author_image( production_bucket, build_bucket, pc_uploaded_images, production_base_path, build_bucket_base_path) if not task_status: pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name pack.cleanup() continue task_status, skipped_pack_uploading = pack.copy_and_upload_to_storage( production_bucket, build_bucket, pc_successful_packs_dict, production_base_path, build_bucket_base_path) if skipped_pack_uploading: pack.status = PackStatus.PACK_ALREADY_EXISTS.name pack.cleanup() continue if not task_status: pack.status = PackStatus.FAILED_UPLOADING_PACK.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name # upload core packs json to bucket upload_core_packs_config(production_bucket, build_number, extract_destination_path, build_bucket, production_base_path, build_bucket_base_path) # finished iteration over content packs copy_index(build_index_folder_path, build_index_blob, build_index_generation, production_bucket, build_bucket, production_base_path, build_bucket_base_path) # upload id_set.json to bucket copy_id_set(production_bucket, build_bucket, production_base_path, build_bucket_base_path) # get the lists of packs divided by their status successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list) # Store successful and failed packs list in CircleCI artifacts store_successful_and_failed_packs_in_ci_artifacts( packs_results_file_path, BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE, successful_packs, failed_packs, list(pc_successful_private_packs_dict) ) # verify that the successful from Prepare content and are the ones that were copied verify_copy(successful_packs, pc_successful_packs_dict) # summary of packs status print_packs_summary(successful_packs, skipped_packs, failed_packs) if __name__ == '__main__': main()
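
# ----------------------------------------------------------------------------
# Illustrative invocation (the script filename, bucket names and file paths
# below are hypothetical placeholders; the flags are the ones defined in
# options_handler() above):
#
#   python copy_and_upload_packs.py \
#       -a /path/to/content_packs.zip \
#       -e /tmp/extracted_packs \
#       -pb <production-bucket-name> \
#       -bb <build-bucket-name> \
#       -s /path/to/service_account.json \
#       -n "$CI_BUILD_NUMBER" \
#       -c "$CI_BRANCH" \
#       -pbp content/packs \
#       -p All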
the-stack_0_6069
# (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os import mock import pytest from datadog_checks.base.utils.common import get_docker_hostname from datadog_checks.dev.kube_port_forward import port_forward from datadog_checks.dev.terraform import terraform_run from .common import ADDL_AGENT_METRICS, AGENT_DEFAULT_METRICS, OPERATOR_AWS_METRICS, OPERATOR_METRICS try: from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack HERE = os.path.dirname(os.path.abspath(__file__)) HOST = get_docker_hostname() AGENT_PORT = 9090 OPERATOR_PORT = 6942 AGENT_URL = "http://{}:{}/metrics".format(HOST, AGENT_PORT) OPERATOR_URL = "http://{}:{}/metrics".format(HOST, OPERATOR_PORT) PORTS = [AGENT_PORT, OPERATOR_PORT] @pytest.fixture(scope='session') def dd_environment(): with terraform_run(os.path.join(HERE, 'terraform')) as outputs: kubeconfig = outputs['kubeconfig']['value'] with ExitStack() as stack: ip_ports = [ stack.enter_context(port_forward(kubeconfig, 'cilium', 'cilium-operator', port)) for port in PORTS ] instances = { 'instances': [ { 'agent_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[0]), 'metrics': ADDL_AGENT_METRICS + AGENT_DEFAULT_METRICS, }, { 'operator_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[1]), 'metrics': OPERATOR_METRICS + OPERATOR_AWS_METRICS, }, ] } yield instances @pytest.fixture(scope="session") def agent_instance(): return {'agent_endpoint': AGENT_URL, 'tags': ['pod_test']} @pytest.fixture def operator_instance(): return {'operator_endpoint': OPERATOR_URL, 'tags': ['operator_test']} @pytest.fixture() def mock_agent_data(): f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'agent_metrics.txt') with open(f_name, 'r') as f: text_data = f.read() with mock.patch( 'requests.get', return_value=mock.MagicMock( status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"} ), ): yield @pytest.fixture() def mock_operator_data(): f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'operator_metrics.txt') with open(f_name, 'r') as f: text_data = f.read() with mock.patch( 'requests.get', return_value=mock.MagicMock( status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"} ), ): yield
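
# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical test, not part of this conftest): tests in
# this package consume the fixtures above simply by naming them as arguments.
# For example, a unit test could pair `agent_instance` with `mock_agent_data`
# so the check parses the canned metrics fixture instead of a live endpoint
# (the Cilium check class, its constructor signature and the metric name are
# assumptions, not defined in this file):
#
# def test_agent_check(aggregator, agent_instance, mock_agent_data):
#     check = Cilium('cilium', {}, [agent_instance])
#     check.check(agent_instance)
#     aggregator.assert_metric('cilium.endpoint.count')  # metric name assumed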
the-stack_0_6072
import logging from django.apps import apps from django.db.utils import OperationalError, ProgrammingError from django.utils import six from django.utils.translation import ugettext_lazy as _ from mayan.apps.common.class_mixins import AppsModuleLoaderMixin from mayan.apps.common.classes import PropertyHelper from mayan.apps.templating.classes import Template from .exceptions import WorkflowStateActionError __all__ = ('WorkflowAction',) logger = logging.getLogger(name=__name__) class DocumentStateHelper(PropertyHelper): @staticmethod @property def constructor(*args, **kwargs): return DocumentStateHelper(*args, **kwargs) def get_result(self, name): return self.instance.workflows.get(workflow__internal_name=name) class WorkflowActionMetaclass(type): _registry = {} def __new__(mcs, name, bases, attrs): new_class = super(WorkflowActionMetaclass, mcs).__new__( mcs, name, bases, attrs ) if not new_class.__module__ == __name__: mcs._registry[ '{}.{}'.format(new_class.__module__, name) ] = new_class return new_class class WorkflowActionBase(AppsModuleLoaderMixin): fields = () class WorkflowAction( six.with_metaclass(WorkflowActionMetaclass, WorkflowActionBase) ): _loader_module_name = 'workflow_actions' previous_dotted_paths = () @classmethod def load_modules(cls): super().load_modules() for action_class in WorkflowAction.get_all(): action_class.migrate() @classmethod def clean(cls, request, form_data=None): return form_data @classmethod def get(cls, name): return cls._registry[name] @classmethod def get_all(cls): return sorted(cls._registry.values(), key=lambda x: x.label) @classmethod def get_choices(cls): apps_name_map = { app.name: app for app in apps.get_app_configs() } # Match each workflow action to an app apps_workflow_action_map = {} for klass in WorkflowAction.get_all(): for app_name, app in apps_name_map.items(): if klass.__module__.startswith(app_name): apps_workflow_action_map.setdefault(app, []) apps_workflow_action_map[app].append((klass.id(), klass.label)) result = [ (app.verbose_name, workflow_actions) for app, workflow_actions in apps_workflow_action_map.items() ] # Sort by app, then by workflow action return sorted(result, key=lambda x: (x[0], x[1])) @classmethod def id(cls): return '{}.{}'.format(cls.__module__, cls.__name__) @classmethod def migrate(cls): WorkflowStateAction = apps.get_model( app_label='document_states', model_name='WorkflowStateAction' ) for previous_dotted_path in cls.previous_dotted_paths: try: WorkflowStateAction.objects.filter( action_path=previous_dotted_path ).update(action_path=cls.id()) except (OperationalError, ProgrammingError): # Ignore errors during the database migration and # quit further attempts. return def __init__(self, form_data=None): self.form_data = form_data def get_form_schema(self, workflow_state, request=None): result = { 'fields': self.fields or {}, 'media': getattr(self, 'media', {}), 'widgets': getattr(self, 'widgets', {}), } if hasattr(self, 'field_order'): result['field_order'] = self.field_order return result def render_field(self, field_name, context): try: result = Template( template_string=self.form_data.get(field_name, '') ).render( context=context ) except Exception as exception: raise WorkflowStateActionError( _('%(field_name)s template error: %(exception)s') % { 'field_name': field_name, 'exception': exception } ) logger.debug('%s template result: %s', field_name, result) return result
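
# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical subclass; the ``fields`` schema layout and
# the ``execute`` hook are assumptions, while ``label``, ``fields`` and
# ``render_field`` come from the base class above). A concrete action declares
# a label (used by get_all()/get_choices()) and optional form fields, and can
# use render_field() to resolve templated form data against a context:
#
# class LogCommentAction(WorkflowAction):
#     label = _('Log a templated comment')
#     fields = {
#         'comment': {'label': _('Comment template'), 'required': False},
#     }
#
#     def execute(self, context):
#         logger.info(self.render_field(field_name='comment', context=context))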
the-stack_0_6073
# Gamma is a discrete RandomVariable that represents
# the instantaneous values of a model parameter
# to be embedded into continuous space
# parameters:
#
#   stencil: list of values that the parameter takes
#   alphas: probabilities of taking each value.
# For example, stencil = [2, 3] and alphas = [0.2, 0.8]
# means the random variable takes the value 2 with prob 0.2
# and the value 3 with prob 0.8.
import numpy as np
import math
import random


class Gamma():
    def __init__(self, stencil, alphas):
        self.stencil = stencil
        self.alphas = alphas

        assert(len(stencil) > 0)
        assert(len(alphas) == len(stencil))
        assert(sum(alphas) <= 1.0 + 1e-6)  # all probabilities should sum to 1

        # instantaneous and mean values
        self.value = self.stencil[0]
        self.mean_value = sum([(stencil[i] * alphas[i]) for i in range(len(stencil))])

    # update and return the instantaneous value:
    def get(self):
        v = np.random.choice(self.stencil, p=self.alphas)
        self.value = v
        return v


def Test():
    gamma = Gamma([2, 3, 4], [0.4, 0.4, 0.2])
    for i in range(20):
        print(gamma.get())
    print("Mean=", gamma.mean_value)
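
# Worked example (follows directly from the definition of mean_value above):
# for Test()'s Gamma([2, 3, 4], [0.4, 0.4, 0.2]) the mean is
#     2*0.4 + 3*0.4 + 4*0.2 = 0.8 + 1.2 + 0.8 = 2.8
# so the printed "Mean=" line reads 2.8, while get() returns 2, 3 or 4 with
# probabilities 0.4, 0.4 and 0.2 respectively.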
the-stack_0_6077
# # Copyright 2018 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import time from itertools import chain from pandas import DatetimeIndex from pandas.tseries.holiday import ( GoodFriday, USLaborDay, USPresidentsDay, USThanksgivingDay, ) from pytz import timezone from .trading_calendar import TradingCalendar, HolidayCalendar from .us_holidays import ( USNewYearsDay, USMartinLutherKingJrAfter1998, USMemorialDay, USIndependenceDay, Christmas, MonTuesThursBeforeIndependenceDay, FridayAfterIndependenceDayExcept2013, USBlackFridayBefore1993, USBlackFridayInOrAfter1993, September11Closings, HurricaneSandyClosings, USNationalDaysofMourning, ChristmasEveBefore1993, ChristmasEveInOrAfter1993, ) # Useful resources for making changes to this file: # http://www.nyse.com/pdfs/closings.pdf # http://www.stevemorse.org/jcal/whendid.html class XNYSExchangeCalendar(TradingCalendar): """ Exchange calendar for the New York Stock Exchange (XNYS). Open Time: 9:31 AM, US/Eastern Close Time: 4:00 PM, US/Eastern Regularly-Observed Holidays: - New Years Day (observed on monday when Jan 1 is a Sunday) - Martin Luther King Jr. Day (3rd Monday in January, only after 1998) - Washington's Birthday (aka President's Day, 3rd Monday in February) - Good Friday (two days before Easter Sunday) - Memorial Day (last Monday in May) - Independence Day (observed on the nearest weekday to July 4th) - Labor Day (first Monday in September) - Thanksgiving (fourth Thursday in November) - Christmas (observed on nearest weekday to December 25) NOTE: The NYSE does not observe the following US Federal Holidays: - Columbus Day - Veterans Day Regularly-Observed Early Closes: - July 3rd (Mondays, Tuesdays, and Thursdays, 1995 onward) - July 5th (Fridays, 1995 onward, except 2013) - Christmas Eve (except on Fridays, when the exchange is closed entirely) - Day After Thanksgiving (aka Black Friday, observed from 1992 onward) NOTE: Until 1993, the standard early close time for the NYSE was 2:00 PM. From 1993 onward, it has been 1:00 PM. Additional Irregularities: - Closed from 9/11/2001 to 9/16/2001 due to terrorist attacks in NYC. - Closed on 10/29/2012 and 10/30/2012 due to Hurricane Sandy. - Closed on 4/27/1994 due to Richard Nixon's death. - Closed on 6/11/2004 due to Ronald Reagan's death. - Closed on 1/2/2007 due to Gerald Ford's death. - Closed at 1:00 PM on Wednesday, July 3rd, 2013 - Closed at 1:00 PM on Friday, December 31, 1999 - Closed at 1:00 PM on Friday, December 26, 1997 - Closed at 1:00 PM on Friday, December 26, 2003 NOTE: The exchange was **not** closed early on Friday December 26, 2008, nor was it closed on Friday December 26, 2014. The next Thursday Christmas will be in 2025. If someone is still maintaining this code in 2025, then we've done alright...and we should check if it's a half day. 
""" regular_early_close = time(13) name = 'XNYS' tz = timezone('US/Eastern') open_times = ( (None, time(9, 31)), ) close_times = ( (None, time(16)), ) @property def regular_holidays(self): return HolidayCalendar([ USNewYearsDay, USMartinLutherKingJrAfter1998, USPresidentsDay, GoodFriday, USMemorialDay, USIndependenceDay, USLaborDay, USThanksgivingDay, Christmas, ]) @property def adhoc_holidays(self): return list(chain( September11Closings, HurricaneSandyClosings, USNationalDaysofMourning, )) @property def special_closes(self): return [ (self.regular_early_close, HolidayCalendar([ MonTuesThursBeforeIndependenceDay, FridayAfterIndependenceDayExcept2013, USBlackFridayInOrAfter1993, ChristmasEveInOrAfter1993 ])), (time(14), HolidayCalendar([ ChristmasEveBefore1993, USBlackFridayBefore1993, ])), ] @property def special_closes_adhoc(self): return [ ( self.regular_early_close, DatetimeIndex( [ '1997-12-26', '1999-12-31', '2003-12-26', '2013-07-03', ], tz='UTC', ) ) ]
the-stack_0_6078
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import unittest import numpy as np from extensions.ops.reorgyolo import ReorgYoloOp from mo.front.common.extractors.utils import layout_attrs from mo.graph.graph import Node from unit_tests.utils.graph import build_graph nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, 'reorg': {'type': 'ReorgYolo', 'kind': 'op'}, 'node_3': {'type': 'Identity', 'kind': 'op'}, 'op_output': { 'kind': 'op', 'op': 'Result'} } def calculate_reorgyolo_output(input, stride): output = np.full_like(input, -1, dtype=np.int64) output[0] = input[0] output[1] = input[1] * stride ** 2 output[2] = np.round(input[2] / stride) output[3] = np.round(input[3] / stride) return output class TestReorgYOLO(unittest.TestCase): def test_reorgyolo_infer(self): graph = build_graph(nodes_attributes, [('node_1', 'reorg'), ('reorg', 'node_3'), ('node_3', 'op_output') ], {'node_3': {'shape': None}, 'node_1': {'shape': np.array([1, 3, 227, 227])}, 'reorg': {'stride': 2, **layout_attrs()} }) reorg_node = Node(graph, 'reorg') ReorgYoloOp.reorgyolo_infer(reorg_node) exp_shape = calculate_reorgyolo_output(np.array([1, 3, 227, 227]), 2) res_shape = graph.node['node_3']['shape'] for i in range(0, len(exp_shape)): self.assertEqual(exp_shape[i], res_shape[i])
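
# Worked example of calculate_reorgyolo_output() with the values used in the
# test above: for input shape [1, 3, 227, 227] and stride 2,
#     batch    -> 1
#     channels -> 3 * 2**2       = 12
#     height   -> round(227 / 2) = round(113.5) = 114  (numpy rounds half to even)
#     width    -> round(227 / 2) = 114
# i.e. the expected ReorgYolo output shape is [1, 12, 114, 114].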
the-stack_0_6079
import numpy import theano from theano import tensor from theano.tests.breakpoint import PdbBreakpoint from theano.tests import unittest_tools as utt from theano.tensor.tests import test_basic import theano.sandbox.gpuarray from .. import basic_ops from ..type import GpuArrayType, gpuarray_shared_constructor, get_context from ..basic_ops import GpuAlloc, GpuReshape, GpuFromHost, host_from_gpu from ..elemwise import GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise from ..subtensor import GpuSubtensor from .config import mode_with_gpu, test_ctx_name def test_local_assert(): x = theano.tensor.fmatrix() a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any()) f = theano.function([x], a, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)] assert len(a_op) == 1 assert isinstance(a_op[0].inputs[0].type, GpuArrayType) def test_local_remove_all_assert(): x = theano.tensor.fmatrix() a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any()) # By default `unsafe` should not be there f = theano.function([x], a, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)] assert len(a_op) == 1 # Put `unsafe` f = theano.function([x], a, mode=mode_with_gpu.including('unsafe')) topo = f.maker.fgraph.toposort() a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)] assert len(a_op) == 0 # Remove `unsafe` f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe')) topo = f.maker.fgraph.toposort() a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)] assert len(a_op) == 1 def test_local_gpu_contiguous_gpu_contiguous(): a = tensor.fmatrix() o1 = basic_ops.gpu_contiguous(a) o2 = basic_ops.gpu_contiguous(o1) f1 = theano.function([a], o1, mode=mode_with_gpu) f2 = theano.function([a], o2, mode=mode_with_gpu) assert 1 == len([node for node in f1.maker.fgraph.toposort() if isinstance(node.op, basic_ops.GpuContiguous)]) assert 1 == len([node for node in f2.maker.fgraph.toposort() if isinstance(node.op, basic_ops.GpuContiguous)]) def test_flatten(): m = theano.tensor.fmatrix() f = theano.function([m], m.flatten(), mode=mode_with_gpu) val = numpy.random.rand(10, 11).astype("float32") res = f(val) utt.assert_allclose(res, val.flatten()) assert res.shape == val.flatten().shape assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()] val = numpy.random.rand(10, 11).astype("float32") res = f(val) utt.assert_allclose(res, val.flatten()) assert res.shape == val.flatten().shape assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()] f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu) val = numpy.random.rand(10, 11).astype("float32") res = f(val) utt.assert_allclose(res, val) assert res.shape == val.shape assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()] m = theano.tensor.tensor3() f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu) val = numpy.random.rand(10, 11, 12).astype("float32") res = f(val) utt.assert_allclose(res, val.reshape(10, -1)) assert res.shape == val.reshape(10, -1).shape assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()] def test_reduce(): kind = get_context(test_ctx_name).kind for method, param in [('sum', dict(acc_dtype='float32')), ('prod', dict(acc_dtype='float32')), ('max', {}), ('min', {})]: m = theano.tensor.fmatrix() f = theano.function([m], getattr(m, method)(axis=0, **param), mode=mode_with_gpu) val = 
numpy.random.rand(10, 11).astype("float32") res = f(val) utt.assert_allclose(res, getattr(val, method)(axis=0)) assert res.shape == (11,) topo = f.maker.fgraph.toposort() ops = [type(node.op) for node in topo] if kind == 'opencl' and method in ["max", "min"]: assert not(GpuCAReduceCuda in ops or GpuCAReduceCPY in ops) else: assert GpuCAReduceCuda in ops or GpuCAReduceCPY in ops def test_local_gpualloc_memset_0(): i = theano.tensor.iscalar() z = numpy.zeros((1,), dtype='float32') o = numpy.ones((1,), dtype='float32') ones = numpy.ones((2,), dtype='float32') # Test with 0 a = GpuAlloc(test_ctx_name)(z, i) f = theano.function([i], a, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, GpuAlloc) and topo[0].op.memset_0 assert (numpy.asarray(f(6)) == 0).all() # Test with 1 a = GpuAlloc(test_ctx_name)(o, i) f = theano.function([i], a, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, GpuAlloc) assert not topo[0].op.memset_0 assert (numpy.asarray(f(6)) == 1).all() # Test with 1, 1 a = GpuAlloc(test_ctx_name)(ones, i) f = theano.function([i], a, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, GpuAlloc) assert not topo[0].op.memset_0 assert (numpy.asarray(f(2)) == 1).all() def test_rebroadcast(): d = numpy.random.rand(10, 10).astype('float32') v = theano.tensor.fmatrix() up = tensor.unbroadcast(v.sum().dimshuffle('x', 'x'), 0, 1) f = theano.function([v], [up], mode=mode_with_gpu) f(d) topo = f.maker.fgraph.toposort() rebrs = [node for node in topo if isinstance(node.op, tensor.Rebroadcast)] assert len(rebrs) == 1 rebr = rebrs[0] assert isinstance(rebr.inputs[0].type, GpuArrayType) assert isinstance(rebr.outputs[0].type, GpuArrayType) class TestSpecifyShape(test_basic.TestSpecifyShape): mode = mode_with_gpu input_type = GpuArrayType def test_print_op(): """ Test that print ops don't block gpu optimization""" b = tensor.fmatrix() f = theano.function([b], theano.printing.Print()(b) * 2, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, GpuFromHost) assert isinstance(topo[1].op, theano.printing.Print) assert isinstance(topo[2].op, GpuElemwise) assert topo[3].op == host_from_gpu f(numpy.random.random((5, 5)).astype('float32')) def test_pdbbreakpoint_op(): """ Test that PdbBreakpoint ops don't block gpu optimization""" b = tensor.fmatrix() # Create a function composed of a breakpoint followed by # some computation condition = tensor.gt(b.sum(), 0) b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b) output = b_monitored ** 2 f = theano.function([b], output, mode=mode_with_gpu) # Ensure that, in the compiled function, the computation following the # breakpoint has been moved to the gpu. topo = f.maker.fgraph.toposort() assert isinstance(topo[-2].op, GpuElemwise) assert topo[-1].op == host_from_gpu def test_local_gpu_elemwise_careduce(): x = theano.tensor.matrix() o = (x * x).sum() f = theano.function([x], o, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert len(topo) == 3 assert topo[1].op.pre_scalar_op == theano.scalar.sqr f(numpy.random.rand(3, 4).astype(theano.config.floatX)) def test_local_gpu_subtensor(): # Test shared forced on CPU. 
t = tensor._shared(numpy.zeros(20, "float32")) f = theano.function([], t[3:4], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert any([type(node.op) is tensor.Subtensor for node in topo]) assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) # Test graph input. t = tensor.fmatrix() f = theano.function([t], t[3:4], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert any([type(node.op) is tensor.Subtensor for node in topo]) assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) # Test multiple use of the input # We want the subtensor to be on the GPU to prevent multiple transfer. t = tensor.fmatrix() f = theano.function([t], [t[3:4], t + 1], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert not any([type(node.op) is tensor.Subtensor for node in topo]) assert any([isinstance(node.op, GpuSubtensor) for node in topo]) # Test multiple use of the input + input as output # We want the subtensor to be on the GPU to prevent multiple transfer. t = tensor.fmatrix() f = theano.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert not any([type(node.op) is tensor.Subtensor for node in topo]) assert any([isinstance(node.op, GpuSubtensor) for node in topo]) # Test shared forced on CPU end we do computation on the output of # the subtensor. t = tensor._shared(numpy.zeros(20, "float32")) f = theano.function([], t[3:4] + 1, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert any([type(node.op) is tensor.Subtensor for node in topo]) assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuElemwise) for node in topo]) def test_local_gpu_elemwise(): """ Test local_gpu_elemwise when there is a dtype upcastable to float32 """ a = tensor.bmatrix() b = tensor.fmatrix() c = tensor.fmatrix() a_v = (numpy.random.rand(4, 5) * 10).astype("int8") b_v = (numpy.random.rand(4, 5) * 10).astype("float32") c_v = (numpy.random.rand(4, 5) * 10).astype("float32") # Due to optimization order, this composite is created when all # the op are on the gpu. 
f = theano.function([a, b, c], a + b + c, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1 assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0 utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v) # Now test with the composite already on the cpu before we move it # to the gpu a_s = theano.scalar.int8() b_s = theano.scalar.float32() c_s = theano.scalar.float32() out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s]) out_op = tensor.Elemwise(out_s) f = theano.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1 assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0 utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v) return # Not yet implemeted # Test multiple output a_s = theano.scalar.float32() a = tensor.fmatrix() from theano.scalar.basic import identity out_s = theano.scalar.Composite([a_s, b_s, c_s], [identity(a_s), identity(c_s), identity(b_s)]) outs_op = tensor.Elemwise(out_s) f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1 assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0 out = f(a_v, b_v, c_v) utt.assert_allclose(out[0], a_v) utt.assert_allclose(out[1], c_v) utt.assert_allclose(out[2], b_v) # Test multiple output out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * b_s]) outs_op = tensor.Elemwise(out_s) f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu) topo = f.maker.fgraph.toposort() assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1 assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0 out = f(a_v, b_v, c_v) utt.assert_allclose(out[0], a_v + b_v) utt.assert_allclose(out[1], a_v * c_v) # Test non-contiguous input c = gpuarray_shared_constructor(numpy.asarray(c_v, dtype='float32')) f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]), mode=mode_with_gpu) out = f(a_v, b_v) utt.assert_allclose(out[0], a_v[::2] + b_v[::2]) utt.assert_allclose(out[1], a_v[::2] * c_v[::2])
the-stack_0_6080
import math import torch import torch.nn as nn from .utils import to_cpu # This new loss function is based on https://github.com/ultralytics/yolov3/blob/master/utils/loss.py def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9): # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 box2 = box2.T # Get the coordinates of bounding boxes if x1y1x2y2: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] else: # transform from xywh to xyxy b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) # Union Area w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps union = w1 * h1 + w2 * h2 - inter + eps iou = inter / union if GIoU or DIoU or CIoU: # convex (smallest enclosing box) width cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared if DIoU: return iou - rho2 / c2 # DIoU elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * \ torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / ((1 + eps) - iou + v) return iou - (rho2 / c2 + v * alpha) # CIoU else: # GIoU https://arxiv.org/pdf/1902.09630.pdf c_area = cw * ch + eps # convex area return iou - (c_area - union) / c_area # GIoU else: return iou # IoU def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 # return positive, negative label smoothing BCE targets return 1.0 - 0.5 * eps, 0.5 * eps class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): super(BCEBlurWithLogitsLoss, self).__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() self.alpha = alpha def forward(self, pred, true): loss = self.loss_fcn(pred, true) pred = torch.sigmoid(pred) # prob from logits dx = pred - true # reduce only missing label effects # dx = (pred - true).abs() # reduce missing label and false label effects alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) loss *= alpha_factor return loss.mean() class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): super(FocalLoss, self).__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction self.loss_fcn.reduction = 'none' # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) # p_t = torch.exp(-loss) # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py pred_prob = torch.sigmoid(pred) # prob from logits p_t = true * pred_prob + (1 - true) * (1 - pred_prob) alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) modulating_factor = (1.0 - p_t) ** self.gamma loss *= alpha_factor * modulating_factor if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() else: # 'none' return loss class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): super(QFocalLoss, self).__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction self.loss_fcn.reduction = 'none' # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) pred_prob = torch.sigmoid(pred) # prob from logits alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) modulating_factor = torch.abs(true - pred_prob) ** self.gamma loss *= alpha_factor * modulating_factor if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() else: # 'none' return loss def compute_loss(predictions, targets, model): # predictions, targets, model device = targets.device lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) tcls, tbox, indices, anchors = build_targets(predictions, targets, model) # targets # Define criteria BCEcls = nn.BCEWithLogitsLoss( pos_weight=torch.tensor([1.0], device=device)) BCEobj = nn.BCEWithLogitsLoss( pos_weight=torch.tensor([1.0], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 cp, cn = smooth_BCE(eps=0.0) # Focal loss gamma = 0 # focal loss gamma if gamma > 0: BCEcls, BCEobj = FocalLoss(BCEcls, gamma), FocalLoss(BCEobj, gamma) # Losses # layer index, layer predictions for layer_index, layer_predictions in enumerate(predictions): # image, anchor, gridy, gridx b, anchor, grid_j, grid_i = indices[layer_index] tobj = torch.zeros_like(layer_predictions[..., 0], device=device) # target obj num_targets = b.shape[0] # number of targets if num_targets: # prediction subset corresponding to targets ps = layer_predictions[b, anchor, grid_j, grid_i] # Regression pxy = ps[:, :2].sigmoid() pwh = torch.exp(ps[:, 2:4]) * anchors[layer_index] pbox = torch.cat((pxy, pwh), 1) # predicted box # iou(prediction, target) iou = bbox_iou(pbox.T, tbox[layer_index], x1y1x2y2=False, CIoU=True) lbox += (1.0 - iou).mean() # iou loss model.gr = 1 # Objectness tobj[b, anchor, grid_j, grid_i] = \ (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio # Classification if ps.size(1) - 5 > 1: t = torch.full_like(ps[:, 5:], cn, device=device) # targets t[range(num_targets), tcls[layer_index]] = cp lcls += BCEcls(ps[:, 5:], 
t) # BCE lobj += BCEobj(layer_predictions[..., 4], tobj) # obj loss lbox *= 0.05 * (3. / 2) lobj *= (3. / 2) lcls *= 0.31 batch_size = tobj.shape[0] # batch size loss = lbox + lobj + lcls return loss * batch_size, to_cpu(torch.cat((lbox, lobj, lcls, loss))) def build_targets(p, targets, model): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) na, nt = 3, targets.shape[0] # number of anchors, targets #TODO tcls, tbox, indices, anch = [], [], [], [] gain = torch.ones(7, device=targets.device) # normalized to gridspace gain ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) # append anchor indices targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) g = 0.5 # bias off = torch.tensor([[0, 0]], device=targets.device).float() * g # offsets for i, yolo_layer in enumerate(model.yolo_layers): anchors = yolo_layer.anchors / yolo_layer.stride gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain # Match targets to anchors t = targets * gain if nt: # Matches r = t[:, :, 4:6] / anchors[:, None] # wh ratio j = torch.max(r, 1. / r).max(2)[0] < 4 # compare #TODO # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter # Offsets gxy = t[:, 2:4] # grid xy gxi = gain[[2, 3]] - gxy # inverse j, k = ((gxy % 1. < g) & (gxy > 1.)).T l, m = ((gxi % 1. < g) & (gxi > 1.)).T j = torch.stack((torch.ones_like(j),)) t = t.repeat((off.shape[0], 1, 1))[j] offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] else: t = targets[0] offsets = 0 # Define b, c = t[:, :2].long().T # image, class gxy = t[:, 2:4] # grid xy gwh = t[:, 4:6] # grid wh gij = (gxy - offsets).long() gi, gj = gij.T # grid xy indices # Append a = t[:, 6].long() # anchor indices # image, anchor, grid indices indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) tbox.append(torch.cat((gxy - gij, gwh), 1)) # box anch.append(anchors[a]) # anchors tcls.append(c) # class return tcls, tbox, indices, anch
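

# --- Editor's addition: hedged usage sketch, not called by the training code ---
# Quick illustration of two helpers defined above: the CIoU value returned by
# bbox_iou for boxes given in (x, y, w, h) form, and the positive/negative
# label-smoothing targets produced by smooth_BCE. All numbers are made up.
def _loss_helpers_sketch():
    box1 = torch.tensor([0.50, 0.50, 1.0, 1.0])       # one box, shape (4,)
    boxes2 = torch.tensor([[0.60, 0.60, 1.0, 1.0],    # n boxes, shape (n, 4)
                           [2.00, 2.00, 0.5, 0.5]])
    ciou = bbox_iou(box1, boxes2, x1y1x2y2=False, CIoU=True)  # shape (n,)

    cp, cn = smooth_BCE(eps=0.1)  # positive/negative targets: (0.95, 0.05)
    return ciou, cp, cn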
the-stack_0_6082
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, "README.rst"), encoding="utf-8") as f: long_description = f.read() setup( name="adafruit-circuitpython-lis3dh", use_scm_version=True, setup_requires=["setuptools_scm"], description="CircuitPython library for LIS3DH accelerometer.", long_description=long_description, long_description_content_type="text/x-rst", # The project's main homepage. url="https://github.com/adafruit/Adafruit_CircuitPython_LIS3DH", # Author details author="Adafruit Industries", author_email="[email protected]", install_requires=["Adafruit-Blinka"], # Choose your license license="MIT", # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries", "Topic :: System :: Hardware", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", ], # What does your project relate to? keywords="adafruit accelerometer lis3dh acceleration hardware micropython circuitpython", # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). py_modules=["adafruit_lis3dh"], )
the-stack_0_6083
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder function for image resizing operations.""" import functools import tensorflow.compat.v1 as tf from object_detection.core import preprocessor from object_detection.protos import image_resizer_pb2 def _tf_resize_method(resize_method): """Maps image resize method from enumeration type to TensorFlow. Args: resize_method: The resize_method attribute of keep_aspect_ratio_resizer or fixed_shape_resizer. Returns: method: The corresponding TensorFlow ResizeMethod. Raises: ValueError: if `resize_method` is of unknown type. """ dict_method = { image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR, image_resizer_pb2.NEAREST_NEIGHBOR: tf.image.ResizeMethod.NEAREST_NEIGHBOR, image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC, image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA } if resize_method in dict_method: return dict_method[resize_method] else: raise ValueError('Unknown resize_method') def build(image_resizer_config): """Builds callable for image resizing operations. Args: image_resizer_config: image_resizer.proto object containing parameters for an image resizing operation. Returns: image_resizer_fn: Callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. Raises: ValueError: if `image_resizer_config` is of incorrect type. ValueError: if `image_resizer_config.image_resizer_oneof` is of expected type. ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer is used. """ if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer): raise ValueError('image_resizer_config not of type ' 'image_resizer_pb2.ImageResizer.') image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof') if image_resizer_oneof == 'keep_aspect_ratio_resizer': keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer if not (keep_aspect_ratio_config.min_dimension <= keep_aspect_ratio_config.max_dimension): raise ValueError('min_dimension > max_dimension') method = _tf_resize_method(keep_aspect_ratio_config.resize_method) per_channel_pad_value = (0, 0, 0) if keep_aspect_ratio_config.per_channel_pad_value: per_channel_pad_value = tuple(keep_aspect_ratio_config. 
per_channel_pad_value) image_resizer_fn = functools.partial( preprocessor.resize_to_range, min_dimension=keep_aspect_ratio_config.min_dimension, max_dimension=keep_aspect_ratio_config.max_dimension, method=method, pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension, per_channel_pad_value=per_channel_pad_value) if not keep_aspect_ratio_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'fixed_shape_resizer': fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer method = _tf_resize_method(fixed_shape_resizer_config.resize_method) image_resizer_fn = functools.partial( preprocessor.resize_image, new_height=fixed_shape_resizer_config.height, new_width=fixed_shape_resizer_config.width, method=method) if not fixed_shape_resizer_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'identity_resizer': def image_resizer_fn(image, masks=None, **kwargs): del kwargs if masks is None: return [image, tf.shape(image)] else: return [image, masks, tf.shape(image)] return image_resizer_fn elif image_resizer_oneof == 'conditional_shape_resizer': conditional_shape_resize_config = ( image_resizer_config.conditional_shape_resizer) method = _tf_resize_method(conditional_shape_resize_config.resize_method) if conditional_shape_resize_config.condition == ( image_resizer_pb2.ConditionalShapeResizer.GREATER): image_resizer_fn = functools.partial( preprocessor.resize_to_max_dimension, max_dimension=conditional_shape_resize_config.size_threshold, method=method) elif conditional_shape_resize_config.condition == ( image_resizer_pb2.ConditionalShapeResizer.SMALLER): image_resizer_fn = functools.partial( preprocessor.resize_to_min_dimension, min_dimension=conditional_shape_resize_config.size_threshold, method=method) else: raise ValueError( 'Invalid image resizer condition option for ' 'ConditionalShapeResizer: \'%s\'.' % conditional_shape_resize_config.condition) if not conditional_shape_resize_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'pad_to_multiple_resizer': pad_to_multiple_resizer_config = ( image_resizer_config.pad_to_multiple_resizer) if pad_to_multiple_resizer_config.multiple < 0: raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.') else: image_resizer_fn = functools.partial( preprocessor.resize_pad_to_multiple, multiple=pad_to_multiple_resizer_config.multiple) if not pad_to_multiple_resizer_config.convert_to_grayscale: return image_resizer_fn else: raise ValueError( 'Invalid image resizer option: \'%s\'.' % image_resizer_oneof) def grayscale_image_resizer(image, masks=None): """Convert to grayscale before applying image_resizer_fn. Args: image: A 3D tensor of shape [height, width, 3] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, 1], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. 
""" # image_resizer_fn returns [resized_image, resized_image_shape] if # mask==None, otherwise it returns # [resized_image, resized_mask, resized_image_shape]. In either case, we # only deal with first and last element of the returned list. retval = image_resizer_fn(image, masks) resized_image = retval[0] resized_image_shape = retval[-1] retval[0] = preprocessor.rgb_to_gray(resized_image) retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0) return retval return functools.partial(grayscale_image_resizer)
the-stack_0_6090
import argparse import datetime import gym import envs import numpy as np import torch import imageio import itertools from rl.model import GaussianPolicy, QNetwork, DeterministicPolicy from transformer_split.util import getGraphStructure from transformer_split.vae_model import VAE_Model from torch.nn import functional as F from transformer_vae import util parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args') parser.add_argument('--env1-name', default="ant", help='Mujoco Gym environment (default: HalfCheetah-v2)') parser.add_argument('--env2-name', default="ant3", help='Mujoco Gym environment (default: HalfCheetah-v2)') parser.add_argument('--model_path', default="runs/2021-05-19_13-46-41_VAE_ant-v0_both/", help='model path') parser.add_argument('--seed', type=int, default=123456, metavar='N', help='random seed (default: 123456)') parser.add_argument('--policy_hidden_size', type=int, default=256, metavar='N', help='hidden size (default: 256)') parser.add_argument('--latent_dim', type=int, default=128, help='Encoder latent dimension') parser.add_argument('--cuda', action="store_true", help='run on CUDA (default: False)') parser.add_argument('--agent_memory1', default='data/ant_jump.memory', help='Path for saved replay memory') parser.add_argument('--video_file_name', default="ant_turn.mp4", help='output file name') parser.add_argument('--msg_dim', type=int, default=32, help='run on CUDA (default: False)') parser.add_argument('--batch_size', type=int, default=1, help='run on CUDA (default: False)') parser.add_argument('--actor_path', help='checkpoint training model every # steps') parser.add_argument('--num_episodes', type=int, default=3, metavar='N', help='maximum number of steps (default: 1000000)') parser.add_argument('--root_size', type=int, default=11, help='root dimension') parser.add_argument('--lr', type=float, default=1e-4, metavar='N', help='random seed (default: 123456)') parser.add_argument( "--transformer_norm", default=0, type=int, help="Use layernorm", ) parser.add_argument( "--beta", type=float, default=.1, help="beta coefficient of KL divergence", ) parser.add_argument( "--gradient_penalty", type=float, default=10, help="beta coefficient of KL divergence", ) parser.add_argument( "--discriminator_limiting_accuracy", type=float, default=0.7, help="beta coefficient of KL divergence", ) parser.add_argument( "--attention_layers", default=3, type=int, help="How many attention layers to stack", ) parser.add_argument( "--attention_heads", default=2, type=int, help="How many attention heads to stack", ) parser.add_argument( "--attention_hidden_size", type=int, default=128, help="Hidden units in an attention block", ) parser.add_argument( "--attention_embedding_size", type=int, default=128, help="Hidden units in an attention block", ) parser.add_argument( "--dropout_rate", type=float, default=0.0, help="How much to drop if drop in transformers", ) args = parser.parse_args() torch.manual_seed(args.seed) np.random.seed(args.seed) # Agent device = torch.device("cuda" if args.cuda else "cpu") env_names = ["ant-v0", "ant3-v0", "ant_a-v0"] train_envs = [gym.make(n) for n in env_names] graphs = [getGraphStructure(e.xml) for e in train_envs] # All environments have the same dimension per limb. 
num_limbs = len(graphs[0]) #torso + body limbs body_limbs = num_limbs - 1 dim_per_limb = int((train_envs[0].observation_space.shape[0] - args.root_size) / (body_limbs - 1)) max_num_limbs = max(len(g) for g in graphs) args.dim_per_limb = dim_per_limb args.max_num_limbs = max_num_limbs root_dir = util.get_project_root() render_env = train_envs[2] render_topology = graphs[2] render_limbs = len(render_topology) expert_env = train_envs[0] expert_topology = graphs[0] policy = GaussianPolicy( expert_env.observation_space.shape[0], expert_env.action_space.shape[0], args.policy_hidden_size, expert_env.action_space).to(device) policy.load_state_dict(torch.load(args.actor_path)) vae_model = VAE_Model(args) vae_model.load_model(args.model_path) def pad_state(data, state_size, max_num_limbs): max_dim = args.root_size + state_size * (max_num_limbs - 1) output = torch.zeros(max_dim) output[:data.shape[0]] = torch.tensor(data) return output def pad_topology(top, max_num_limbs): topology = torch.full((max_num_limbs,), -1, dtype=torch.int32) topology[:len(top)] = torch.tensor(top, dtype=torch.int32) return topology # Evaluation loop total_numsteps = 0 avg_reward = 0. state = render_env.reset() with imageio.get_writer(args.video_file_name, fps=30) as video: for i_episode in itertools.count(1): episode_reward = 0 episode_steps = 0 done = False state = render_env.reset() video.append_data(render_env.render('rgb_array')) done = False while not done: state = pad_state(state, dim_per_limb, max_num_limbs).unsqueeze(0) src_topology = pad_topology(render_topology, max_num_limbs).unsqueeze(0) tgt_topology = pad_topology(expert_topology, max_num_limbs).unsqueeze(0) x_hat = vae_model.transfer(state, tgt_topology) x_hat = x_hat.detach().cpu() x_hat = x_hat[:(render_limbs-1)] x_hat = torch.FloatTensor(x_hat).to(device).unsqueeze(0) action, _, _ = policy.sample(x_hat) action = action.detach().cpu().numpy()[0] next_state, reward, done, _ = render_env.step(action[0][:7]) video.append_data(render_env.render('rgb_array')) episode_reward += reward state = next_state avg_reward += episode_reward if i_episode > args.num_episodes: break
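

# --- Editor's addition: hedged illustration, never called by the script ---
# The two padding helpers above map variable-length morphologies onto
# fixed-size tensors: states are zero-padded to root_size + state_size *
# (max_num_limbs - 1) entries, topologies are padded with -1 up to
# max_num_limbs entries. The sizes below are illustrative only and rely on the
# default --root_size of 11 parsed above.
def _padding_sketch():
    state = pad_state(np.zeros(13), 2, 4)    # -> torch.Size([17]) == 11 + 2 * 3
    topology = pad_topology([-1, 0, 0], 4)   # -> tensor([-1, 0, 0, -1], dtype=torch.int32)
    return state, topology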
the-stack_0_6091
# ===================================================================================== # # Module for solving Ising models exactly. # # Distributed with ConIII. # # NOTE: This code needs cleanup. # # Author : Edward Lee, [email protected] # ===================================================================================== # # # MIT License # # Copyright (c) 2019 Edward D. Lee, Bryan C. Daniels # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np import mpmath as mp import scipy.special as ss from itertools import combinations import sys np.set_printoptions(threshold=sys.maxsize) def write_eqns(n, sym, corrTermsIx, suffix='', high_prec=False): """Create strings for writing out the equations and then write them to file. TODO: This code needs some cleanup. Parameters ---------- n : int number of spins sym : int value of 1 will use {-1,1} formulation, 0 means {0,1} corrTermsIx : list of ndarrays Allows specification of arbitrary correlations to constrain using an index based structure. These should be index arrays as would be returned by np.where that specify which correlations to write down. Each consecutive array should specify a matrix of sequentially increasing dimension. [Nx1, NxN, NxNxN, ...] suffix : str, '' high_prec : bool, False """ import re assert sym in [0,1], "sym argument must be 0 or 1." abc = 'HJKLMNOPQRSTUVWXYZABCDE' expterms = [] # 2**N exponential corrTermsIx binstates = [] # all binary states as strings signs = [] # coefficient for all numerator terms when computing correlations br = "[]" ix0 = 0 # default suffix for high precision files if high_prec: suffix += '_hp' # Collect all corrTermsIx in the partition function. for state in range(2**n): binstates.append("{0:b}".format(state)) if len(binstates[state])<n: binstates[state] = "0"*(n-len(binstates[state])) + binstates[state] expterms.append( '' ) # Get corrTermsIx corresponding to each of the ith order term. if sym: for i in range(len(corrTermsIx)): expterms[state] += get_terms11(corrTermsIx[i], abc[i], binstates[state], br, ix0) else: for i in range(len(corrTermsIx)): expterms[state] += get_terms01(corrTermsIx[i], abc[i], binstates[state], br, ix0) expterms[state] = re.sub(r'\+0\+','+', expterms[state]) expterms[state] = re.sub(r'\)\+0',')', expterms[state]) expterms[state] += ', ' # Collect all terms with corresponding prefix in the equation to solve. for state in range(2**n): for i in range(len(corrTermsIx)): if state==0: signs.append([]) # Get corrTermsIx corresponding to each of the ith order term. 
if sym: signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state]) else: signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state], False) # expand the length of signs if we haven't reached those constraints yet before if len(signs[i])<signs_.size: for j in range(signs_.size-len(signs[i])): signs[i].append(np.zeros(0, dtype=int)) for j in range(signs_.size): signs[i][j] = np.append(signs[i][j], signs_[j]) Z = ''.join(expterms) # Account for fact that symmetric Python had inverted the order of the states. if sym: extra = '\n Pout = Pout[::-1]' else: extra = '' # write to files write_py(n, sym, corrTermsIx, signs, expterms, Z, extra=extra, suffix=suffix, high_prec=high_prec) def write_py(n, sym, contraintTermsIx, signs, expterms, Z, extra='', suffix='', high_prec=False): """ Write out Ising equations for Python. Parameters ---------- n : int System size. contraintTermsIx : list of str signs : list of ndarray Sign for each term in the numerator when computing correlations. expterms : list of str Every single energy term. Z : str Energies for all states that will be put into partition function. extra : str, '' any extra lines to add at the end suffix : str, '' high_prec : bool, False If True, write version that uses mpmath for high precision calculations. """ import time import os abc = 'HJKLMNOPQRSTUVWXYZABCDE' fname = 'ising_eqn/ising_eqn_%d%s.py'%(n,suffix) print("Generating file ./%s"%fname) if not os.path.isdir('./ising_eqn'): os.makedirs('./ising_eqn') f = open(fname,'w') # insert license try: license = open('../LICENSE.txt','r').readlines() for el in license: el = '# '+el f.write(el) f.write('\n') except FileNotFoundError: print("License file not found...") f.write("# Equations for %d-spin Ising model.\n\n"%n) f.write("# ") f.write(time.strftime("Written on %Y/%m/%d.")+"\n") if high_prec: f.write("from numpy import zeros, array, prod\n") f.write("from ..enumerate import mp_fast_logsumexp as fast_logsumexp\n") f.write("from mpmath import exp, isnan\n\n") else: f.write("from numpy import zeros, exp, array, prod, isnan\n") f.write("from ..enumerate import fast_logsumexp\n\n") # Keep these as string because they need to grow in the loop and then can just be # added all at once at the end. 
fargs = "def calc_observables(params):\n" if high_prec: vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'), dtype=object)\n' # string of variable declarations else: vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'))\n' # string of variable declarations eqns = '' # string of equations to compute ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) )) for i in range(len(contraintTermsIx)): vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n' if sym: k = 0 for i in range(len(contraintTermsIx)): for j in range(len(signs[i])): eqns += (" num = fast_logsumexp(energyTerms, "+ str(signs[i][j]).replace('1 ','1,').replace('1\n','1,\n')+ ")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n") k += 1 else: k = 0 for i in range(len(contraintTermsIx)): for j in range(len(signs[i])): eqns += (" num = fast_logsumexp(energyTerms, "+ str(signs[i][j]).replace('0 ','0,').replace('1 ','1,').replace('0\n','0,\n').replace('1\n','1,\n')+ ")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n") k += 1 # Write out correlation terms f.write(fargs) f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+ " Returns all correlations.\n \"\"\"\n")) f.write(vardec) _write_energy_terms(f, Z) f.write(eqns) if high_prec: f.write(" for i in range(Cout.size):\n if isnan(Cout[i]):\n Cout[i] = 0.\n") else: f.write(" Cout[isnan(Cout)] = 0.\n") f.write(" return(Cout)\n\n") # Write equations for probabilities of all states. #f.write("def p("+string.join([i+"," for i in abc[:len(contraintTermsIx)]])+"):\n") f.write("def p(params):\n") f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+ " Returns probabilities of all configurations.\n \"\"\"\n")) f.write(vardec) # Output variable decs and put params into explicit parameters. ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) )) vardec = '' for i in range(len(contraintTermsIx)): vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n' if high_prec: vardec += ' Pout = zeros(('+str(2**n)+'), dtype=object)\n' # string of variable declarations else: vardec += ' Pout = zeros(('+str(2**n)+'))\n' # string of variable declarations f.write(vardec) _write_energy_terms(f, Z) # each probability equation for i in range(len(expterms)): f.write(' Pout['+str(i)+'] = exp( '+expterms[i][:-2]+' - logZ )\n') f.write(extra) f.write("\n return(Pout)\n") f.close() def _write_energy_terms(f, Z): """Split expression for energy terms for each term in Z into multiple lines and write out nicely into file. Parameters ---------- f : file Z : list of str Energy terms to write out. """ f.write(' energyTerms = array([') i=0 while i<len(Z): iend=i+100 # end line on a + while iend<len(Z) and Z[iend-1]!='+': iend+=1 if iend>=len(Z): # ignore comma at end of line f.write(' '+Z[i:-1]+'])\n logZ = fast_logsumexp(energyTerms)[0]\n') else: f.write(' '+Z[i:iend]+'\n') i=iend def _compute_signs(subix, expterm, binstate, sym=True): """Iterate through terms that belong in the numerator for each constraint and keep track of the sign of those terms. Parameters ---------- subix : list expterm : list of str binstate : list of str sym : bool, True Returns ------- ndarray Sign of each exponential term in numerator. 
""" if len(subix)==0: return if sym: downSpin = -1 signs = np.ones(len(subix[0]), dtype=int) for i in range(len(subix[0])): if np.mod( sum([binstate[k[i]]=="1" for k in subix]),2 ): signs[i] = downSpin else: downSpin = 0 signs = np.ones(len(subix[0]), dtype=int) for i in range(len(subix[0])): if np.mod( any([binstate[k[i]]=="0" for k in subix]),2 ): signs[i] = downSpin return signs def get_terms11(subix, prefix, binstate, br, ix0): """ Specific to {-1,1}. """ j = 0 s = '' if len(subix)==0: return s for i in range(len(subix[0])): if np.mod( sum([binstate[k[j]]=="1" for k in subix]),2 ): s += '-' else: s += '+' s += prefix+br[0]+str(j+ix0)+br[1] j += 1 return s def get_terms01(subix, prefix, binstate, br, ix0): """ Specific to {0,1}. """ j = 0 s = '' if len(subix)==0: return s for i in range(len(subix[0])): if np.all( [binstate[k[j]]=="1" for k in subix] ): s += '+'+prefix+br[0]+str(j+ix0)+br[1] j += 1 if s=='': s = '+0' return s def get_terms(subix, prefix, binstate, br, ix0): """ Spins are put in explicitly """ j = 0 s = '' if len(subix)==0: return s for i in range(len(subix[0])): s += '+'+prefix+br[0]+str(j+ix0)+br[1] for k in range(len(subix)): s += '*s'+br[0]+str(subix[k][i])+br[1] j += 1 if s=='': s = '+0' return s def get_3idx(n): """Get binary 3D matrix with truth values where index values correspond to the index of all possible ijk parameters. We can do this by recognizing that the pattern along each plane in the third dimension is like the upper triangle pattern that just moves up and over by one block each cut lower into the box. """ b = np.zeros((n,n,n)) c = np.triu(np.ones((n-1,n-1))==1,1) for i in range(n-1): # shunt this diagonal matrix over every descent into a lower plane in the box # the plane xz if i==0: b[i,(1+i):,(1+i):] = c else: b[i,(1+i):,(1+i):] = c[:-i,:-i] return b def get_nidx(k, n): """ Get the kth order indices corresponding to all the states in which k elements are firing up out of n spins. The ordering correspond to that returned by bin_states(). One can check this code for correctness by comparing with get_3idx() >>>>> print where(exact.get_3idx(4)) print where(exact.get_nidx(3,4)) <<<<< """ if k==n: return np.reshape(list(range(n)),(n,1)) elif k<n: allStates = bin_states(n) statesix = np.sum(allStates,1)==k ix = [] for s in allStates[statesix,:]: j = 0 for i in np.argwhere(s==1).flatten(): if len(ix)<(j+1): ix.append([]) ix[j].append(i) j += 1 return np.array(ix)[:,::-1] # make sure last idx increases first def pairwise(n, sym=0, **kwargs): """Wrapper for writing pairwise maxent model (Ising) files. Parameters ---------- n : int System size. sym : int, 0 Can be 0 or 1. **kwargs Returns ------- None """ assert sym==0 or sym==1 print("Writing equations for pairwise Ising model with %d spins."%n) if sym: write_eqns(n, sym, [np.where(np.ones((n))==1), np.where(np.triu(np.ones((n,n)),k=1)==1)], suffix='_sym', **kwargs) else: write_eqns(n, sym, [np.where(np.ones((n))==1), np.where(np.triu(np.ones((n,n)),k=1)==1)], **kwargs) def triplet(n, sym=0, **kwargs): """Wrapper for writing triplet-order maxent model. Parameters ---------- n : int System size. sym : int, 0 Can be 0 or 1. 
**kwargs Returns ------- None """ assert sym==0 or sym==1 print("Writing equations for Ising model with triplet interactions and %d spins."%n) if sym: write_eqns(n,sym,[(range(n),), list(zip(*list(combinations(range(n),2)))), list(zip(*list(combinations(range(n),3))))], suffix='_sym_triplet', **kwargs) else: write_eqns(n,sym,[(range(n),), list(zip(*list(combinations(range(n),2)))), list(zip(*list(combinations(range(n),3))))], suffix='_triplet', **kwargs) def _write_matlab(n, terms, fitterms, expterms, Z, suffix=''): """ DEPRECATED: code here for future referencing Write out equations to solve for matlab. """ import time abc = 'HJKLMNOPQRSTUVWXYZABCDE' vardec = '' # Write function to solve to file. f = open('ising_eqn_%d%s.m'%(n,suffix),'w') f.write("% Equations of %d-spin Ising model.\n\n"%n) f.write(time.strftime("%Y/%m/%d")+"\n") f.write("% Give each set of parameters concatenated into one array.\n\n") # Keep these as string because they need to grow in the loop and then can just be # added all at once at the end. f.write("function Cout = calc_observables(params)\n") f.write('\tCout = zeros('+str(sum([len(i) for i in fitterms]))+',1);\n') # string of variable declarations eqns = '' # string of equations to compute ix = np.hstack(( 0,np.cumsum([len(i) for i in fitterms]) ))+1 for i in range(len(terms)): vardec += '\t'+abc[i]+' = params('+str(ix[i])+':'+str(ix[i+1]-1)+');\n' k = 0 for i in range(len(terms)): for j in range(len(fitterms[i])): eqns += "\tCout("+str(k+1)+") = ("+fitterms[i][j]+")/Z;\n" k += 1 f.write(vardec) f.write("\tZ = "+Z+";\n") f.write(eqns) f.close() g = open('probs'+str(n)+'.m','w') g.write("% File for getting the probabilities of Ising model.\n% ") g.write(time.strftime("%Y/%m/%d")+"\n") # Write equations for probabilities of all states. g.write("function Pout = p(params)\n") g.write(vardec) g.write(' Pout = zeros('+str(2**n)+',1);\n') # string of variable declarations g.write(' Z = '+Z+';\n') for i in range(len(expterms)): g.write(' Pout('+str(i+1)+') = '+expterms[i]+'/Z;\n') g.close() def fast_logsumexp(X, coeffs=None): """Simplified version of logsumexp to do correlation calculation in Ising equation files. Scipy's logsumexp can be around 10x slower in comparison. Parameters ---------- X : ndarray Terms inside logs. coeffs : ndarray Factors in front of exponentials. Returns ------- float Value of magnitude of quantity inside log (the sum of exponentials). float Sign. """ Xmx = max(X) if coeffs is None: y = np.exp(X-Xmx).sum() else: y = np.exp(X-Xmx).dot(coeffs) if y<0: return np.log(np.abs(y))+Xmx, -1. return np.log(y)+Xmx, 1. def mp_fast_logsumexp(X, coeffs=None): """fast_logsumexp for high precision numbers using mpmath. Parameters ---------- X : ndarray Terms inside logs. coeffs : ndarray Factors in front of exponentials. Returns ------- float Value of magnitude of quantity inside log (the sum of exponentials). float Sign. """ Xmx = max(X) if coeffs is None: y = sum(map(mp.exp, X-Xmx)) else: y = np.array(coeffs).dot(list(map(mp.exp, X-Xmx))) if y<0: return mp.log(abs(y))+Xmx, -1. return mp.log(y)+Xmx, 1. if __name__=='__main__': """When run with Python, this will write the equations for the Ising model into file ising_eqn_[n][_sym] where n will be replaced by the system size and the suffix '_sym' is included if the equations are written in the {-1,+1} basis. 
To write the Ising model equations for a system of size 3 in the {0,1} basis, call >>> python enumerate.py 3 For the {-1,1} basis, call >>> python enumerate.py 3 1 To include triplet order interactions, include a 3 at the very end >>> python enumerate.py 3 0 3 To write high precision, include an '-hp=true' as the last argument. >>> python enumerate.py 3 0 3 -hp=true """ import sys args = [i for i in sys.argv if '-'!=i[0]] kwargs = [i for i in sys.argv if '-'==i[0]] n = int(args[1]) if len(args)==2: sym = 0 order = 2 elif len(args)==3: sym = int(args[2]) assert sym==0 or sym==1 order = 2 elif len(args)==4: sym = int(args[2]) order = int(args[3]) else: raise Exception("Unrecognized arguments.") # parse kwargs if len(kwargs): if '-hp='==kwargs[0][:4]: if kwargs[0][4:].lower()=='true': high_prec = True elif kwargs[0][4:].lower()=='false': high_prec = False else: raise Exception("Unrecognized value for hp.") else: high_prec = False else: # default kwargs high_prec = False if order==2: pairwise(n, sym, high_prec=high_prec) elif order==3: triplet(n, sym, high_prec=high_prec) else: raise NotImplementedError("Only order up to 3 implemented for this convenient interface.")
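

# --- Editor's addition: hedged illustration ---
# fast_logsumexp above returns the log-magnitude and sign of
# sum_i coeffs_i * exp(X_i), which stays finite even when every exp(X_i)
# underflows to zero. The numbers below are arbitrary and only show the
# calling convention; this helper is not used elsewhere in the module.
def _fast_logsumexp_sketch():
    X = np.array([-1000., -1001., -1002.])
    coeffs = np.array([1, -1, 1])
    logmag, sign = fast_logsumexp(X, coeffs)
    # sign * exp(logmag) approximates coeffs.dot(exp(X)) without underflow.
    return logmag, sign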
the-stack_0_6092
#!/usr/bin/env python3 """ GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, Atlanta, Georgia 30332-0415 All Rights Reserved See LICENSE for the license information Code generator for wrapping a C++ module with Pybind11 Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal, and Frank Dellaert """ # pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, too-many-arguments, unused-format-string-argument, line-too-long import re from pathlib import Path from typing import List import gtwrap.interface_parser as parser import gtwrap.template_instantiator as instantiator class PybindWrapper: """ Class to generate binding code for Pybind11 specifically. """ def __init__(self, module_name, top_module_namespaces='', use_boost=False, ignore_classes=(), module_template=""): self.module_name = module_name self.top_module_namespaces = top_module_namespaces self.use_boost = use_boost self.ignore_classes = ignore_classes self._serializing_classes = [] self.module_template = module_template self.python_keywords = [ 'lambda', 'False', 'def', 'if', 'raise', 'None', 'del', 'import', 'return', 'True', 'elif', 'in', 'try', 'and', 'else', 'is', 'while', 'as', 'except', 'lambda', 'with', 'assert', 'finally', 'nonlocal', 'yield', 'break', 'for', 'not', 'class', 'from', 'or', 'continue', 'global', 'pass' ] # amount of indentation to add before each function/method declaration. self.method_indent = '\n' + (' ' * 8) # Special methods which are leveraged by ipython/jupyter notebooks self._ipython_special_methods = [ "svg", "png", "jpeg", "html", "javascript", "markdown", "latex" ] def _py_args_names(self, args): """Set the argument names in Pybind11 format.""" names = args.names() if names: py_args = [] for arg in args.list(): if arg.default is not None: default = ' = {arg.default}'.format(arg=arg) else: default = '' argument = 'py::arg("{name}"){default}'.format( name=arg.name, default='{0}'.format(default)) py_args.append(argument) return ", " + ", ".join(py_args) else: return '' def _method_args_signature(self, args): """Generate the argument types and names as per the method signature.""" cpp_types = args.to_cpp(self.use_boost) names = args.names() types_names = [ "{} {}".format(ctype, name) for ctype, name in zip(cpp_types, names) ] return ', '.join(types_names) def wrap_ctors(self, my_class): """Wrap the constructors.""" res = "" for ctor in my_class.ctors: res += ( self.method_indent + '.def(py::init<{args_cpp_types}>()' '{py_args_names})'.format( args_cpp_types=", ".join(ctor.args.to_cpp(self.use_boost)), py_args_names=self._py_args_names(ctor.args), )) return res def _wrap_serialization(self, cpp_class): """Helper method to add serialize, deserialize and pickle methods to the wrapped class.""" if not cpp_class in self._serializing_classes: self._serializing_classes.append(cpp_class) serialize_method = self.method_indent + \ ".def(\"serialize\", []({class_inst} self){{ return gtsam::serialize(*self); }})".format(class_inst=cpp_class + '*') deserialize_method = self.method_indent + \ '.def("deserialize", []({class_inst} self, string serialized)' \ '{{ gtsam::deserialize(serialized, *self); }}, py::arg("serialized"))' \ .format(class_inst=cpp_class + '*') # Since this class supports serialization, we also add the pickle method. 
pickle_method = self.method_indent + \ ".def(py::pickle({indent} [](const {cpp_class} &a){{ /* __getstate__: Returns a string that encodes the state of the object */ return py::make_tuple(gtsam::serialize(a)); }},{indent} [](py::tuple t){{ /* __setstate__ */ {cpp_class} obj; gtsam::deserialize(t[0].cast<std::string>(), obj); return obj; }}))" return serialize_method + deserialize_method + \ pickle_method.format(cpp_class=cpp_class, indent=self.method_indent) def _wrap_print(self, ret: str, method: parser.Method, cpp_class: str, args_names: List[str], args_signature_with_names: str, py_args_names: str, prefix: str, suffix: str): """ Update the print method to print to the output stream and append a __repr__ method. Args: ret (str): The result of the parser. method (parser.Method): The method to be wrapped. cpp_class (str): The C++ name of the class to which the method belongs. args_names (List[str]): List of argument variable names passed to the method. args_signature_with_names (str): C++ arguments containing their names and type signatures. py_args_names (str): The pybind11 formatted version of the argument list. prefix (str): Prefix to add to the wrapped method when writing to the cpp file. suffix (str): Suffix to add to the wrapped method when writing to the cpp file. Returns: str: The wrapped print method. """ # Redirect stdout - see pybind docs for why this is a good idea: # https://pybind11.readthedocs.io/en/stable/advanced/pycpp/utilities.html#capturing-standard-output-from-ostream ret = ret.replace('self->print', 'py::scoped_ostream_redirect output; self->print') # Make __repr__() call .print() internally ret += '''{prefix}.def("__repr__", [](const {cpp_class}& self{opt_comma}{args_signature_with_names}){{ gtsam::RedirectCout redirect; self.{method_name}({method_args}); return redirect.str(); }}{py_args_names}){suffix}'''.format( prefix=prefix, cpp_class=cpp_class, opt_comma=', ' if args_names else '', args_signature_with_names=args_signature_with_names, method_name=method.name, method_args=", ".join(args_names) if args_names else '', py_args_names=py_args_names, suffix=suffix) return ret def _wrap_method(self, method, cpp_class, prefix, suffix, method_suffix=""): """ Wrap the `method` for the class specified by `cpp_class`. Args: method: The method to wrap. cpp_class: The C++ name of the class to which the method belongs. prefix: Prefix to add to the wrapped method when writing to the cpp file. suffix: Suffix to add to the wrapped method when writing to the cpp file. method_suffix: A string to append to the wrapped method name. 
""" py_method = method.name + method_suffix cpp_method = method.to_cpp() args_names = method.args.names() py_args_names = self._py_args_names(method.args) args_signature_with_names = self._method_args_signature(method.args) # Special handling for the serialize/serializable method if cpp_method in ["serialize", "serializable"]: return self._wrap_serialization(cpp_class) # Special handling of ipython specific methods # https://ipython.readthedocs.io/en/stable/config/integrating.html if cpp_method in self._ipython_special_methods: idx = self._ipython_special_methods.index(cpp_method) py_method = f"_repr_{self._ipython_special_methods[idx]}_" # Add underscore to disambiguate if the method name matches a python keyword if py_method in self.python_keywords: py_method = py_method + "_" is_method = isinstance( method, (parser.Method, instantiator.InstantiatedMethod)) is_static = isinstance( method, (parser.StaticMethod, instantiator.InstantiatedStaticMethod)) return_void = method.return_type.is_void() caller = cpp_class + "::" if not is_method else "self->" function_call = ('{opt_return} {caller}{method_name}' '({args_names});'.format( opt_return='return' if not return_void else '', caller=caller, method_name=cpp_method, args_names=', '.join(args_names), )) ret = ('{prefix}.{cdef}("{py_method}",' '[]({opt_self}{opt_comma}{args_signature_with_names}){{' '{function_call}' '}}' '{py_args_names}){suffix}'.format( prefix=prefix, cdef="def_static" if is_static else "def", py_method=py_method, opt_self="{cpp_class}* self".format( cpp_class=cpp_class) if is_method else "", opt_comma=', ' if is_method and args_names else '', args_signature_with_names=args_signature_with_names, function_call=function_call, py_args_names=py_args_names, suffix=suffix, )) # Create __repr__ override # We allow all arguments to .print() and let the compiler handle type mismatches. if method.name == 'print': ret = self._wrap_print(ret, method, cpp_class, args_names, args_signature_with_names, py_args_names, prefix, suffix) return ret def wrap_methods(self, methods, cpp_class, prefix='\n' + ' ' * 8, suffix=''): """ Wrap all the methods in the `cpp_class`. """ res = "" for method in methods: # To avoid type confusion for insert if method.name == 'insert' and cpp_class == 'gtsam::Values': name_list = method.args.names() type_list = method.args.to_cpp(self.use_boost) # inserting non-wrapped value types if type_list[0].strip() == 'size_t': method_suffix = '_' + name_list[1].strip() res += self._wrap_method(method=method, cpp_class=cpp_class, prefix=prefix, suffix=suffix, method_suffix=method_suffix) res += self._wrap_method( method=method, cpp_class=cpp_class, prefix=prefix, suffix=suffix, ) return res def wrap_variable(self, namespace, module_var, variable, prefix='\n' + ' ' * 8): """ Wrap a variable that's not part of a class (i.e. 
global) """ variable_value = "" if variable.default is None: variable_value = variable.name else: variable_value = variable.default return '{prefix}{module_var}.attr("{variable_name}") = {namespace}{variable_value};'.format( prefix=prefix, module_var=module_var, variable_name=variable.name, namespace=namespace, variable_value=variable_value) def wrap_properties(self, properties, cpp_class, prefix='\n' + ' ' * 8): """Wrap all the properties in the `cpp_class`.""" res = "" for prop in properties: res += ('{prefix}.def_{property}("{property_name}", ' '&{cpp_class}::{property_name})'.format( prefix=prefix, property="readonly" if prop.ctype.is_const else "readwrite", cpp_class=cpp_class, property_name=prop.name, )) return res def wrap_operators(self, operators, cpp_class, prefix='\n' + ' ' * 8): """Wrap all the overloaded operators in the `cpp_class`.""" res = "" template = "{prefix}.def({{0}})".format(prefix=prefix) for op in operators: if op.operator == "[]": # __getitem__ res += "{prefix}.def(\"__getitem__\", &{cpp_class}::operator[])".format( prefix=prefix, cpp_class=cpp_class) elif op.operator == "()": # __call__ res += "{prefix}.def(\"__call__\", &{cpp_class}::operator())".format( prefix=prefix, cpp_class=cpp_class) elif op.is_unary: res += template.format("{0}py::self".format(op.operator)) else: res += template.format("py::self {0} py::self".format( op.operator)) return res def wrap_enum(self, enum, class_name='', module=None, prefix=' ' * 4): """ Wrap an enum. Args: enum: The parsed enum to wrap. class_name: The class under which the enum is defined. prefix: The amount of indentation. """ if module is None: module = self._gen_module_var(enum.namespaces()) cpp_class = enum.cpp_typename().to_cpp() if class_name: # If class_name is provided, add that as the namespace cpp_class = class_name + "::" + cpp_class res = '{prefix}py::enum_<{cpp_class}>({module}, "{enum.name}", py::arithmetic())'.format( prefix=prefix, module=module, enum=enum, cpp_class=cpp_class) for enumerator in enum.enumerators: res += '\n{prefix} .value("{enumerator.name}", {cpp_class}::{enumerator.name})'.format( prefix=prefix, enumerator=enumerator, cpp_class=cpp_class) res += ";\n\n" return res def wrap_enums(self, enums, instantiated_class, prefix=' ' * 4): """Wrap multiple enums defined in a class.""" cpp_class = instantiated_class.to_cpp() module_var = instantiated_class.name.lower() res = '' for enum in enums: res += "\n" + self.wrap_enum( enum, class_name=cpp_class, module=module_var, prefix=prefix) return res def wrap_instantiated_class( self, instantiated_class: instantiator.InstantiatedClass): """Wrap the class.""" module_var = self._gen_module_var(instantiated_class.namespaces()) cpp_class = instantiated_class.to_cpp() if cpp_class in self.ignore_classes: return "" if instantiated_class.parent_class: class_parent = "{instantiated_class.parent_class}, ".format( instantiated_class=instantiated_class) else: class_parent = '' if instantiated_class.enums: # If class has enums, define an instance and set module_var to the instance instance_name = instantiated_class.name.lower() class_declaration = ( '\n py::class_<{cpp_class}, {class_parent}' '{shared_ptr_type}::shared_ptr<{cpp_class}>> ' '{instance_name}({module_var}, "{class_name}");' '\n {instance_name}').format( shared_ptr_type=('boost' if self.use_boost else 'std'), cpp_class=cpp_class, class_name=instantiated_class.name, class_parent=class_parent, instance_name=instance_name, module_var=module_var) module_var = instance_name else: class_declaration = ( '\n 
py::class_<{cpp_class}, {class_parent}' '{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")' ).format(shared_ptr_type=('boost' if self.use_boost else 'std'), cpp_class=cpp_class, class_name=instantiated_class.name, class_parent=class_parent, module_var=module_var) return ('{class_declaration}' '{wrapped_ctors}' '{wrapped_methods}' '{wrapped_static_methods}' '{wrapped_properties}' '{wrapped_operators};\n'.format( class_declaration=class_declaration, wrapped_ctors=self.wrap_ctors(instantiated_class), wrapped_methods=self.wrap_methods( instantiated_class.methods, cpp_class), wrapped_static_methods=self.wrap_methods( instantiated_class.static_methods, cpp_class), wrapped_properties=self.wrap_properties( instantiated_class.properties, cpp_class), wrapped_operators=self.wrap_operators( instantiated_class.operators, cpp_class))) def wrap_instantiated_declaration( self, instantiated_decl: instantiator.InstantiatedDeclaration): """Wrap the class.""" module_var = self._gen_module_var(instantiated_decl.namespaces()) cpp_class = instantiated_decl.to_cpp() if cpp_class in self.ignore_classes: return "" res = ( '\n py::class_<{cpp_class}, ' '{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")' ).format(shared_ptr_type=('boost' if self.use_boost else 'std'), cpp_class=cpp_class, class_name=instantiated_decl.name, module_var=module_var) return res def wrap_stl_class(self, stl_class): """Wrap STL containers.""" module_var = self._gen_module_var(stl_class.namespaces()) cpp_class = stl_class.to_cpp() if cpp_class in self.ignore_classes: return "" return ( '\n py::class_<{cpp_class}, {class_parent}' '{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")' '{wrapped_ctors}' '{wrapped_methods}' '{wrapped_static_methods}' '{wrapped_properties};\n'.format( shared_ptr_type=('boost' if self.use_boost else 'std'), cpp_class=cpp_class, class_name=stl_class.name, class_parent=str(stl_class.parent_class) + (', ' if stl_class.parent_class else ''), module_var=module_var, wrapped_ctors=self.wrap_ctors(stl_class), wrapped_methods=self.wrap_methods(stl_class.methods, cpp_class), wrapped_static_methods=self.wrap_methods( stl_class.static_methods, cpp_class), wrapped_properties=self.wrap_properties( stl_class.properties, cpp_class), )) def wrap_functions(self, functions, namespace, prefix='\n' + ' ' * 8, suffix=''): """ Wrap all the global functions. 
""" res = "" for function in functions: function_name = function.name # Add underscore to disambiguate if the function name matches a python keyword python_keywords = self.python_keywords + ['print'] if function_name in python_keywords: function_name = function_name + "_" cpp_method = function.to_cpp() is_static = isinstance(function, parser.StaticMethod) return_void = function.return_type.is_void() args_names = function.args.names() py_args_names = self._py_args_names(function.args) args_signature = self._method_args_signature(function.args) caller = namespace + "::" function_call = ('{opt_return} {caller}{function_name}' '({args_names});'.format( opt_return='return' if not return_void else '', caller=caller, function_name=cpp_method, args_names=', '.join(args_names), )) ret = ('{prefix}.{cdef}("{function_name}",' '[]({args_signature}){{' '{function_call}' '}}' '{py_args_names}){suffix}'.format( prefix=prefix, cdef="def_static" if is_static else "def", function_name=function_name, args_signature=args_signature, function_call=function_call, py_args_names=py_args_names, suffix=suffix)) res += ret return res def _partial_match(self, namespaces1, namespaces2): for i in range(min(len(namespaces1), len(namespaces2))): if namespaces1[i] != namespaces2[i]: return False return True def _gen_module_var(self, namespaces): """Get the Pybind11 module name from the namespaces.""" # We skip the first value in namespaces since it is empty sub_module_namespaces = namespaces[len(self.top_module_namespaces):] return "m_{}".format('_'.join(sub_module_namespaces)) def _add_namespaces(self, name, namespaces): if namespaces: # Ignore the first empty global namespace. idx = 1 if not namespaces[0] else 0 return '::'.join(namespaces[idx:] + [name]) else: return name def wrap_namespace(self, namespace): """Wrap the complete `namespace`.""" wrapped = "" includes = "" namespaces = namespace.full_namespaces() if not self._partial_match(namespaces, self.top_module_namespaces): return "", "" if len(namespaces) < len(self.top_module_namespaces): for element in namespace.content: if isinstance(element, parser.Include): include = "{}\n".format(element) # replace the angle brackets with quotes include = include.replace('<', '"').replace('>', '"') includes += include if isinstance(element, parser.Namespace): ( wrapped_namespace, includes_namespace, ) = self.wrap_namespace( # noqa element) wrapped += wrapped_namespace includes += includes_namespace else: module_var = self._gen_module_var(namespaces) if len(namespaces) > len(self.top_module_namespaces): wrapped += ( ' ' * 4 + 'pybind11::module {module_var} = ' '{parent_module_var}.def_submodule("{namespace}", "' '{namespace} submodule");\n'.format( module_var=module_var, namespace=namespace.name, parent_module_var=self._gen_module_var( namespaces[:-1]), )) # Wrap an include statement, namespace, class or enum for element in namespace.content: if isinstance(element, parser.Include): include = "{}\n".format(element) # replace the angle brackets with quotes include = include.replace('<', '"').replace('>', '"') includes += include elif isinstance(element, parser.Namespace): wrapped_namespace, includes_namespace = self.wrap_namespace( element) wrapped += wrapped_namespace includes += includes_namespace elif isinstance(element, instantiator.InstantiatedClass): wrapped += self.wrap_instantiated_class(element) wrapped += self.wrap_enums(element.enums, element) elif isinstance(element, instantiator.InstantiatedDeclaration): wrapped += self.wrap_instantiated_declaration(element) elif 
isinstance(element, parser.Variable): variable_namespace = self._add_namespaces('', namespaces) wrapped += self.wrap_variable(namespace=variable_namespace, module_var=module_var, variable=element, prefix='\n' + ' ' * 4) elif isinstance(element, parser.Enum): wrapped += self.wrap_enum(element) # Global functions. all_funcs = [ func for func in namespace.content if isinstance(func, (parser.GlobalFunction, instantiator.InstantiatedGlobalFunction)) ] wrapped += self.wrap_functions( all_funcs, self._add_namespaces('', namespaces)[:-2], prefix='\n' + ' ' * 4 + module_var, suffix=';', ) return wrapped, includes def wrap_file(self, content, module_name=None, submodules=None): """ Wrap the code in the interface file. Args: content: The contents of the interface file. module_name: The name of the module. submodules: List of other interface file names that should be linked to. """ # Parse the contents of the interface file module = parser.Module.parseString(content) # Instantiate all templates module = instantiator.instantiate_namespace(module) wrapped_namespace, includes = self.wrap_namespace(module) # Export classes for serialization. boost_class_export = "" for cpp_class in self._serializing_classes: new_name = cpp_class # The boost's macro doesn't like commas, so we have to typedef. if ',' in cpp_class: new_name = re.sub("[,:<> ]", "", cpp_class) boost_class_export += "typedef {cpp_class} {new_name};\n".format( # noqa cpp_class=cpp_class, new_name=new_name) boost_class_export += "BOOST_CLASS_EXPORT({new_name})\n".format( new_name=new_name, ) # Reset the serializing classes list self._serializing_classes = [] holder_type = "PYBIND11_DECLARE_HOLDER_TYPE(TYPE_PLACEHOLDER_DONOTUSE, " \ "{shared_ptr_type}::shared_ptr<TYPE_PLACEHOLDER_DONOTUSE>);" include_boost = "#include <boost/shared_ptr.hpp>" if self.use_boost else "" submodules_init = [] if submodules is not None: module_def = "PYBIND11_MODULE({0}, m_)".format(module_name) for idx, submodule in enumerate(submodules): submodules[idx] = "void {0}(py::module_ &);".format(submodule) submodules_init.append("{0}(m_);".format(submodule)) else: module_def = "void {0}(py::module_ &m_)".format(module_name) submodules = [] return self.module_template.format( include_boost=include_boost, module_def=module_def, module_name=module_name, includes=includes, holder_type=holder_type.format( shared_ptr_type=('boost' if self.use_boost else 'std')) if self.use_boost else "", wrapped_namespace=wrapped_namespace, boost_class_export=boost_class_export, submodules="\n".join(submodules), submodules_init="\n".join(submodules_init), ) def wrap_submodule(self, source): """ Wrap a list of submodule files, i.e. a set of interface files which are in support of a larger wrapping project. E.g. This is used in GTSAM where we have a main gtsam.i, but various smaller .i files which are the submodules. The benefit of this scheme is that it reduces compute and memory usage during compilation. Args: source: Interface file which forms the submodule. """ filename = Path(source).name module_name = Path(source).stem # Read in the complete interface (.i) file with open(source, "r") as f: content = f.read() # Wrap the read-in content cc_content = self.wrap_file(content, module_name=module_name) # Generate the C++ code which Pybind11 will use. with open(filename.replace(".i", ".cpp"), "w") as f: f.write(cc_content) def wrap(self, sources, main_module_name): """ Wrap all the main interface file. Args: sources: List of all interface files. The first file should be the main module. 
main_module_name: The name for the main module. """ main_module = sources[0] # Get all the submodule names. submodules = [] for source in sources[1:]: module_name = Path(source).stem submodules.append(module_name) with open(main_module, "r") as f: content = f.read() cc_content = self.wrap_file(content, module_name=self.module_name, submodules=submodules) # Generate the C++ code which Pybind11 will use. with open(main_module_name, "w") as f: f.write(cc_content)
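
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The wrapper
# class is normally driven by a small command-line front end; the class name,
# constructor arguments and file names below are hypothetical examples — only
# the `wrap` / `wrap_submodule` calls correspond to methods defined above.
#
#   wrapper = SomePybindWrapperClass(...)        # constructor not shown here
#   # Main interface file first, then the submodule interface files to link:
#   wrapper.wrap(["example.i", "geometry.i"], "example_module.cpp")
#   # Each submodule is also wrapped on its own, which keeps per-file compile
#   # time and memory down:
#   wrapper.wrap_submodule("geometry.i")
#
# `wrap` ends up in `wrap_file`, which emits a PYBIND11_MODULE(...) body that
# forward-declares `void <submodule>(py::module_ &);` and calls
# `<submodule>(m_);` once per linked submodule, while `wrap_submodule`
# generates exactly those `void <submodule>(py::module_ &m_)` definitions.
# ---------------------------------------------------------------------------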
the-stack_0_6093
# Copyright (c) 2014 Dark Secret Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import datetime

import dateutil.parser


class Criteria(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def should_fire(self, stream, last_event, debugger, now=None):
        return False


class Inactive(Criteria):
    def __init__(self, expiry_in_seconds):
        super(Inactive, self).__init__()
        self.expiry_in_seconds = expiry_in_seconds

    def should_fire(self, stream, last_event, debugger, now=None):
        if now is None:
            now = datetime.datetime.utcnow()
        secs = (now - stream.last_update).seconds
        #print "Stream %s = %d seconds (%d)" % (stream.uuid, secs, self.expiry_in_seconds)
        return debugger.check(secs > self.expiry_in_seconds, "no timeout")


class EventType(Criteria):
    def __init__(self, event_type):
        super(EventType, self).__init__()
        self.event_type = event_type

    def should_fire(self, stream, last_event, debugger, now=None):
        if not last_event:
            return debugger.criteria_mismatch('no last event')
        return debugger.check(last_event['event_type'] == self.event_type,
                              "wrong event type")


class And(Criteria):
    def __init__(self, criteria_list):
        super(And, self).__init__()
        self.criteria_list = criteria_list

    def should_fire(self, stream, last_event, debugger, now=None):
        should = [c.should_fire(stream, last_event, debugger, now)
                  for c in self.criteria_list]
        return debugger.check(all(should), "AND failed")


class EndOfDayExists(Criteria):
    def __init__(self, exists_name):
        super(EndOfDayExists, self).__init__()
        self.exists_name = exists_name

    def _is_zero_hour(self, tyme):
        return tyme.time() == datetime.time.min

    def should_fire(self, stream, last_event, debugger, now=None):
        if not last_event:
            stream.load_events()  # Ouch ... expensive.
            if len(stream.events) == 0:
                return debugger.criteria_mismatch("No events")
            last_event = stream.events[-1]
        if last_event['event_type'] != self.exists_name:
            return debugger.criteria_mismatch("Wrong event type")
        payload = last_event['payload']
        audit_start = payload.get('audit_period_beginning')
        audit_end = payload.get('audit_period_ending')
        if None in [audit_start, audit_end]:
            return debugger.criteria_mismatch("No audit beginning/end")
        audit_start = dateutil.parser.parse(audit_start)
        audit_end = dateutil.parser.parse(audit_end)
        return debugger.check(self._is_zero_hour(audit_start) and
                              self._is_zero_hour(audit_end),
                              "time != 00:00:00.0 ")
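
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The pipeline
# that normally supplies `stream` and `debugger` objects is not shown here,
# so the two classes below are hypothetical test doubles exposing only what
# the criteria touch (`last_update`, `check`, `criteria_mismatch`); the event
# type string is likewise a made-up example.
if __name__ == "__main__":
    class _FakeDebugger(object):
        def check(self, condition, reason):
            return condition

        def criteria_mismatch(self, reason):
            return False

    class _FakeStream(object):
        def __init__(self, last_update):
            self.last_update = last_update

    now = datetime.datetime.utcnow()
    stream = _FakeStream(last_update=now - datetime.timedelta(seconds=120))
    last_event = {'event_type': 'compute.instance.exists'}

    # Fire only when the stream has been idle longer than 60s and the last
    # event has the expected type; prints True for this synthetic stream.
    criteria = And([Inactive(60), EventType('compute.instance.exists')])
    print(criteria.should_fire(stream, last_event, _FakeDebugger(), now=now))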
the-stack_0_6094
from common.func_plots import get_plot_pca
from common.func_plots import get_plot_line
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from datetime import datetime as ddtime
from scipy import signal
import datetime as dtime
import pmdarima as pm
import pandas as pd
import numpy as np
import math

__author__ = "Jose Fernando Montoya Cardona"
__credits__ = ["Jose Fernando Montoya Cardona"]
__email__ = "[email protected]"


def round_down_to_even(f):
    return math.floor(f / 2.) * 2


def transform_data(df_data, date_i, date_f, get_only_dem=False):
    cols_demh = ['d' + str(i).zfill(2) for i in range(1, 25)]
    cols_df_t = ['fecha', 'codsbm'] + cols_demh
    dft = df_data[cols_df_t]
    cols_dft = [str(i).zfill(2) + ':00:00' for i in range(0, 24)]
    dft.columns = ['fecha', 'codsbm'] + cols_dft
    date_f = (ddtime.strptime(date_f, '%Y-%m-%d') + dtime.timedelta(days=1)).strftime('%Y-%m-%d')
    dft = dft[(dft.fecha >= ddtime.strptime(date_i, '%Y-%m-%d')) & (dft.fecha < ddtime.strptime(date_f, '%Y-%m-%d'))]
    dft = pd.melt(dft, id_vars=['fecha', 'codsbm'], var_name='hora', value_vars=cols_dft, value_name='demanda')
    dft['fechahora'] = dft['fecha'].dt.strftime('%Y-%m-%d') + ' ' + dft['hora']
    dft.fechahora = pd.to_datetime(dft.fechahora)
    dft_sbm = pd.pivot_table(data=dft, values='demanda', columns='codsbm', index=dft.fechahora)
    if get_only_dem:
        dft = dft.groupby(by='fechahora')['demanda'].sum()
        dft = dft.reset_index()
    return dft, dft_sbm


def normalize_min_max(arr):
    '''
    Scales the values of a 1-D array to the [0, 1] range (min-max scaling).
    Input: array -- NaN entries are dropped before scaling
    Output: flattened array scaled to [0, 1]
    '''
    arr = arr[~np.isnan(arr)].reshape(-1, 1)
    minmax_scaler = MinMaxScaler()
    df_norm = minmax_scaler.fit_transform(arr)
    # df_norm = pd.DataFrame(df_norm, columns=df.columns)
    return df_norm.reshape(-1)


def standar_z(df):
    '''
    Applies Z standardization to the variables.
    Input: DataFrame -- rows: samples, columns: features
    Output: DataFrame standardized column-wise, i.e. per feature
    '''
    standar_scaler = StandardScaler()
    df_stand = standar_scaler.fit_transform(df)
    df_stand = pd.DataFrame(df_stand, columns=df.columns, index=df.index)
    return df_stand


def get_matrix_pca(matrix_features, exp_variance=0.9, show_plot=False, dynamic_component=True, n_comp=40):
    pca = PCA(n_components=matrix_features.shape[1], svd_solver='full')
    pca.fit(matrix_features)
    ev = pd.DataFrame({'Explained_variance': pca.explained_variance_ratio_,
                       'Cum_explained_variance': np.cumsum(pca.explained_variance_ratio_),
                       'n_components': list(range(1, matrix_features.shape[1] + 1))
                       })
    if dynamic_component:
        n_components = ev[ev['Cum_explained_variance'] <= exp_variance]['n_components'].values[-1]
        print('Getting PCA')
        print('Number of components that explain ', '{:.1f}'.format(exp_variance * 100),
              '% of the variance: ', n_components)
    else:
        n_components = n_comp
        exp_var = ev[ev['n_components'] == n_components]['Cum_explained_variance'].values[0]
        print('Getting PCA')
        print('With ', n_components, ' components, ', '{:.1f}'.format(exp_var * 100),
              '% of the variance is explained')
    pca_int = PCA(n_components=n_components)
    m_pca = pca_int.fit_transform(matrix_features)
    m_pca = pd.DataFrame(m_pca, columns=['PC_' + str(pca).zfill(2) for pca in range(1, n_components + 1)],
                         index=matrix_features.index)
    if show_plot:
        get_plot_pca(ev)
    return m_pca


def group_dem_users_cluster(dem_data, m_features_labels):
    df_labels_sbm = 
m_features_labels.reset_index()[['codsbm', 'labels']] df_group = pd.merge(dem_data, df_labels_sbm, how='left', left_on='codsbm', right_on='codsbm') df_group_label = df_group.groupby(by=['fechahora', 'labels'])['demanda'].sum().reset_index() df_train_labels = pd.pivot_table(data=df_group_label, values='demanda', columns='labels', index=df_group_label.fechahora) return df_train_labels def log_transform(s_dem_data): s_log_data = pd.Series(np.log(s_dem_data)) return s_log_data def get_period_signal_num_k(data, n_coeff_fourier=4): f, pxx_den = signal.periodogram(data) m_f = round_down_to_even(round(1 / f[list(pxx_den).index(max(pxx_den))], 0)) if m_f < n_coeff_fourier * 2: k_f = round_down_to_even(m_f / 2) else: k_f = n_coeff_fourier return m_f, k_f, f, pxx_den def conditional_seasonal(seasonal, num_forecast, m, gap_pred): if gap_pred + num_forecast > m: gap_seasonal = list(seasonal)[gap_pred:m] new_n_forecast = gap_pred + num_forecast - m ratio = new_n_forecast / m ent, res = int(str(ratio).split('.')[0]), int(round((ratio - int(str(ratio).split('.')[0])) * m, 0)) pred_seasonal = np.array(gap_seasonal + list(seasonal)[0:m] * ent + list(seasonal)[0:res]) return pred_seasonal elif gap_pred + num_forecast <= m: pred_seasonal = np.array(list(seasonal)[gap_pred:num_forecast+gap_pred]) return pred_seasonal def second_conditional_seasonal(seasonal, num_forecast, m): if num_forecast > m: ratio = num_forecast / m ent, res = int(str(ratio).split('.')[0]), int(round((ratio - int(str(ratio).split('.')[0])) * m, 0)) pred_seasonal = np.array(list(seasonal)[0:m] * ent + list(seasonal)[0:res]) return pred_seasonal elif num_forecast <= m: # pred_seasonal = np.array(list(seasonal)[int(m/2):num_forecast+int(m/2)]) pred_seasonal = np.array(list(seasonal)[0:num_forecast]) return pred_seasonal def get_seasonal(seasonal, num_forecast, m, gap_pred): print('seasonal_shape: ', seasonal.shape, 'period: ', m) if gap_pred/m <= 1: print('condition_gap/m < 1: ', gap_pred/m) # pred_seasonal = conditional_seasonal(seasonal, num_forecast, m, gap_pred) pred_seasonal = second_conditional_seasonal(seasonal, num_forecast, m) else: ratio_gap = gap_pred/m new_gap_pred = int(round((ratio_gap - int(str(ratio_gap).split('.')[0])) * m, 0)) # pred_seasonal = conditional_seasonal(seasonal, num_forecast, m, new_gap_pred) pred_seasonal = second_conditional_seasonal(seasonal, num_forecast, m) return pred_seasonal def decompose_series_forecast_seasonal(series, m, forecast_seasonal, gap_pred=0, num_forecast=24, type_decompose='additive', filter_decompose=None): s_decompose = pm.arima.decompose(series, type_=type_decompose, m=m, filter_=filter_decompose) get_plot_line(pd.DataFrame(s_decompose.seasonal)) seasonal = s_decompose.seasonal pred_seasonal = get_seasonal(seasonal, num_forecast, m, gap_pred) if type_decompose == 'additive': forecast_seasonal += pred_seasonal elif type_decompose == 'multiplicative': forecast_seasonal = forecast_seasonal * pred_seasonal trend = np.array(s_decompose.trend)[~np.isnan(np.array(s_decompose.trend))] residual = s_decompose.random[~np.isnan(s_decompose.random)] trend_residual = trend + residual return trend_residual, forecast_seasonal def decompose_series_search_periods(data, type_decompose='additive', num_decompose=1, filter_decompose=None, num_forecast=24): threshold_power = 6 gap = 0 periods_decompose = [] if type_decompose == 'additive': forecast_seasonal = np.zeros(num_forecast) elif type_decompose == 'multiplicative': forecast_seasonal = np.ones(num_forecast) else: raise ValueError('invalid variable type 
decompose {}.'.format(type_decompose)) for i in range(1, num_decompose+1): len_data = len(data) val_period, _, f, pxx_den = get_period_signal_num_k(data) if val_period < len_data: m = val_period else: periods = 1 / f[np.where(pxx_den >= max(pxx_den) / threshold_power)] powers = pxx_den[np.where(pxx_den >= max(pxx_den) / threshold_power)] if len(periods) > 1: new_periods = periods[1:] new_powers = powers[1:] m = round_down_to_even(round(new_periods[list(new_powers).index(max(new_powers))], 0)) else: m = val_period if m < len_data: periods_decompose.append(str(m)) data, forecast_seasonal = decompose_series_forecast_seasonal(series=data, m=m , forecast_seasonal=forecast_seasonal , num_forecast=num_forecast , type_decompose=type_decompose , filter_decompose=filter_decompose , gap_pred=gap) gap += int(m / 2) else: print('max_num_decompose_possible: ', i-1) return forecast_seasonal, data, periods_decompose return forecast_seasonal, data, periods_decompose def decompose_series_with_periods(data, list_periods, type_decompose='additive', filter_decompose=None, num_forecast=24): gap = 0 if type_decompose == 'additive': forecast_seasonal = np.zeros(num_forecast) elif type_decompose == 'multiplicative': forecast_seasonal = np.ones(num_forecast) else: raise ValueError('invalid variable type decompose {}.'.format(type_decompose)) for m in list_periods: m = int(m) len_data = len(data) if m < len_data: data, forecast_seasonal = decompose_series_forecast_seasonal(data, m, forecast_seasonal , num_forecast=num_forecast , type_decompose=type_decompose , gap_pred=gap) gap += int(m / 2) else: raise ValueError('invalid period {} to decompose because length of signal is {}.'.format(m, len_data)) return forecast_seasonal, data, gap
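
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the periodogram-based
# period detection performed by get_period_signal_num_k, run on a synthetic
# hourly series with a known 24-sample cycle.  The series is made up, and the
# module's own imports (common.func_plots, pmdarima) must be importable for
# this file to run at all.
if __name__ == "__main__":
    t = np.arange(24 * 30)                        # 30 days of hourly samples
    demo = 100 + 10 * np.sin(2 * np.pi * t / 24)  # dominant daily cycle
    m_det, k_det, _, _ = get_period_signal_num_k(demo, n_coeff_fourier=4)
    print('detected period:', m_det, '-> fourier terms:', k_det)  # expects 24 and 4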
the-stack_0_6099
#!/usr/bin/env python import os import re #Definitions def run(files=None,verbose=True,overwrite=None,output=None,macros={},build='',compile_string=''): l=create_file_objs(files,macros) mod2fil=file_objs_to_mod_dict(file_objs=l) depends=get_depends(fob=l,m2f=mod2fil) if verbose: for i in depends.keys(): print("\033[032m"+i+"\033[039m depends on :\033[034m") for j in depends[i]: print("\t"+j) print("\033[039m") if output is None: output = "makefile.dep" tmp=write_depend(outfile=output,dep=depends,overwrite=overwrite,build=build,compile_string=compile_string) return depends def write_depend(outfile="makefile.dep",dep=[],overwrite=False,build='',compile_string=''): "Write the dependencies to outfile" #Test file doesn't exist if os.path.exists(outfile): if not(overwrite): print("\033[031mWarning file exists.\033[039m") opt=raw_input("Overwrite? Y... for yes.") else: opt="y" if opt.lower().startswith("y"): pass else: return #Open file f=open(outfile,'w') f.write('# This file is generated automatically. DO NOT EDIT!\n') for i in dep.keys(): fil,_=os.path.splitext(i) # make each object file depend on it's source file stri="\n"+os.path.join(build, fil+".o"+" : src/defines.inc src/third_party/mersenne_twister.o "+i) for j in dep[i]: fdep,_=os.path.splitext(j) stri=stri+" \\\n\t"+os.path.join(build, fdep+".o") stri=stri+"\n" if compile_string: stri=stri+"\t"+compile_string+" "+i stri=stri+" -o "+os.path.join(build, fil+".o") f.write(stri) f.close() return def get_source(ext=[".f90",".F90"]): "Return all files ending with any of ext" tmp=os.listdir(".") fil=[] for i in ext: fil.extend(filter(lambda x: x.endswith(i),tmp)) return fil def create_file_objs(files=None, macros={}): l=[] if files is None: files = get_source() for i in files: source_file = file_obj() source_file.file_name = i source_file.uses = get_uses(i,macros) source_file.contains = get_contains(i) l.append(source_file) return l def get_uses(infile=None, macros={}): "Return which modules are used in infile after expanding macros" p=re.compile("^\s*use\s*(?P<moduse>\w*)\s*(,)?\s*(only)?\s*(:)?.*?$",re.IGNORECASE).match intrinsic = re.compile("^\s*use\s*(,)?\s*(only)?\s*,\s*intrinsic.*$",re.IGNORECASE).match uses=[] with open(infile,'r') as f: t=f.readlines() for i in t: tmp=p(i) if tmp and not intrinsic(i): uses.append(tmp.group('moduse').strip()) # Remove duplicates uniq_mods = list(set(uses)) for i, mod in enumerate(uniq_mods): for k, v in macros.items(): if re.match(k, mod, re.IGNORECASE): uniq_mods[i] = mod.replace(k,v) return uniq_mods def get_contains(infile=None): "Return all the modules that are in infile" p=re.compile("^\s*module\s*(?P<modname>\w*)",re.IGNORECASE).match contains=[] with open(infile,'r') as f: t=f.readlines() for i in t: tmp=p(i) if tmp: contains.append(tmp.group('modname').strip()) # Remove duplicates before returning return list(set(contains)) def file_objs_to_mod_dict(file_objs=[]): "Turn a list of file_objs in a dictionary, containing which modules depend on which files" dic={} for i in file_objs: for j in i.contains: dic[j.lower()]=i.file_name return dic def get_depends(fob=[],m2f=[]): deps={} for i in fob: tmp=[] for j in i.uses: try: tmp.append(m2f[j.lower()]) except: print("\033[031mWarning:\033[039m module \033[032m"+j+"\033[039m used in "+i.file_name+" not defined in any files. 
Skipping...") deps[i.file_name]=tmp return deps class file_obj: def __init__(self): self.file_name=None self.uses=None self.contains=None self.depends_on=None #Script if __name__ == "__main__": import argparse # Add command line arguments parser = argparse.ArgumentParser(description='Generate Fortran dependencies') parser.add_argument('-f','--files',nargs='+',help='Files to process') parser.add_argument('-D',nargs='+',action='append',metavar='NAME=DESCRIPTION', help="""The macro NAME is replaced by DEFINITION in 'use' statements""") parser.add_argument('-b','--build',nargs=1,help='Build Directory (prepended to all files in output', default='') parser.add_argument('-o','--output',nargs=1,help='Output file') parser.add_argument('-v','--verbose',action='store_true',help='explain what is done') parser.add_argument('-w','--overwrite',action='store_true',help='Overwrite output file without warning') parser.add_argument('-c','--compile-string',nargs=1,help='String to compile with') # Parse the command line arguments args = parser.parse_args() # Assemble a dictionary out of the macro definitions macros = {} if args.D: for arg in args.D: for var in arg: temp = var.split('=') macros[temp[0]] = temp[1] output = args.output[0] if args.output else None build = args.build[0] if args.build else '' compile_string = args.compile_string[0] if args.compile_string else '' run(files=args.files, verbose=args.verbose, overwrite=args.overwrite, macros=macros, output=output, build=build, compile_string=compile_string)
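
# ---------------------------------------------------------------------------
# Illustrative invocation (not part of the original script); the script name,
# source file names and macro value below are hypothetical examples:
#
#   python fort_depend.py -f wlcsim.f90 params.f90 -b build \
#       -o makefile.dep -D random_mod=mersenne_twister -c "$(FC) $(FFLAGS) -c"
#
# With those flags, write_depend() emits one rule per source of the form
#
#   build/wlcsim.o : src/defines.inc src/third_party/mersenne_twister.o wlcsim.f90 \
#       build/params.o
#       $(FC) $(FFLAGS) -c wlcsim.f90 -o build/wlcsim.o
#
# assuming wlcsim.f90 uses a module defined in params.f90.  The same output
# can be produced from Python by calling run() directly with files=, macros=,
# build=, output= and compile_string= arguments.
# ---------------------------------------------------------------------------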
the-stack_0_6100
from django import forms def should_be_empty(value): if value: raise forms.ValidationError('Field is not empty') class ContactForm(forms.Form): name = forms.CharField(max_length=80, widget=forms.TextInput( attrs={'placeholder': 'Your Name', 'class': 'form-control'})) email = forms.EmailField(widget=forms.EmailInput( attrs={'placeholder': 'Your Email', 'class': 'form-control'})) subject = forms.CharField(max_length=80, widget=forms.TextInput( attrs={'placeholder': 'Your Subject', 'class': 'form-control'})) message = forms.CharField(widget=forms.Textarea( attrs={'placeholder': 'Your Message', 'class': 'form-control'})) forcefield = forms.CharField( required=False, widget=forms.HiddenInput, label='Leave empty', validators=[should_be_empty])
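
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The hidden
# `forcefield` input is a honeypot: real visitors leave it empty, while naive
# spam bots that fill in every field trip the `should_be_empty` validator and
# the form fails validation.  Inside a configured Django project (field values
# here are made-up examples):
#
#   form = ContactForm(data={'name': 'Jane', 'email': 'jane@example.com',
#                            'subject': 'Hello', 'message': 'Hi there',
#                            'forcefield': ''})
#   form.is_valid()      # True; any non-empty 'forcefield' makes this False
# ---------------------------------------------------------------------------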
the-stack_0_6101
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../python')) # -- Project information ----------------------------------------------------- project = u'maya_mock' copyright = u'2019, Renaud Lessard Larouche' author = u'Renaud Lessard Larouche' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags release = u'' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = 'maya_mockdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'maya_mock.tex', u'maya\\_mock Documentation', u'Renaud Lessard Larouche', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'maya_mock', u'maya_mock Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'maya_mock', u'maya_mock Documentation', author, 'maya_mock', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # -- Extension configuration -------------------------------------------------
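
# ---------------------------------------------------------------------------
# Illustrative note (not part of the generated config): with this conf.py the
# HTML docs are typically built from the documentation directory with
# something like
#
#   sphinx-build -b html . _build/html
#
# The sys.path.insert(0, os.path.abspath('../python')) call near the top is
# what lets sphinx.ext.autodoc import the maya_mock package during that build,
# and the sphinx_rtd_theme package must be installed for the chosen html_theme.
# ---------------------------------------------------------------------------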