Dataset columns (one data row per source file follows below; fields appear in this order, separated by "|"):

| column | dtype | length / range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
583fb188d45151a3bf34c90fb00eeaa9ca9b8e95 | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w04_py/source/pygame_test/first.py | d9f1e4c556e714fd6d0a6612e0ee319f40a2b401 | [] | no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | print("hello python")
| [
"[email protected]"
] | |
e02a62dfeb68a1076db30d166b57facbc45c7901 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /hpJsoWBBHWKZ9NcAi_22.py | e06399915f4bab0bc7737e23679f04adb2ec0486 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py |
import re

def bird_code(lst):
    l = []
    for i in lst:
        x = re.sub(r'\-',' ',i.upper()).split()
        if len(x) == 1:
            l.append(x[0][:4])
        if len(x) == 2:
            l.append(x[0][:2] + x[1][:2])
        if len(x) == 3:
            l.append(x[0][0] + x[1][0] + x[2][:2])
        if len(x) == 4:
            l.append(x[0][0] + x[1][0] + x[2][0] + x[3][0])
    return l
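# Example (illustrative only; these bird names are sample inputs, not part of the original file):
# bird_code(["Black-Capped Chickadee", "Common Tern"])  ->  ['BCCH', 'COTE']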
| [
"[email protected]"
] | |
a46f19dfaa6990720d7398e822caaf69f0d8be00 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/vns/rtmconnatt.py | 28e7b5b6f4be518f824e5a5438863d03a68a0e9f | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,795 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtMConnAtt(Mo):
"""
A target relation to a connector between logical functions. Note that this relation is an internal object.
"""
meta = TargetRelationMeta("cobra.model.vns.RtMConnAtt", "cobra.model.vns.AbsFuncConn")
meta.moClassName = "vnsRtMConnAtt"
meta.rnFormat = "rtMConnAtt-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Function Connector"
meta.writeAccessMask = 0x100000000001
meta.readAccessMask = 0x2000100000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.vns.MConn")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtMConnAtt-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 13555, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4686
prop.defaultValueStr = "vnsAbsFuncConn"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsAbsFuncConn", None, 4686)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 13554, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("MDevToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
608f3de73ace860822927eeb12a62de8a149c2d4 | e2e34d01afc5b6bc6923a721ef92e8ffa8884f86 | /tests/endtoend/http_functions/common_libs_functions/numpy_func/__init__.py | 57d5d08e2d852b5b507e92f2ad34a1c8de1104b0 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-functions-python-worker | 094340eeb0c4728e3202749027f01ab75e908bd8 | d4bdf7edc544b6c15e541930f890da790b180ebd | refs/heads/dev | 2023-08-22T22:48:01.645722 | 2023-08-14T14:52:42 | 2023-08-14T14:52:42 | 117,730,503 | 329 | 122 | MIT | 2023-09-01T16:54:58 | 2018-01-16T19:23:54 | Python | UTF-8 | Python | false | false | 384 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import azure.functions as func
import numpy as np
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
    res = "array: {}".format(np.array([1, 2], dtype=complex))
    return func.HttpResponse(res)
| [
"[email protected]"
] | |
73e47e7cfa41cb0ff62204dacbecb1028a5f1b9b | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200421214800.py | e15827330f703c4b569ca048f465a724d1b16375 | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,321 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partรญcula libre en un toro infinito."""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial armรณnico para una posiciรณn x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmรณnico para una posiciรณn x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
Usage: computes the theoretical quantum probability of finding the harmonic
oscillator (immersed in a thermal bath at inverse temperature beta) at position x.
Receives:
x: float -> position.
beta: float -> inverse temperature in reduced units, beta = 1/T.
Returns:
theoretical quantum probability at position x for inverse temperature beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def Z_QHO(beta):
"""Uso: devuelve valor de funciรณn de particiรณn para el QHO unidimensional"""
return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
"""Uso: devuelve valor de energรญa interna para el QHO unidimensional"""
return 0.5/np.tanh(0.5*beta)
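# Illustrative check (optional sketch, not required by the script): the two helpers above
# are consistent with the thermodynamic identity <E> = -d(ln Z)/d(beta), which is also how
# average_energy() below recovers <E> numerically from Z via np.gradient.
# betas = np.linspace(1, 10, 500)
# E_numeric = np.gradient(-np.log(Z_QHO(betas)), betas)
# assert np.allclose(E_numeric[1:-1], E_QHO_avg_theo(betas)[1:-1], atol=1e-3)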
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
Usage: returns the density matrix in the Trotter (high-temperature) approximation
under the influence of the potential "potential".
Receives:
x_max: float -> the x values lie in the interval (-x_max, x_max).
nx: int -> number of (equally spaced) x values considered.
beta: float -> inverse temperature in reduced units.
potential: func -> interaction potential. Must be a function of x.
Returns:
rho: numpy array, shape=(nx,nx) -> density matrix in the Trotter approximation for
high temperatures and the given potential.
grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
dx: float -> spacing between consecutive values of grid_x
"""
nx = int(nx)
# If nx is even, change it to the nearest odd number so that 0 is included in the x values
if nx%2 == 0:
nx = nx + 1
# Position discretization step given the x_max and nx inputs
dx = 2 * x_max/(nx-1)
# List of x values given the discretization and x_max
grid_x = [i*dx for i in range(-int((nx-1)/2),int((nx-1)/2 + 1))]
# Construction of the density matrix given by the Trotter approximation
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
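# The matrix built above encodes the symmetric Trotter factorization
# rho(x, x'; beta) ~ exp(-beta*V(x)/2) * rho_free(x, x'; beta) * exp(-beta*V(x')/2),
# which is accurate for small beta (high temperature).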
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
"""
Usage: returns the density matrix after applying the matrix squaring algorithm N_iter times.
The first iteration uses the density matrix given by the input rho (at inverse
temperature beta_ini); each subsequent iteration uses the density matrix generated
by the previous one. The system associated with the resulting density matrix
(after the algorithm finishes) is at inverse temperature
beta_fin = beta_ini * 2**(N_iter).
Receives:
rho: numpy array, shape=(nx,nx) -> density matrix discretized at the values given
by grid_x.
grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
N_iter: int -> number of iterations of the algorithm.
beta_ini: float -> inverse temperature associated with the density
matrix rho given as input.
print_steps: bool -> whether to print the beta value at each
iteration.
Returns:
rho: numpy array, shape=(nx,nx) -> density matrix of the state at inverse
temperature beta_fin.
trace_rho: float -> trace of the density matrix at inverse temperature
beta_fin. With the definition of rho we use, this
equals the partition function at that
temperature.
beta_fin: float -> inverse temperature of the system associated with rho.
"""
# Discretization step of the positions
dx = grid_x[1] - grid_x[0]
# Compute beta_fin from the beta_ini and N_iter inputs
beta_fin = beta_ini * 2 ** N_iter
# Print relevant information
if print_steps:
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
# Iterate the matrix squaring algorithm
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
# Print relevant information
if print_steps:
print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
if print_steps:
print('----------------------------------------------------------------\n' +
u'beta_fin = %.3f'%beta_fin)
# Compute the trace of rho
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
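# Illustrative check (optional sketch, not required by the script): for a free particle,
# one squaring step should reproduce rho_free at twice the inverse temperature, since
# rho(x, x'; 2*beta) = integral dx'' rho(x, x''; beta) * rho(x'', x'; beta).
# beta = 1.0
# grid = np.linspace(-5., 5., 201)
# rho_beta = np.array([[rho_free(x, xp, beta) for x in grid] for xp in grid])
# rho_2beta, _, _ = density_matrix_squaring(rho_beta, grid, N_iter=1, beta_ini=beta, print_steps=False)
# i = np.arange(50, 151)  # compare interior points, away from the truncated grid edges
# assert np.allclose(rho_2beta[np.ix_(i, i)], rho_free(grid[i][:, None], grid[i][None, :], 2*beta), atol=1e-3)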
def save_csv(data, data_headers=None, file_name='file.csv', relevant_info=None, print_data=True):
"""
Usage: data must contain lists that will become the columns of a CSV file saved
under the name file_name. relevant_info adds comments in the first lines of the file.
Receives:
data: array of arrays, shape=(nx,ny) -> each column is a column of the file.
data_headers: numpy array, shape=(nx,) -> column names.
file_name: str -> name of the file in which the data is saved.
relevant_info: list of str -> information added as comments in the
first lines. Each element of this list
is added as a new line.
print_data: bool -> whether to print the saved data on screen.
Returns:
data_pdDF: pd.DataFrame -> data as a "pandas data frame".
Also saves a file with the data and the relevant information in its first lines.
"""
# Almacena datos de probabilifad en diccionario: grid_x para posiciones y x_weights para
# valores de densidad de probabilidad.
data = np.array(data)
number_of_columns = len(data.transpose())
if file_name=='file.csv':
script_dir = os.path.dirname(os.path.abspath(__file__)) #path completa para este script
file_name = script_dir + '/' + file_name
if data_headers is None:
data_pdDF = pd.DataFrame(data)
print( 'Nota: no se especificaron headers.\n'+
'Los headers usados en el archivo serรกn los nรบmeros 0, 1, 2,...')
elif len(data_headers)!=number_of_columns:
data_pdDF = pd.DataFrame(data)
print( 'Nota: no hay suficientes headers en data_headers para funciรณn save_csv().\n'+
'Los headers usados en el archivo serรกn los nรบmeros 0, 1, 2,...')
else:
data_pdDF = pd.DataFrame(data,columns=data_headers)
# Crea archivo CSV y agrega comentarios relevantes dados como input
if relevant_info is not None:
with open(file_name,mode='w') as file_csv:
for info in list(relevant_info):
file_csv.write('# '+info+'\n')
file_csv.close()
# Usamos pandas para escribir en archivo en formato csv.
with open(file_name,mode='a') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
else:
with open(file_name,mode='w') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
# Imprime datos en pantalla.
if print_data==True:
print(data_pdDF)
return data_pdDF
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, file_name=None, relevant_info=None,
plot=True, save_plot=True, show_plot=True):
"""
Usage: runs the matrix squaring algorithm iteratively (N_iter times). The first
iteration uses a density matrix in the Trotter approximation at inverse
temperature beta_ini = beta_fin * 2**(-N_iter) for the potential given by potential;
each subsequent iteration uses the density matrix generated by the previous
one. This function also saves pi(x;beta) vs. x data to a text file and plots
pi(x;beta), comparing it with the theory for the quantum
harmonic oscillator.
Receives:
x_max: float -> the x values lie in the interval (-x_max,x_max).
nx: int -> number of x values considered.
N_iter: int -> number of iterations of the matrix squaring algorithm.
beta_fin: float -> inverse temperature we want to reach at the end of
applying the matrix squaring algorithm iteratively.
potential: func -> interaction potential used in the Trotter approximation. Must
be a function of x.
potential_string: str -> name of the potential (used to name the generated
files).
print_steps: bool -> whether to print the steps of the matrix squaring algorithm.
save_data: bool -> whether to save the data to a .csv file.
plot: bool -> whether to plot.
save_plot: bool -> whether to save the figure.
show_plot: bool -> whether to show the figure on screen.
Returns:
rho: numpy array, shape=(nx,nx) -> density matrix of the state at inverse
temperature beta_fin.
trace_rho: float -> trace of the density matrix at inverse temperature
beta_fin. With the definition of "rho" we use, this
equals the partition function at that
temperature.
grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
"""
# Cรกlculo del valor de beta_ini segรบn valores beta_fin y N_iter dados como input
beta_ini = beta_fin * 2**(-N_iter)
# Cรกlculo de rho con aproximaciรณn de Trotter
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
grid_x = np.array(grid_x)
# Aproximaciรณn de rho con matrix squaring iterado N_iter veces.
rho, trace_rho, beta_fin_2 = density_matrix_squaring( rho, grid_x, N_iter,
beta_ini, print_steps )
print( '---------------------------------------------------------' +
'---------------------------------------------------------\n'
u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2) +
u' N_iter = %d Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(N_iter,trace_rho) +
'---------------------------------------------------------' +
'---------------------------------------------------------')
# Normalizaciรณn de rho a 1 y cรกlculo de densidades de probabilidad para valores en grid_x.
rho_normalized = np.copy(rho)/trace_rho
x_weights = np.diag(rho_normalized)
# Guarda datos en archivo .csv.
script_dir = os.path.dirname(os.path.abspath(__file__)) #path completa para este script
if save_data==True:
# Nombre del archivo .csv en el que guardamos valores de pi(x;beta_fin).
if file_name is None:
csv_file_name = script_dir+u'/pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'\
%(potential_string,beta_fin,x_max,nx,N_iter)
else:
csv_file_name = script_dir + '/'+ file_name
# Informaciรณn relevante para agregar como comentario al archivo csv.
if relevant_info is None:
relevant_info = [ 'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin ]
# Guardamos valores de pi(x;beta_fin) en archivo csv.
pi_x_data = [grid_x.copy(),x_weights.copy()]
pi_x_data_headers = ['position_x','prob_density']
pi_x_data = save_csv(pi_x_data,pi_x_data_headers,csv_file_name,relevant_info,print_data=0)
# Grรกfica y comparaciรณn con teorรญa
if plot == True:
plt.figure(figsize=(8,5))
plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfรณrmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teรณrico QHO')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
if file_name is None:
plot_file_name = script_dir+u'/pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps'%(potential_string,beta_fin,x_max,nx,N_iter)
else:
plot_file_name = script_dir+u'/pi_x-ms-plot-'+file_name+'.eps'
plt.savefig(plot_file_name)
if show_plot==True:
plt.show()
plt.close()
return rho, trace_rho, grid_x
def Z_several_values( temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
Z_file_name = None, relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot=False, save_plot=False, show_plot=False ):
"""
"""
beta_max = 1./temp_min
beta_min = 1./temp_max
N_temp = int(N_temp)
beta_array = np.linspace(beta_max,beta_min,N_temp)
Z = []
for beta_fin in beta_array:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
print_steps, save_pi_x_data, file_name, relevant_info, plot,
save_plot, show_plot)
Z.append(trace_rho)
Z_data = np.array([beta_array.copy(),1./beta_array.copy(),Z.copy()],dtype=float)
if save_Z_csv == True:
if Z_file_name is None:
script_dir = os.path.dirname(os.path.abspath(__file__)) #path completa para este script
Z_file_name = 'Z-ms-%s-beta_max_%.3f-'%(potential_string,1./temp_min) +\
'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max) +\
'nx_%d-N_iter_%d.csv'%(nx, N_iter)
Z_file_name = script_dir + '/' + Z_file_name
if relevant_info_Z is None:
relevant_info_Z = [ 'Partition function at several temperatures',
'%s beta_max = %.3f '%(potential_string,1./temp_min) + \
'beta_min = %.3f N_temp = %d '%(1./temp_max,N_temp) + \
'x_max = %.3f nx = %d N_iter = %d'%(x_max,nx, N_iter) ]
Z_data_headers = ['beta','temperature','Z']
Z_data = save_csv( Z_data.transpose(), Z_data_headers, Z_file_name, relevant_info_Z,
print_data = False )
if print_Z_data == True:
print(Z_data)
return Z_data
def average_energy( read_Z_data=True, generate_Z_data=False, Z_file_name = None,
plot_energy=True, save_plot_E=True, show_plot_E=True,
E_plot_name=None,
temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot_pi_x=False, save_plot_pi_x=False, show_plot_pi_x=False ):
"""
"""
if read_Z_data:
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
elif generate_Z_data:
t_0 = time()
Z_data = Z_several_values( temp_min, temp_max, N_temp, save_Z_csv, Z_file_name,
relevant_info_Z, print_Z_data, x_max, nx, N_iter, potential,
potential_string,print_steps, save_pi_x_data, pi_x_file_name,
relevant_info_pi_x, plot_pi_x,save_plot_pi_x, show_plot_pi_x)
t_1 = time()
print( '--------------------------------------------------------------------------\n' +
'%d values of Z(beta) generated --> %.3f sec.'%(N_temp,t_1-t_0))
Z_file_read = Z_data
else:
print( 'Choose whether the partition function data, Z, is generated or read.\n' +
'These options are mutually exclusive. If both are selected, the ' +
'algorithm chooses to read the data.')
# READ DATA IS OK
beta_read = Z_file_read['beta']
temp_read = Z_file_read['temperature']
Z_read = Z_file_read['Z']
E_avg = np.gradient(-np.log(Z_read),beta_read)
if plot_energy:
plt.figure(figsize=(8,5))
plt.plot(temp_read,E_avg,label=u'$\langle E \\rangle$ via path integral\nnaive sampling')
plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'$\langle E \\rangle$ teรณrico')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$\langle E \\rangle$')
if save_plot_E:
if E_plot_name is None:
script_dir = os.path.dirname(os.path.abspath(__file__))
E_plot_name='E-ms-plot-%s-beta_max_%.3f-'%(potential_string,1./temp_min) +\
'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max) +\
'nx_%d-N_iter_%d.eps'%(nx, N_iter)
E_plot_name = script_dir + '/' + E_plot_name
plt.savefig(E_plot_name)
if show_plot_E:
plt.show()
plt.close()
return E_avg, beta_read.to_numpy()
def calc_error(x,xp,dx):
"""
Usage: computes the error as the sum of |x - xp| weighted by the grid spacing dx.
"""
x, xp = np.array(x), np.array(xp)
N = len( x )
if N != len(xp):
raise Exception( 'x and xp must have same lenght.' )
else:
return np.sum(np.abs(x-xp))*dx
def optimization( beta_fin=4, x_max=5, potential=harmonic_potential,
potential_string='harmonic_potential',
nx_min=50, nx_max=1000, nx_sampling=10, N_iter_min=1, N_iter_max=20,
save_opt_data = True, plot = True, save_plot = True):
"""
"""
N_iter_min = int(N_iter_min)
N_iter_max = int(N_iter_max)
nx_min = int(nx_min)
nx_max = int(nx_max)
if nx_min%2==1:
nx_min -= 1
if nx_max%2==0:
nx_max += 1
nx_values = range(nx_max,nx_min,-1*nx_sampling)
N_iter_values = range(N_iter_max,N_iter_min-1,-1)
dx_grid = [2*x_max/(nx-1) for nx in nx_values]
beta_ini_grid = [beta_fin * 2**(-N_iter) for N_iter in N_iter_values]
error = []
for N_iter in N_iter_values:
row = []
for nx in nx_values:
rho,trace_rho,grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
False, False, None, None, False, False, False )
grid_x = np.array(grid_x)
dx = grid_x[1]-grid_x[0]
rho_normalized = np.copy(rho)/trace_rho
pi_x = np.diag(rho_normalized)
theoretical_pi_x = QHO_canonical_ensemble(grid_x,beta_fin)
error_comp_theo = calc_error(pi_x,theoretical_pi_x,dx)
row.append(error_comp_theo)
error.append(row)
error = np.array(error)
print(error)
try:
error = np.where(np.isinf(error),-np.Inf,error)
nan_value = 1.1*max(error)
except:
nan_value = 0
error = np.nan_to_num(error, nan = nan_value, posinf=nan_value, neginf = nan_value)
DX, BETA_INI = np.meshgrid(dx_grid, beta_ini_grid)
if plot:
fig, ax = plt.subplots(1, 1)
cp = plt.imshow(DX,BETA_INI,error)
plt.colorbar(cp)
ax.set_ylabel(u'$\\beta_{ini}$')
ax.set_xlabel('$dx$')
plt.tight_layout()
plt.show()
plt.close()
return error, dx_grid, beta_ini_grid
beta_fin = 4
x_max = 5
potential, potential_string = harmonic_potential, 'harmonic_potential'
nx_min = 200
nx_max = 301
nx_sampling=10
N_iter_min = 1
N_iter_max = 20
t_0 = time()
error, dx_grid, beta_ini_grid = optimization(beta_fin,x_max,potential,potential_string,nx_min,nx_max,nx_sampling,N_iter_min,N_iter_max)
t_1 = time()
print( '----------------------------------------------------------------------------------\n' +
'Optimization: beta_fin=%.3f, x_max=%.3f, potential=%s\n \
nx_min=%d, nx_max=%d, N_iter_min=%d, N_iter_max=%d \ncomputation time = %.3f sec.\n' \
%(beta_fin,x_max,potential_string,nx_min,nx_max,N_iter_min,N_iter_max,t_1-t_0) +
'----------------------------------------------------------------------------------')
print(error)
#################################################################################################
# PANEL DE CONTROL
#
# Decide si corre algoritmo matrix squaring
run_ms_algorithm = False
# Decide si corre algoritmo para cรกlculo de energรญa interna
run_avg_energy = False
# Decide si corre algoritmo para optimizaciรณn de dx y beta_ini
run_optimizarion = False
#
#
#################################################################################################
#################################################################################################
# PARรMETROS GENERALES PARA LAS FIGURAS
#
# Usar latex en texto de figuras y agrandar tamaรฑo de fuente
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
# Obtenemos path para guardar archivos en el mismo directorio donde se ubica el script
script_dir = os.path.dirname(os.path.abspath(__file__))
#
#################################################################################################
#################################################################################################
# CORRE ALGORITMO MATRIX SQUARING
#
# Parรกmetros fรญsicos del algoritmo
x_max = 5.
nx = 201
N_iter = 7
beta_fin = 4
potential, potential_string = harmonic_potential, 'harmonic_potential'
#
# Parรกmetros tรฉcnicos
print_steps = False
save_data = False
file_name = None
relevant_info = None
plot = True
save_plot = False
show_plot = True
if run_ms_algorithm:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
print_steps, save_data, file_name, relevant_info, plot,
save_plot, show_plot)
#
#
#################################################################################################
#################################################################################################
# CORRE ALGORITMO PARA CรLCULO DE ENERGรA INTERNA
#
# Parรกmetros tรฉcnicos funciรณn particiรณn y cรกlculo de energรญa
read_Z_data = False
generate_Z_data = True
Z_file_name = None
plot_energy = True
save_plot_E = True
show_plot_E = True
E_plot_name = None #script_dir + 'E.eps'
#
# Parรกmetros fรญsicos para calcular Z y <E>
temp_min = 1./10
temp_max = 1./2
N_temp = 10
potential, potential_string = harmonic_potential, 'harmonic_potential'
#
# Mรกs parรกmetros tรฉcnicos
save_Z_csv = True
relevant_info_Z = None
print_Z_data = False
x_max = 7.
nx = 201
N_iter = 7
print_steps = False
save_pi_x_data = False
pi_x_file_name = None
relevant_info_pi_x = None
plot_pi_x = False
save_plot_pi_x = False
show_plot_pi_x = False
#
if run_avg_energy:
average_energy( read_Z_data, generate_Z_data, Z_file_name, plot_energy, save_plot_E,
show_plot_E, E_plot_name,
temp_min, temp_max, N_temp, save_Z_csv, relevant_info_Z, print_Z_data,
x_max, nx, N_iter, potential, potential_string, print_steps, save_pi_x_data,
pi_x_file_name, relevant_info_pi_x,plot_pi_x, save_plot_pi_x, show_plot_pi_x)
#
#
################################################################################################# | [
"[email protected]"
] | |
6d66f8b0a71831e4b0a87e5373dab443525903ea | 88c4d5b462998a9c0411a0243ab95ee05ffee60a | /tests/type/test_validation.py | e9f5d5e69f375202cb88188bab1585d0833f7d87 | [
"MIT"
] | permissive | graphql-python/graphql-core | 606f3f3d479d576a4bdcd7d7995c0fddc486282f | 0c93b8452eed38d4f800c7e71cf6f3f3758cd1c6 | refs/heads/main | 2023-09-04T09:22:45.162575 | 2023-06-09T22:13:10 | 2023-06-09T22:13:10 | 143,207,933 | 259 | 101 | MIT | 2023-06-09T22:13:11 | 2018-08-01T20:57:07 | Python | UTF-8 | Python | false | false | 73,423 | py | from operator import attrgetter
from typing import List, Union
from pytest import mark, raises
from graphql.language import DirectiveLocation, parse
from graphql.pyutils import inspect
from graphql.type import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInputType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNamedType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLOutputType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
assert_directive,
assert_enum_type,
assert_input_object_type,
assert_interface_type,
assert_object_type,
assert_scalar_type,
assert_union_type,
assert_valid_schema,
validate_schema,
)
from graphql.utilities import build_schema, extend_schema
from ..utils import dedent
SomeSchema = build_schema(
"""
scalar SomeScalar
interface SomeInterface { f: SomeObject }
type SomeObject implements SomeInterface { f: SomeObject }
union SomeUnion = SomeObject
enum SomeEnum { ONLY }
input SomeInputObject { val: String = "hello" }
directive @SomeDirective on QUERY
"""
)
get_type = SomeSchema.get_type
SomeScalarType = assert_scalar_type(get_type("SomeScalar"))
SomeInterfaceType = assert_interface_type(get_type("SomeInterface"))
SomeObjectType = assert_object_type(get_type("SomeObject"))
SomeUnionType = assert_union_type(get_type("SomeUnion"))
SomeEnumType = assert_enum_type(get_type("SomeEnum"))
SomeInputObjectType = assert_input_object_type(get_type("SomeInputObject"))
SomeDirective = assert_directive(SomeSchema.get_directive("SomeDirective"))
def with_modifiers(
type_: GraphQLNamedType,
) -> List[Union[GraphQLNamedType, GraphQLNonNull, GraphQLList]]:
return [
type_,
GraphQLList(type_),
GraphQLNonNull(type_),
GraphQLNonNull(GraphQLList(type_)),
]
output_types = [
*with_modifiers(GraphQLString),
*with_modifiers(SomeScalarType),
*with_modifiers(SomeEnumType),
*with_modifiers(SomeObjectType),
*with_modifiers(SomeUnionType),
*with_modifiers(SomeInterfaceType),
]
not_output_types = with_modifiers(SomeInputObjectType)
input_types = [
*with_modifiers(GraphQLString),
*with_modifiers(SomeScalarType),
*with_modifiers(SomeEnumType),
*with_modifiers(SomeInputObjectType),
]
not_input_types = [
*with_modifiers(SomeObjectType),
*with_modifiers(SomeUnionType),
*with_modifiers(SomeInterfaceType),
]
not_graphql_types = [
type("IntType", (int,), {"name": "IntType"}),
type("FloatType", (float,), {"name": "FloatType"}),
type("StringType", (str,), {"name": "StringType"}),
]
get_name = attrgetter("__class__.__name__")
def schema_with_field_type(type_):
return GraphQLSchema(
query=GraphQLObjectType(name="Query", fields={"f": GraphQLField(type_)})
)
def describe_type_system_a_schema_must_have_object_root_types():
def accepts_a_schema_whose_query_type_is_an_object_type():
schema = build_schema(
"""
type Query {
test: String
}
"""
)
assert validate_schema(schema) == []
schema_with_def = build_schema(
"""
schema {
query: QueryRoot
}
type QueryRoot {
test: String
}
"""
)
assert validate_schema(schema_with_def) == []
def accepts_a_schema_whose_query_and_mutation_types_are_object_types():
schema = build_schema(
"""
type Query {
test: String
}
type Mutation {
test: String
}
"""
)
assert validate_schema(schema) == []
schema_with_def = build_schema(
"""
schema {
query: QueryRoot
mutation: MutationRoot
}
type QueryRoot {
test: String
}
type MutationRoot {
test: String
}
"""
)
assert validate_schema(schema_with_def) == []
def accepts_a_schema_whose_query_and_subscription_types_are_object_types():
schema = build_schema(
"""
type Query {
test: String
}
type Subscription {
test: String
}
"""
)
assert validate_schema(schema) == []
schema_with_def = build_schema(
"""
schema {
query: QueryRoot
subscription: SubscriptionRoot
}
type QueryRoot {
test: String
}
type SubscriptionRoot {
test: String
}
"""
)
assert validate_schema(schema_with_def) == []
def rejects_a_schema_without_a_query_type():
schema = build_schema(
"""
type Mutation {
test: String
}
"""
)
assert validate_schema(schema) == [
{"message": "Query root type must be provided.", "locations": None}
]
schema_with_def = build_schema(
"""
schema {
mutation: MutationRoot
}
type MutationRoot {
test: String
}
"""
)
assert validate_schema(schema_with_def) == [
{"message": "Query root type must be provided.", "locations": [(2, 13)]}
]
def rejects_a_schema_whose_query_root_type_is_not_an_object_type():
schema = build_schema(
"""
input Query {
test: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Query root type must be Object type,"
" it cannot be Query.",
"locations": [(2, 13)],
}
]
schema_with_def = build_schema(
"""
schema {
query: SomeInputObject
}
input SomeInputObject {
test: String
}
"""
)
assert validate_schema(schema_with_def) == [
{
"message": "Query root type must be Object type,"
" it cannot be SomeInputObject.",
"locations": [(3, 22)],
}
]
def rejects_a_schema_whose_mutation_type_is_an_input_type():
schema = build_schema(
"""
type Query {
field: String
}
input Mutation {
test: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Mutation root type must be Object type if provided,"
" it cannot be Mutation.",
"locations": [(6, 13)],
}
]
schema_with_def = build_schema(
"""
schema {
query: Query
mutation: SomeInputObject
}
type Query {
field: String
}
input SomeInputObject {
test: String
}
"""
)
assert validate_schema(schema_with_def) == [
{
"message": "Mutation root type must be Object type if provided,"
" it cannot be SomeInputObject.",
"locations": [(4, 25)],
}
]
def rejects_a_schema_whose_subscription_type_is_an_input_type():
schema = build_schema(
"""
type Query {
field: String
}
input Subscription {
test: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Subscription root type must be Object type if"
" provided, it cannot be Subscription.",
"locations": [(6, 13)],
}
]
schema_with_def = build_schema(
"""
schema {
query: Query
subscription: SomeInputObject
}
type Query {
field: String
}
input SomeInputObject {
test: String
}
"""
)
assert validate_schema(schema_with_def) == [
{
"message": "Subscription root type must be Object type if"
" provided, it cannot be SomeInputObject.",
"locations": [(4, 29)],
}
]
def rejects_a_schema_extended_with_invalid_root_types():
schema = build_schema(
"""
input SomeInputObject {
test: String
}
scalar SomeScalar
enum SomeEnum {
ENUM_VALUE
}
"""
)
schema = extend_schema(
schema,
parse(
"""
extend schema {
query: SomeInputObject
}
"""
),
)
schema = extend_schema(
schema,
parse(
"""
extend schema {
mutation: SomeScalar
}
"""
),
)
schema = extend_schema(
schema,
parse(
"""
extend schema {
subscription: SomeEnum
}
"""
),
)
assert validate_schema(schema) == [
{
"message": "Query root type must be Object type,"
" it cannot be SomeInputObject.",
"locations": [(3, 26)],
},
{
"message": "Mutation root type must be Object type"
" if provided, it cannot be SomeScalar.",
"locations": [(3, 29)],
},
{
"message": "Subscription root type must be Object type"
" if provided, it cannot be SomeEnum.",
"locations": [(3, 33)],
},
]
def rejects_a_schema_whose_types_are_incorrectly_type():
# invalid schema cannot be built with Python
# construct invalid schema manually
schema = GraphQLSchema(SomeObjectType)
schema.type_map = {
"SomeType": {"name": "SomeType"}, # type: ignore
"SomeDirective": SomeDirective, # type: ignore
}
assert validate_schema(schema) == [
{"message": "Expected GraphQL named type but got: {'name': 'SomeType'}."},
{"message": "Expected GraphQL named type but got: @SomeDirective."},
]
def rejects_a_schema_whose_directives_are_incorrectly_typed():
schema = GraphQLSchema(
SomeObjectType,
directives=[None, "SomeDirective", SomeScalarType], # type: ignore
)
assert validate_schema(schema) == [
{"message": "Expected directive but got: None."},
{"message": "Expected directive but got: 'SomeDirective'."},
{"message": "Expected directive but got: SomeScalar."},
]
def describe_type_system_root_types_must_all_be_different_if_provided():
def accepts_a_schema_with_different_root_types():
schema = build_schema(
"""
type SomeObject1 {
field: String
}
type SomeObject2 {
field: String
}
type SomeObject3 {
field: String
}
schema {
query: SomeObject1
mutation: SomeObject2
subscription: SomeObject3
}
"""
)
assert validate_schema(schema) == []
def rejects_a_schema_where_the_same_type_is_used_for_multiple_root_types():
schema = build_schema(
"""
type SomeObject {
field: String
}
type UniqueObject {
field: String
}
schema {
query: SomeObject
mutation: UniqueObject
subscription: SomeObject
}
"""
)
assert validate_schema(schema) == [
{
"message": "All root types must be different, 'SomeObject' type"
" is used as query and subscription root types.",
"locations": [(11, 22), (13, 29)],
}
]
def rejects_a_schema_where_the_same_type_is_used_for_all_root_types():
schema = build_schema(
"""
type SomeObject {
field: String
}
schema {
query: SomeObject
mutation: SomeObject
subscription: SomeObject
}
"""
)
assert validate_schema(schema) == [
{
"message": "All root types must be different, 'SomeObject' type"
" is used as query, mutation, and subscription root types.",
"locations": [(7, 22), (8, 25), (9, 29)],
}
]
def describe_type_system_objects_must_have_fields():
def accepts_an_object_type_with_fields_object():
schema = build_schema(
"""
type Query {
field: SomeObject
}
type SomeObject {
field: String
}
"""
)
assert validate_schema(schema) == []
def rejects_an_object_type_with_missing_fields():
schema = build_schema(
"""
type Query {
test: IncompleteObject
}
type IncompleteObject
"""
)
assert validate_schema(schema) == [
{
"message": "Type IncompleteObject must define one or more fields.",
"locations": [(6, 13)],
}
]
manual_schema = schema_with_field_type(
GraphQLObjectType("IncompleteObject", {})
)
msg = validate_schema(manual_schema)[0].message
assert msg == "Type IncompleteObject must define one or more fields."
manual_schema_2 = schema_with_field_type(
GraphQLObjectType("IncompleteObject", lambda: {})
)
msg = validate_schema(manual_schema_2)[0].message
assert msg == "Type IncompleteObject must define one or more fields."
def rejects_an_object_type_with_incorrectly_named_fields():
schema = schema_with_field_type(
GraphQLObjectType("SomeObject", {"__badName": GraphQLField(GraphQLString)})
)
msg = validate_schema(schema)[0].message
assert msg == (
"Name '__badName' must not begin with '__',"
" which is reserved by GraphQL introspection."
)
def describe_type_system_field_args_must_be_properly_named():
def accepts_field_args_with_valid_names():
schema = schema_with_field_type(
GraphQLObjectType(
"SomeObject",
{
"goodField": GraphQLField(
GraphQLString, args={"goodArg": GraphQLArgument(GraphQLString)}
)
},
)
)
assert validate_schema(schema) == []
def rejects_field_args_with_invalid_names():
schema = schema_with_field_type(
GraphQLObjectType(
"SomeObject",
{
"badField": GraphQLField(
GraphQLString,
args={"__badName": GraphQLArgument(GraphQLString)},
)
},
)
)
msg = validate_schema(schema)[0].message
assert msg == (
"Name '__badName' must not begin with '__',"
" which is reserved by GraphQL introspection."
)
def describe_type_system_union_types_must_be_valid():
def accepts_a_union_type_with_member_types():
schema = build_schema(
"""
type Query {
test: GoodUnion
}
type TypeA {
field: String
}
type TypeB {
field: String
}
union GoodUnion =
| TypeA
| TypeB
"""
)
assert validate_schema(schema) == []
def rejects_a_union_type_with_empty_types():
schema = build_schema(
"""
type Query {
test: BadUnion
}
union BadUnion
"""
)
schema = extend_schema(
schema,
parse(
"""
directive @test on UNION
extend union BadUnion @test
"""
),
)
assert validate_schema(schema) == [
{
"message": "Union type BadUnion must define one or more member types.",
"locations": [(6, 13), (4, 17)],
}
]
def rejects_a_union_type_with_duplicated_member_type():
schema = build_schema(
"""
type Query {
test: BadUnion
}
type TypeA {
field: String
}
type TypeB {
field: String
}
union BadUnion =
| TypeA
| TypeB
| TypeA
"""
)
assert validate_schema(schema) == [
{
"message": "Union type BadUnion can only include type TypeA once.",
"locations": [(15, 17), (17, 17)],
}
]
schema = extend_schema(schema, parse("extend union BadUnion = TypeB"))
assert validate_schema(schema) == [
{
"message": "Union type BadUnion can only include type TypeA once.",
"locations": [(15, 17), (17, 17)],
},
{
"message": "Union type BadUnion can only include type TypeB once.",
"locations": [(16, 17), (1, 25)],
},
]
def rejects_a_union_type_with_non_object_member_types():
# invalid schema cannot be built with Python
schema = build_schema(
"""
type Query {
test: BadUnion
}
type TypeA {
field: String
}
type TypeB {
field: String
}
union BadUnion =
| TypeA
| String
| TypeB
"""
)
schema = extend_schema(schema, parse("extend union BadUnion = Int"))
assert validate_schema(schema) == [
{
"message": "Union type BadUnion can only include Object types,"
" it cannot include String.",
"locations": [(16, 17)],
},
{
"message": "Union type BadUnion can only include Object types,"
" it cannot include Int.",
"locations": [(1, 25)],
},
]
bad_union_member_types = [
GraphQLString,
GraphQLNonNull(SomeObjectType),
GraphQLList(SomeObjectType),
SomeInterfaceType,
SomeUnionType,
SomeEnumType,
SomeInputObjectType,
]
for member_type in bad_union_member_types:
# invalid union type cannot be built with Python
bad_union = GraphQLUnionType(
"BadUnion", types=[member_type] # type: ignore
)
bad_schema = schema_with_field_type(bad_union)
assert validate_schema(bad_schema) == [
{
"message": "Union type BadUnion can only include Object types,"
+ f" it cannot include {inspect(member_type)}."
}
]
def describe_type_system_input_objects_must_have_fields():
def accepts_an_input_object_type_with_fields():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
field: String
}
"""
)
assert validate_schema(schema) == []
def rejects_an_input_object_type_with_missing_fields():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject
"""
)
schema = extend_schema(
schema,
parse(
"""
directive @test on INPUT_OBJECT
extend input SomeInputObject @test
"""
),
)
assert validate_schema(schema) == [
{
"message": "Input Object type SomeInputObject"
" must define one or more fields.",
"locations": [(6, 13), (4, 17)],
}
]
def accepts_an_input_object_with_breakable_circular_reference():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
self: SomeInputObject
arrayOfSelf: [SomeInputObject]
nonNullArrayOfSelf: [SomeInputObject]!
nonNullArrayOfNonNullSelf: [SomeInputObject!]!
intermediateSelf: AnotherInputObject
}
input AnotherInputObject {
parent: SomeInputObject
}
"""
)
assert validate_schema(schema) == []
def rejects_an_input_object_with_non_breakable_circular_reference():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
startLoop: AnotherInputObject!
}
input AnotherInputObject {
nextInLoop: YetAnotherInputObject!
}
input YetAnotherInputObject {
closeLoop: SomeInputObject!
}
"""
)
assert validate_schema(schema) == [
{
"message": "Cannot reference Input Object 'SomeInputObject'"
" within itself through a series of non-null fields:"
" 'startLoop.nextInLoop.closeLoop'.",
"locations": [(7, 15), (11, 15), (15, 15)],
}
]
def rejects_an_input_object_with_multiple_non_breakable_circular_reference():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
startLoop: AnotherInputObject!
}
input AnotherInputObject {
closeLoop: SomeInputObject!
startSecondLoop: YetAnotherInputObject!
}
input YetAnotherInputObject {
closeSecondLoop: AnotherInputObject!
nonNullSelf: YetAnotherInputObject!
}
"""
)
assert validate_schema(schema) == [
{
"message": "Cannot reference Input Object 'SomeInputObject'"
" within itself through a series of non-null fields:"
" 'startLoop.closeLoop'.",
"locations": [(7, 15), (11, 15)],
},
{
"message": "Cannot reference Input Object 'AnotherInputObject'"
" within itself through a series of non-null fields:"
" 'startSecondLoop.closeSecondLoop'.",
"locations": [(12, 15), (16, 15)],
},
{
"message": "Cannot reference Input Object 'YetAnotherInputObject'"
" within itself through a series of non-null fields:"
" 'nonNullSelf'.",
"locations": [(17, 15)],
},
]
def rejects_an_input_object_type_with_incorrectly_typed_fields():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
type SomeObject {
field: String
}
union SomeUnion = SomeObject
input SomeInputObject {
badObject: SomeObject
badUnion: SomeUnion
goodInputObject: SomeInputObject
}
"""
)
assert validate_schema(schema) == [
{
"message": "The type of SomeInputObject.badObject must be Input Type"
" but got: SomeObject.",
"locations": [(13, 26)],
},
{
"message": "The type of SomeInputObject.badUnion must be Input Type"
" but got: SomeUnion.",
"locations": [(14, 25)],
},
]
def rejects_an_input_object_type_with_required_arguments_that_is_deprecated():
schema = build_schema(
"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
badField: String! @deprecated
optionalField: String @deprecated
anotherOptionalField: String! = "" @deprecated
}
"""
)
assert validate_schema(schema) == [
{
"message": "Required input field SomeInputObject.badField"
" cannot be deprecated.",
"locations": [(7, 33), (7, 25)],
}
]
def describe_type_system_enum_types_must_be_well_defined():
def rejects_an_enum_type_without_values():
schema = build_schema(
"""
type Query {
field: SomeEnum
}
enum SomeEnum
"""
)
schema = extend_schema(
schema,
parse(
"""
directive @test on ENUM
extend enum SomeEnum @test
"""
),
)
assert validate_schema(schema) == [
{
"message": "Enum type SomeEnum must define one or more values.",
"locations": [(6, 13), (4, 17)],
}
]
def rejects_an_enum_type_with_incorrectly_named_values():
schema = schema_with_field_type(
GraphQLEnumType("SomeEnum", values={"__badName": {}})
)
assert validate_schema(schema) == [
{
"message": "Name '__badName' must not begin with '__',"
" which is reserved by GraphQL introspection."
}
]
def describe_type_system_object_fields_must_have_output_types():
def _schema_with_object_field(type_: GraphQLOutputType) -> GraphQLSchema:
bad_object_type = GraphQLObjectType(
"BadObject", {"badField": GraphQLField(type_)}
)
return GraphQLSchema(
GraphQLObjectType("Query", {"f": GraphQLField(bad_object_type)}),
types=[SomeObjectType],
)
@mark.parametrize("type_", output_types, ids=get_name)
def accepts_an_output_type_as_an_object_field_type(type_):
schema = _schema_with_object_field(type_)
assert validate_schema(schema) == []
def rejects_an_empty_object_field_type():
# noinspection PyTypeChecker
schema = _schema_with_object_field(None) # type: ignore
assert validate_schema(schema) == [
{
"message": "The type of BadObject.badField must be Output Type"
" but got: None."
}
]
@mark.parametrize("type_", not_output_types, ids=get_name)
def rejects_a_non_output_type_as_an_object_field_type(type_):
schema = _schema_with_object_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadObject.badField must be Output Type"
f" but got: {type_}."
}
]
@mark.parametrize("type_", not_graphql_types, ids=get_name)
def rejects_a_non_type_value_as_an_object_field_type(type_):
schema = _schema_with_object_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadObject.badField must be Output Type"
f" but got: {inspect(type_)}.",
},
{"message": f"Expected GraphQL named type but got: {inspect(type_)}."},
]
def rejects_with_relevant_locations_for_a_non_output_type():
schema = build_schema(
"""
type Query {
field: [SomeInputObject]
}
input SomeInputObject {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "The type of Query.field must be Output Type"
" but got: [SomeInputObject].",
"locations": [(3, 22)],
}
]
def describe_type_system_objects_can_only_implement_unique_interfaces():
def rejects_an_object_implementing_a_non_type_value():
query_type = GraphQLObjectType(
"BadObject",
{"f": GraphQLField(GraphQLString)},
)
# noinspection PyTypeChecker
query_type.interfaces = (None,)
schema = GraphQLSchema(query_type)
assert validate_schema(schema) == [
{
"message": "Type BadObject must only implement Interface types,"
" it cannot implement None."
}
]
def rejects_an_object_implementing_a_non_interface_type():
schema = build_schema(
"""
type Query {
test: BadObject
}
input SomeInputObject {
field: String
}
type BadObject implements SomeInputObject {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type BadObject must only implement Interface types,"
" it cannot implement SomeInputObject."
}
]
def rejects_an_object_implementing_the_same_interface_twice():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface & AnotherInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type AnotherObject can only implement"
" AnotherInterface once.",
"locations": [(10, 43), (10, 62)],
}
]
def rejects_an_object_implementing_same_interface_twice_due_to_extension():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
extended_schema = extend_schema(
schema, parse("extend type AnotherObject implements AnotherInterface")
)
assert validate_schema(extended_schema) == [
{
"message": "Type AnotherObject can only implement"
" AnotherInterface once.",
"locations": [(10, 43), (1, 38)],
}
]
def describe_type_system_interface_extensions_should_be_valid():
def rejects_object_implementing_extended_interface_due_to_missing_field():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
extended_schema = extend_schema(
schema,
parse(
"""
extend interface AnotherInterface {
newField: String
}
extend type AnotherObject {
differentNewField: String
}
"""
),
)
assert validate_schema(extended_schema) == [
{
"message": "Interface field AnotherInterface.newField expected"
" but AnotherObject does not provide it.",
"locations": [(3, 19), (10, 13), (6, 17)],
}
]
def rejects_object_implementing_extended_interface_due_to_missing_args():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
extended_schema = extend_schema(
schema,
parse(
"""
extend interface AnotherInterface {
newField(test: Boolean): String
}
extend type AnotherObject {
newField: String
}
"""
),
)
assert validate_schema(extended_schema) == [
{
"message": "Interface field argument"
" AnotherInterface.newField(test:) expected"
" but AnotherObject.newField does not provide it.",
"locations": [(3, 28), (7, 19)],
}
]
def rejects_object_implementing_extended_interface_due_to_type_mismatch():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
extended_schema = extend_schema(
schema,
parse(
"""
extend interface AnotherInterface {
newInterfaceField: NewInterface
}
interface NewInterface {
newField: String
}
interface MismatchingInterface {
newField: String
}
extend type AnotherObject {
newInterfaceField: MismatchingInterface
}
# Required to prevent unused interface errors
type DummyObject implements NewInterface & MismatchingInterface {
newField: String
}
"""
),
)
assert validate_schema(extended_schema) == [
{
"message": "Interface field AnotherInterface.newInterfaceField"
" expects type NewInterface"
" but AnotherObject.newInterfaceField"
" is type MismatchingInterface.",
"locations": [(3, 38), (15, 38)],
}
]
def describe_type_system_interface_fields_must_have_output_types():
def _schema_with_interface_field(type_: GraphQLOutputType) -> GraphQLSchema:
fields = {"badField": GraphQLField(type_)}
bad_interface_type = GraphQLInterfaceType("BadInterface", fields)
bad_implementing_type = GraphQLObjectType(
"BadImplementing",
fields,
interfaces=[bad_interface_type],
)
return GraphQLSchema(
GraphQLObjectType("Query", {"f": GraphQLField(bad_interface_type)}),
types=[bad_implementing_type, SomeObjectType],
)
@mark.parametrize("type_", output_types, ids=get_name)
def accepts_an_output_type_as_an_interface_field_type(type_):
schema = _schema_with_interface_field(type_)
assert validate_schema(schema) == []
def rejects_an_empty_interface_field_type():
# noinspection PyTypeChecker
schema = _schema_with_interface_field(None) # type: ignore
assert validate_schema(schema) == [
{
"message": "The type of BadImplementing.badField must be Output Type"
" but got: None.",
},
{
"message": "The type of BadInterface.badField must be Output Type"
" but got: None.",
},
]
@mark.parametrize("type_", not_output_types, ids=get_name)
def rejects_a_non_output_type_as_an_interface_field_type(type_):
schema = _schema_with_interface_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadImplementing.badField must be Output Type"
f" but got: {type_}.",
},
{
"message": "The type of BadInterface.badField must be Output Type"
f" but got: {type_}.",
},
]
@mark.parametrize("type_", not_graphql_types, ids=get_name)
def rejects_a_non_type_value_as_an_interface_field_type(type_):
schema = _schema_with_interface_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadImplementing.badField must be Output Type"
f" but got: {inspect(type_)}.",
},
{
"message": "The type of BadInterface.badField must be Output Type"
f" but got: {inspect(type_)}.",
},
{"message": f"Expected GraphQL named type but got: {inspect(type_)}."},
]
def rejects_a_non_output_type_as_an_interface_field_with_locations():
schema = build_schema(
"""
type Query {
test: SomeInterface
}
interface SomeInterface {
field: SomeInputObject
}
input SomeInputObject {
foo: String
}
type SomeObject implements SomeInterface {
field: SomeInputObject
}
"""
)
assert validate_schema(schema) == [
{
"message": "The type of SomeInterface.field must be Output Type"
" but got: SomeInputObject.",
"locations": [(7, 22)],
},
{
"message": "The type of SomeObject.field must be Output Type"
" but got: SomeInputObject.",
"locations": [(15, 22)],
},
]
def accepts_an_interface_not_implemented_by_at_least_one_object():
schema = build_schema(
"""
type Query {
test: SomeInterface
}
interface SomeInterface {
foo: String
}
"""
)
assert validate_schema(schema) == []
def describe_type_system_arguments_must_have_input_types():
def _schema_with_arg(type_: GraphQLInputType) -> GraphQLSchema:
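        # Helper: build a schema in which a field (BadObject.badField) and a directive
        # (@BadDirective) both take an argument "badArg" of the given type.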
args = {"badArg": GraphQLArgument(type_)}
bad_object_type = GraphQLObjectType(
"BadObject",
{"badField": GraphQLField(GraphQLString, args)},
)
return GraphQLSchema(
GraphQLObjectType("Query", {"f": GraphQLField(bad_object_type)}),
directives=[
GraphQLDirective(
"BadDirective",
[DirectiveLocation.QUERY],
args,
)
],
)
@mark.parametrize("type_", input_types, ids=get_name)
def accepts_an_input_type_as_a_field_arg_type(type_):
schema = _schema_with_arg(type_)
assert validate_schema(schema) == []
def rejects_an_empty_field_arg_type():
# noinspection PyTypeChecker
schema = _schema_with_arg(None) # type: ignore
assert validate_schema(schema) == [
{
"message": "The type of @BadDirective(badArg:) must be Input Type"
" but got: None."
},
{
"message": "The type of BadObject.badField(badArg:) must be Input Type"
" but got: None."
},
]
@mark.parametrize("type_", not_input_types, ids=get_name)
def rejects_a_non_input_type_as_a_field_arg_type(type_):
schema = _schema_with_arg(type_)
assert validate_schema(schema) == [
{
"message": "The type of @BadDirective(badArg:) must be Input Type"
f" but got: {type_}."
},
{
"message": "The type of BadObject.badField(badArg:) must be Input Type"
f" but got: {type_}."
},
]
@mark.parametrize("type_", not_graphql_types, ids=get_name)
def rejects_a_non_type_value_as_a_field_arg_type(type_):
schema = _schema_with_arg(type_)
assert validate_schema(schema) == [
{
"message": "The type of @BadDirective(badArg:) must be Input Type"
f" but got: {inspect(type_)}."
},
{
"message": "The type of BadObject.badField(badArg:) must be Input Type"
f" but got: {inspect(type_)}."
},
{"message": f"Expected GraphQL named type but got: {inspect(type_)}."},
]
def rejects_a_required_argument_that_is_deprecated():
schema = build_schema(
"""
directive @BadDirective(
badArg: String! @deprecated
optionalArg: String @deprecated
anotherOptionalArg: String! = "" @deprecated
) on FIELD
type Query {
test(
badArg: String! @deprecated
optionalArg: String @deprecated
anotherOptionalArg: String! = "" @deprecated
): String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Required argument @BadDirective(badArg:)"
" cannot be deprecated.",
"locations": [(3, 31), (3, 23)],
},
{
"message": "Required argument Query.test(badArg:)"
" cannot be deprecated.",
"locations": [(10, 33), (10, 25)],
},
]
def rejects_a_non_input_type_as_a_field_arg_with_locations():
schema = build_schema(
"""
type Query {
test(arg: SomeObject): String
}
type SomeObject {
foo: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "The type of Query.test(arg:) must be Input Type"
" but got: SomeObject.",
"locations": [(3, 25)],
},
]
def describe_type_system_input_object_fields_must_have_input_types():
def _schema_with_input_field(type_: GraphQLInputType) -> GraphQLSchema:
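        # Helper: build a schema whose input object type (BadInputObject) declares a single
        # input field "badField" of the given type, reached through an argument on Query.f.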
bad_input_object_type = GraphQLInputObjectType(
"BadInputObject", {"badField": GraphQLInputField(type_)}
)
return GraphQLSchema(
GraphQLObjectType(
"Query",
{
"f": GraphQLField(
GraphQLString,
args={"badArg": GraphQLArgument(bad_input_object_type)},
)
},
)
)
@mark.parametrize("type_", input_types, ids=get_name)
def accepts_an_input_type_as_an_input_field_type(type_):
schema = _schema_with_input_field(type_)
assert validate_schema(schema) == []
def rejects_an_empty_input_field_type():
# noinspection PyTypeChecker
schema = _schema_with_input_field(None) # type: ignore
assert validate_schema(schema) == [
{
"message": "The type of BadInputObject.badField must be Input Type"
" but got: None."
}
]
@mark.parametrize("type_", not_input_types, ids=get_name)
def rejects_a_non_input_type_as_an_input_field_type(type_):
schema = _schema_with_input_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadInputObject.badField must be Input Type"
f" but got: {type_}."
}
]
@mark.parametrize("type_", not_graphql_types, ids=get_name)
def rejects_a_non_type_value_as_an_input_field_type(type_):
schema = _schema_with_input_field(type_)
assert validate_schema(schema) == [
{
"message": "The type of BadInputObject.badField must be Input Type"
f" but got: {inspect(type_)}."
},
{"message": f"Expected GraphQL named type but got: {inspect(type_)}."},
]
def rejects_with_relevant_locations_for_a_non_input_type():
schema = build_schema(
"""
type Query {
test(arg: SomeInputObject): String
}
input SomeInputObject {
foo: SomeObject
}
type SomeObject {
bar: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "The type of SomeInputObject.foo must be Input Type"
" but got: SomeObject.",
"locations": [(7, 20)],
}
]
def describe_objects_must_adhere_to_interfaces_they_implement():
def accepts_an_object_which_implements_an_interface():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): String
}
"""
)
assert validate_schema(schema) == []
def accepts_an_object_which_implements_an_interface_and_with_more_fields():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): String
anotherField: String
}
"""
)
assert validate_schema(schema) == []
def accepts_an_object_which_implements_an_interface_field_with_more_args():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String, anotherInput: String): String
}
"""
)
assert validate_schema(schema) == []
def rejects_an_object_missing_an_interface_field():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
anotherField: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field expected but"
" AnotherObject does not provide it.",
"locations": [(7, 15), (10, 13)],
}
]
def rejects_an_object_with_an_incorrectly_typed_interface_field():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): Int
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field"
" expects type String but"
" AnotherObject.field is type Int.",
"locations": [(7, 37), (11, 37)],
}
]
def rejects_an_object_with_a_differently_typed_interface_field():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
type A { foo: String }
type B { foo: String }
interface AnotherInterface {
field: A
}
type AnotherObject implements AnotherInterface {
field: B
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field"
" expects type A but AnotherObject.field is type B.",
"locations": [(10, 22), (14, 22)],
}
]
def accepts_an_object_with_a_subtyped_interface_field_interface():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: AnotherInterface
}
type AnotherObject implements AnotherInterface {
field: AnotherObject
}
"""
)
assert validate_schema(schema) == []
def accepts_an_object_with_a_subtyped_interface_field_union():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
type SomeObject {
field: String
}
union SomeUnionType = SomeObject
interface AnotherInterface {
field: SomeUnionType
}
type AnotherObject implements AnotherInterface {
field: SomeObject
}
"""
)
assert validate_schema(schema) == []
def rejects_an_object_missing_an_interface_argument():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field argument"
" AnotherInterface.field(input:) expected"
" but AnotherObject.field does not provide it.",
"locations": [(7, 21), (11, 15)],
}
]
def rejects_an_object_with_an_incorrectly_typed_interface_argument():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: Int): String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field argument"
" AnotherInterface.field(input:) expects type String"
" but AnotherObject.field(input:) is type Int.",
"locations": [(7, 28), (11, 28)],
}
]
def rejects_an_object_with_an_incorrectly_typed_field_and_argument():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: Int): Int
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field expects"
" type String but AnotherObject.field is type Int.",
"locations": [(7, 37), (11, 34)],
},
{
"message": "Interface field argument"
" AnotherInterface.field(input:) expects type String"
" but AnotherObject.field(input:) is type Int.",
"locations": [(7, 28), (11, 28)],
},
]
def rejects_object_implementing_an_interface_field_with_additional_args():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(baseArg: String): String
}
type AnotherObject implements AnotherInterface {
field(
baseArg: String,
requiredArg: String!
optionalArg1: String,
optionalArg2: String = "",
): String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Object field AnotherObject.field includes required"
" argument requiredArg that is missing from the"
" Interface field AnotherInterface.field.",
"locations": [(13, 17), (7, 15)],
}
]
def accepts_an_object_with_an_equivalently_wrapped_interface_field_type():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: [String]!
}
type AnotherObject implements AnotherInterface {
field: [String]!
}
"""
)
assert validate_schema(schema) == []
def rejects_an_object_with_a_non_list_interface_field_list_type():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: [String]
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field expects type"
" [String] but AnotherObject.field is type String.",
"locations": [(7, 22), (11, 22)],
}
]
def rejects_an_object_with_a_list_interface_field_non_list_type():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: [String]
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field expects type"
" String but AnotherObject.field is type [String].",
"locations": [(7, 22), (11, 22)],
}
]
def accepts_an_object_with_a_subset_non_null_interface_field_type():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String!
}
"""
)
assert validate_schema(schema) == []
def rejects_an_object_with_a_superset_nullable_interface_field_type():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String!
}
type AnotherObject implements AnotherInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field AnotherInterface.field expects type"
" String! but AnotherObject.field is type String.",
"locations": [(7, 22), (11, 22)],
}
]
def rejects_an_object_missing_a_transitive_interface():
schema = build_schema(
"""
type Query {
test: AnotherObject
}
interface SuperInterface {
field: String!
}
interface AnotherInterface implements SuperInterface {
field: String!
}
type AnotherObject implements AnotherInterface {
field: String!
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type AnotherObject must implement SuperInterface"
" because it is implemented by AnotherInterface.",
"locations": [(10, 51), (14, 43)],
}
]
def describe_interfaces_must_adhere_to_interface_they_implement():
def accepts_an_interface_which_implements_an_interface():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: String): String
}
"""
)
assert validate_schema(schema) == []
def accepts_an_interface_which_implements_an_interface_along_with_more_fields():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: String): String
anotherField: String
}
"""
)
assert validate_schema(schema) == []
def accepts_an_interface_which_implements_an_interface_with_additional_args():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: String, anotherInput: String): String
}
"""
)
assert validate_schema(schema) == []
def rejects_an_interface_missing_an_interface_field():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
anotherField: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expected"
" but ChildInterface does not provide it.",
"locations": [(7, 15), (10, 13)],
}
]
def rejects_an_interface_with_an_incorrectly_typed_interface_field():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: String): Int
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expects type String"
" but ChildInterface.field is type Int.",
"locations": [(7, 37), (11, 37)],
}
]
def rejects_an_interface_with_a_differently_typed_interface_field():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
type A { foo: String }
type B { foo: String }
interface ParentInterface {
field: A
}
interface ChildInterface implements ParentInterface {
field: B
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expects type A"
" but ChildInterface.field is type B.",
"locations": [(10, 22), (14, 22)],
}
]
def accepts_an_interface_with_a_subtyped_interface_field_interface():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: ParentInterface
}
interface ChildInterface implements ParentInterface {
field: ChildInterface
}
"""
)
assert validate_schema(schema) == []
def accepts_an_interface_with_a_subtyped_interface_field_union():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
type SomeObject {
field: String
}
union SomeUnionType = SomeObject
interface ParentInterface {
field: SomeUnionType
}
interface ChildInterface implements ParentInterface {
field: SomeObject
}
"""
)
assert validate_schema(schema) == []
def rejects_an_interface_implementing_a_non_interface_type():
schema = build_schema(
"""
type Query {
field: String
}
input SomeInputObject {
field: String
}
interface BadInterface implements SomeInputObject {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type BadInterface must only implement Interface types,"
" it cannot implement SomeInputObject.",
}
]
def rejects_an_interface_missing_an_interface_argument():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field argument ParentInterface.field(input:)"
" expected but ChildInterface.field does not provide it.",
"locations": [(7, 21), (11, 15)],
}
]
def rejects_an_interface_with_an_incorrectly_typed_interface_argument():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: Int): String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field argument ParentInterface.field(input:)"
" expects type String but ChildInterface.field(input:) is type Int.",
"locations": [(7, 28), (11, 28)],
}
]
def rejects_an_interface_with_both_an_incorrectly_typed_field_and_argument():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(input: String): String
}
interface ChildInterface implements ParentInterface {
field(input: Int): Int
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expects type String"
" but ChildInterface.field is type Int.",
"locations": [(7, 37), (11, 34)],
},
{
"message": "Interface field argument ParentInterface.field(input:)"
" expects type String but ChildInterface.field(input:) is type Int.",
"locations": [(7, 28), (11, 28)],
},
]
def rejects_an_interface_implementing_an_interface_field_with_additional_args():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field(baseArg: String): String
}
interface ChildInterface implements ParentInterface {
field(
baseArg: String,
requiredArg: String!
optionalArg1: String,
optionalArg2: String = "",
): String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Object field ChildInterface.field includes"
" required argument requiredArg that is missing"
" from the Interface field ParentInterface.field.",
"locations": [(13, 17), (7, 15)],
}
]
def accepts_an_interface_with_an_equivalently_wrapped_interface_field_type():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: [String]!
}
interface ChildInterface implements ParentInterface {
field: [String]!
}
"""
)
assert validate_schema(schema) == []
def rejects_an_interface_with_a_non_list_interface_field_list_type():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: [String]
}
interface ChildInterface implements ParentInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field"
" expects type [String] but ChildInterface.field is type String.",
"locations": [(7, 22), (11, 22)],
}
]
def rejects_an_interface_with_a_list_interface_field_non_list_type():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: String
}
interface ChildInterface implements ParentInterface {
field: [String]
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expects type String"
" but ChildInterface.field is type [String].",
"locations": [(7, 22), (11, 22)],
}
]
def accepts_an_interface_with_a_subset_non_null_interface_field_type():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: String
}
interface ChildInterface implements ParentInterface {
field: String!
}
"""
)
assert validate_schema(schema) == []
def rejects_an_interface_with_a_superset_nullable_interface_field_type():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface ParentInterface {
field: String!
}
interface ChildInterface implements ParentInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Interface field ParentInterface.field expects type String!"
" but ChildInterface.field is type String.",
"locations": [(7, 22), (11, 22)],
}
]
def rejects_an_object_missing_a_transitive_interface():
schema = build_schema(
"""
type Query {
test: ChildInterface
}
interface SuperInterface {
field: String!
}
interface ParentInterface implements SuperInterface {
field: String!
}
interface ChildInterface implements ParentInterface {
field: String!
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type ChildInterface must implement SuperInterface"
" because it is implemented by ParentInterface.",
"locations": [(10, 50), (14, 49)],
}
]
def rejects_a_self_reference_interface():
schema = build_schema(
"""
type Query {
test: FooInterface
}
interface FooInterface implements FooInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type FooInterface cannot implement itself"
" because it would create a circular reference.",
"locations": [(6, 47)],
}
]
def rejects_a_circular_interface_implementation():
schema = build_schema(
"""
type Query {
test: FooInterface
}
interface FooInterface implements BarInterface {
field: String
}
interface BarInterface implements FooInterface {
field: String
}
"""
)
assert validate_schema(schema) == [
{
"message": "Type FooInterface cannot implement BarInterface"
" because it would create a circular reference.",
"locations": [(10, 47), (6, 47)],
},
{
"message": "Type BarInterface cannot implement FooInterface"
" because it would create a circular reference.",
"locations": [(6, 47), (10, 47)],
},
]
def describe_assert_valid_schema():
def does_not_throw_on_valid_schemas():
schema = build_schema(
(
"""
type Query {
foo: String
}
"""
)
)
assert_valid_schema(schema)
def combines_multiple_errors():
schema = build_schema("type SomeType")
with raises(TypeError) as exc_info:
assert_valid_schema(schema)
assert (
str(exc_info.value)
== dedent(
"""
Query root type must be provided.
Type SomeType must define one or more fields.
"""
).rstrip()
)
| [
"[email protected]"
] | |
2529dcd223ae0f492e3e32aa11b0fa54221c14a0 | f72c689bd0d756b4817cc03cb434a228343c8936 | /test/functional/mining_pos_coldStaking.py | ce715939394dcfcc628065c3e9e74e679628ec5d | [
"MIT"
] | permissive | CircuitProject/Circuit-Core | 7f68a8b4cb180a715cb24e247b899d8d8dc29e95 | 831dc33d57050ea2955983b2e8f1fc088a819e97 | refs/heads/main | 2023-04-09T00:08:37.954538 | 2021-04-12T19:09:42 | 2021-04-12T19:09:42 | 357,308,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,769 | py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from io import BytesIO
from time import sleep
from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint
from test_framework.mininode import network_thread_start
from test_framework.circuit_node import CircuitTestNode
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.test_framework import CircuitTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
bytes_to_hex_str,
set_node_times,
sync_blocks,
sync_mempools,
)
from decimal import Decimal
# filter utxos based on first 5 bytes of scriptPubKey
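# (Aside, added for clarity: those bytes decode to OP_DUP OP_HASH160 OP_ROT OP_IF
# OP_CHECKCOLDSTAKEVERIFY, i.e. the start of a P2CS delegation script; the opcode
# values are assumed from the standard script opcode table, so treat this as illustrative.)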
def getDelegatedUtxos(utxos):
return [x for x in utxos if x["scriptPubKey"][:10] == '76a97b63d1']
class CIRCUIT_ColdStakingTest(CircuitTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [['-nuparams=v5_shield:201']] * self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoW cache: 200 blocks
self.log.info("Initializing test directory " + self.options.tmpdir)
self._initialize_chain()
self.enable_mocktime()
def init_test(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
self.DEFAULT_FEE = 0.05
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(CircuitTestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def setColdStakingEnforcement(self, fEnable=True):
sporkName = "SPORK_19_COLDSTAKING_MAINTENANCE"
# update spork 19 with node[0]
if fEnable:
self.log.info("Enabling cold staking with SPORK 19...")
res = self.deactivate_spork(0, sporkName)
else:
self.log.info("Disabling cold staking with SPORK 19...")
res = self.activate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, not self.is_spork_active(1, sporkName))
self.log.info("done")
def isColdStakingEnforced(self):
# verify from node[1]
return not self.is_spork_active(1, "SPORK_19_COLDSTAKING_MAINTENANCE")
def run_test(self):
self.description = "Performs tests on the Cold Staking P2CS implementation"
self.init_test()
NUM_OF_INPUTS = 20
INPUT_VALUE = 249
# nodes[0] - coin-owner
# nodes[1] - cold-staker
# First put cold-staking in maintenance mode
self.setColdStakingEnforcement(False)
# double check
assert (not self.isColdStakingEnforced())
# 1) nodes[0] and nodes[2] mine 25 blocks each
# --------------------------------------------
print("*** 1 ***")
self.log.info("Mining 50 Blocks...")
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pow(peer, self.mocktime)
sync_blocks(self.nodes)
# 2) node[1] sends his entire balance (50 mature rewards) to node[2]
# - node[2] stakes a block - node[1] locks the change
# - node[0] shields 250 coins (to be delegated later)
print("*** 2 ***")
self.log.info("Emptying node1 balance")
assert_equal(self.nodes[1].getbalance(), 50 * 250)
txid = self.nodes[1].sendtoaddress(self.nodes[2].getnewaddress(), (50 * 250 - 0.01))
assert (txid is not None)
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# lock the change output (so it's not used as stake input in generate_pos)
for x in self.nodes[1].listunspent():
assert (self.nodes[1].lockunspent(False, [{"txid": x['txid'], "vout": x['vout']}]))
# check that it cannot stake
sleep(1)
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
# create shielded balance for node 0
self.log.info("Shielding some coins for node0...")
self.nodes[0].shieldsendmany("from_transparent", [{"address": self.nodes[0].getnewshieldaddress(),
"amount": Decimal('250.00')}], 1)
self.sync_all()
for i in range(6):
self.mocktime = self.generate_pow(0, self.mocktime)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getshieldbalance(), 250)
# 3) nodes[0] generates a owner address
# nodes[1] generates a cold-staking address.
# ---------------------------------------------
print("*** 3 ***")
owner_address = self.nodes[0].getnewaddress()
self.log.info("Owner Address: %s" % owner_address)
staker_address = self.nodes[1].getnewstakingaddress()
staker_privkey = self.nodes[1].dumpprivkey(staker_address)
self.log.info("Staking Address: %s" % staker_address)
# 4) Check enforcement.
# ---------------------
print("*** 4 ***")
        # Check that cold staking is not enforced yet (SPORK 19 maintenance is still active)
assert (not self.isColdStakingEnforced())
self.log.info("Creating a stake-delegation tx before cold staking enforcement...")
assert_raises_rpc_error(-4, "Failed to accept tx in the memory pool (reason: cold-stake-inactive (code 16))\nTransaction canceled.",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, owner_address,
False, False, False, True)
self.log.info("Good. Cold Staking NOT ACTIVE yet.")
# Enable via SPORK
self.setColdStakingEnforcement()
# double check
assert (self.isColdStakingEnforced())
# 5) nodes[0] delegates a number of inputs for nodes[1] to stake em.
# ------------------------------------------------------------------
print("*** 5 ***")
self.log.info("First check warning when using external addresses...")
assert_raises_rpc_error(-5, "Only the owner of the key to owneraddress will be allowed to spend these coins",
self.nodes[0].delegatestake, staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT")
self.log.info("Good. Warning triggered.")
self.log.info("Now force the use of external address creating (but not sending) the delegation...")
res = self.nodes[0].rawdelegatestake(staker_address, INPUT_VALUE, "yCgCXC8N5VThhfiaVuKaNLkNnrWduzVnoT", True)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now delegate with internal owner address..")
self.log.info("Try first with a value (0.99) below the threshold")
assert_raises_rpc_error(-8, "Invalid amount",
self.nodes[0].delegatestake, staker_address, 0.99, owner_address)
self.log.info("Nice. it was not possible.")
self.log.info("Then try (creating but not sending) with the threshold value (1.00)")
res = self.nodes[0].rawdelegatestake(staker_address, 1.00, owner_address)
assert(res is not None and res != "")
self.log.info("Good. Warning NOT triggered.")
self.log.info("Now creating %d real stake-delegation txes..." % NUM_OF_INPUTS)
for i in range(NUM_OF_INPUTS-1):
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address)
assert(res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
# delegate the shielded balance
res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address, False, False, True)
assert (res != None and res["txid"] != None and res["txid"] != "")
assert_equal(res["owner_address"], owner_address)
assert_equal(res["staker_address"], staker_address)
fee = self.nodes[0].viewshieldtransaction(res["txid"])['fee']
# sync and mine 2 blocks
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
self.log.info("%d Txes created." % NUM_OF_INPUTS)
# check balances:
self.expected_balance = NUM_OF_INPUTS * INPUT_VALUE
self.expected_immature_balance = 0
self.checkBalances()
# also shielded balance of node 0 (250 - 249 - fee)
assert_equal(self.nodes[0].getshieldbalance(), round(Decimal(1)-Decimal(fee), 8))
# 6) check that the owner (nodes[0]) can spend the coins.
# -------------------------------------------------------
print("*** 6 ***")
self.log.info("Spending back one of the delegated UTXOs...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_equal(NUM_OF_INPUTS, len(delegated_utxos))
assert_equal(len(delegated_utxos), len(self.nodes[0].listcoldutxos()))
u = delegated_utxos[0]
txhash = self.spendUTXOwithNode(u, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to spend - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after spend.
self.expected_balance -= float(u["amount"])
self.checkBalances()
self.log.info("Balances check out after spend")
assert_equal(NUM_OF_INPUTS-1, len(self.nodes[0].listcoldutxos()))
# 7) check that the staker CANNOT use the coins to stake yet.
# He needs to whitelist the owner first.
# -----------------------------------------------------------
print("*** 7 ***")
self.log.info("Trying to generate a cold-stake block before whitelisting the owner...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Nice. Cold staker was NOT able to create the block yet.")
self.log.info("Whitelisting the owner...")
ret = self.nodes[1].delegatoradd(owner_address)
assert(ret)
self.log.info("Delegator address %s whitelisted" % owner_address)
# 8) check that the staker CANNOT spend the coins.
# ------------------------------------------------
print("*** 8 ***")
self.log.info("Trying to spend one of the delegated UTXOs with the cold-staking key...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
assert_greater_than(len(delegated_utxos), 0)
u = delegated_utxos[0]
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed (Script failed an OP_CHECKCOLDSTAKEVERIFY operation",
self.spendUTXOwithNode, u, 1)
self.log.info("Good. Cold staker was NOT able to spend (failed OP_CHECKCOLDSTAKEVERIFY)")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# 9) check that the staker can use the coins to stake a block with internal miner.
# --------------------------------------------------------------------------------
print("*** 9 ***")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], NUM_OF_INPUTS-1)
self.log.info("Generating one valid cold-stake block...")
self.mocktime = self.generate_pos(1, self.mocktime)
self.log.info("New block created by cold-staking. Trying to submit...")
newblockhash = self.nodes[1].getbestblockhash()
self.log.info("Block %s submitted" % newblockhash)
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(newblockhash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 10) check that the staker can use the coins to stake a block with a rawtransaction.
# ----------------------------------------------------------------------------------
print("*** 10 ***")
self.log.info("Generating another valid cold-stake block...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
assert (ret is None)
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(new_block.hash, self.nodes[1].getbestblockhash())
# Verify that nodes[0] accepts it
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
assert_equal(new_block.hash, self.nodes[0].getbestblockhash())
self.log.info("Great. Cold-staked block was accepted!")
self.mocktime += 60
set_node_times(self.nodes, self.mocktime)
# check balances after staked block.
self.expected_balance -= INPUT_VALUE
self.expected_immature_balance += (INPUT_VALUE + 250)
self.checkBalances()
self.log.info("Balances check out after staked block")
# 11) check that the staker cannot stake a block changing the coinstake scriptPubkey.
# ----------------------------------------------------------------------------------
print("*** 11 ***")
self.log.info("Generating one invalid cold-stake block (changing first coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block (with dummy key)
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, "")
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert("rejected" in ret)
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 12) neither adding different outputs to the coinstake.
# ------------------------------------------------------
print("*** 12 ***")
self.log.info("Generating another invalid cold-stake block (adding coinstake output)...")
stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
stakeInputs = self.get_prevouts(1, stakeable_coins)
assert_greater_than(len(stakeInputs), 0)
# Create the block
new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
# Add output (dummy key address) to coinstake (taking 100 CRCT from the pot)
self.add_output_to_coinstake(new_block, 100)
self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
# Try to submit the block
ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
self.log.info("Block %s submitted." % new_block.hash)
assert_equal(ret, "bad-p2cs-outs")
# Verify that nodes[0] rejects it
sync_blocks(self.nodes)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
self.log.info("Great. Malicious cold-staked block was NOT accepted!")
self.checkBalances()
self.log.info("Balances check out after (non) staked block")
# 13) Now node[0] gets mad and spends all the delegated coins, voiding the P2CS contracts.
# ----------------------------------------------------------------------------------------
self.log.info("Let's void the contracts.")
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
print("*** 13 ***")
self.log.info("Cancel the stake delegation spending the delegated utxos...")
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
# remove one utxo to spend later
final_spend = delegated_utxos.pop()
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void the stake delegations - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
        # put cold staking back in maintenance mode (SPORK 19) and check that the owner can still spend the last utxo
self.setColdStakingEnforcement(False)
assert (not self.isColdStakingEnforced())
txhash = self.spendUTXOsWithNode([final_spend], 0)
assert(txhash != None)
self.log.info("Good. Owner was able to void a stake delegation (with SPORK 17 disabled) - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
# check balances after big spend.
self.expected_balance = 0
self.checkBalances()
self.log.info("Balances check out after the delegations have been voided.")
        # re-enable cold staking (lift SPORK 19 maintenance)
self.setColdStakingEnforcement()
assert (self.isColdStakingEnforced())
# 14) check that coinstaker is empty and can no longer stake.
# -----------------------------------------------------------
print("*** 14 ***")
self.log.info("Trying to generate one cold-stake block again...")
assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], 0)
self.log.info("Cigar. Cold staker was NOT able to create any more blocks.")
# 15) check balances when mature.
# -----------------------------------------------------------
print("*** 15 ***")
self.log.info("Staking 100 blocks to mature the cold stakes...")
for i in range(2):
for peer in [0, 2]:
for j in range(25):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
self.expected_balance = self.expected_immature_balance
self.expected_immature_balance = 0
self.checkBalances()
delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
assert (txhash != None)
self.log.info("Good. Owner was able to spend the cold staked coins - tx: %s" % str(txhash))
sync_mempools(self.nodes)
self.mocktime = self.generate_pos(2, self.mocktime)
sync_blocks(self.nodes)
# check tx
self.check_tx_in_chain(0, txhash)
self.check_tx_in_chain(1, txhash)
self.expected_balance = 0
self.checkBalances()
def checkBalances(self):
w_info = self.nodes[0].getwalletinfo()
self.log.info("OWNER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_delegated_balance"]), self.expected_immature_balance)
assert_equal(float(w_info["cold_staking_balance"]), 0)
w_info = self.nodes[1].getwalletinfo()
self.log.info("STAKER - Delegated %f / Cold %f [%f / %f]" % (
float(w_info["delegated_balance"]), w_info["cold_staking_balance"],
float(w_info["immature_delegated_balance"]), w_info["immature_cold_staking_balance"]))
assert_equal(float(w_info["delegated_balance"]), 0)
assert_equal(float(w_info["cold_staking_balance"]), self.expected_balance)
assert_equal(float(w_info["immature_cold_staking_balance"]), self.expected_immature_balance)
def spendUTXOwithNode(self, utxo, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)
outputs = {}
outputs[new_addy] = out_amount
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def spendUTXOsWithNode(self, utxos, node_n):
new_addy = self.nodes[node_n].getnewaddress()
inputs = []
outputs = {}
outputs[new_addy] = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[new_addy] += float(utxo["amount"])
outputs[new_addy] -= self.DEFAULT_FEE
spendingTx = self.nodes[node_n].createrawtransaction(inputs, outputs)
spendingTx_signed = self.nodes[node_n].signrawtransaction(spendingTx)
return self.nodes[node_n].sendrawtransaction(spendingTx_signed["hex"])
def add_output_to_coinstake(self, block, value, peer=1):
coinstake = block.vtx[1]
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
coinstake.vout.append(
CTxOut(value * COIN, CScript([self.DUMMY_KEY.get_pubkey(), OP_CHECKSIG])))
coinstake.vout[1].nValue -= value * COIN
# re-sign coinstake
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstake.vin[0] = CTxIn(prevout)
stake_tx_signed_raw_hex = self.nodes[peer].signrawtransaction(
bytes_to_hex_str(coinstake.serialize()))['hex']
block.vtx[1] = CTransaction()
block.vtx[1].from_hex(stake_tx_signed_raw_hex)
# re-sign block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.re_sign_block()
if __name__ == '__main__':
CIRCUIT_ColdStakingTest().main()
| [
"[email protected]"
] | |
4b382d0d55c82e75b7927ff2b9427af967ea657b | c842cbf891b367442246f5b354f46cf48a2e3e5d | /src/FinalResult_csv.py | be18343ef9c3f10be84826845ce9eb9e3b435a3e | [] | no_license | srishti77/sampleRecom | 0582de4862f991b18ef089b92befc4dd0aa95095 | 414e1c07e94b4b0169544548b05d142500fee0fe | refs/heads/master | 2020-03-19T12:03:04.583604 | 2018-09-23T18:17:53 | 2018-09-23T18:17:53 | 136,492,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py |
import os
import re
import pandas as pd
def getFileNames():
    # return full paths so the CSVs load regardless of the current working directory
    base_dir = r"D:\Recommender System\Clean Data\Ranks"
    return [os.path.join(base_dir, name) for name in os.listdir(base_dir)]
directory = getFileNames()
newDataset = pd.DataFrame(columns=['ProjectName','methodName','methodBody','methodBodyLength','TotalMN','Prefix','Rank','AllOccurrance', 'FirstOccurrance', 'LastOccurrance'])
for i in range(0,len(directory)):
dataset = pd.read_csv(directory[i])
for j in range(0, len(dataset)):
print(j)
ProjectName = dataset['ProjectName'][j]
methodName = dataset['methodName'][j]
methodBody = dataset['methodBody'][j]
methodBodyLength = dataset['methodBodyLength'][j]
TotalMN = dataset['TotalMN'][j]
Prefix = dataset['Prefix'][j]
Rank = dataset['Rank'][j]
occurrance = dataset['AllOccurrance'][j]
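        # 'AllOccurrance' is stored as a stringified list (e.g. "[3, 7, 12]"):
        # strip the brackets and commas, then split to recover the individual positions.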
value = occurrance
value = value.replace('[', '')
value = value.replace(']', '')
value = value.replace(',', '')
value = value.split()
if value:
if len(value) > 1:
firstOcc = value[0]
lastOcc = value[-1]
else:
firstOcc = value[0]
lastOcc = 0
else:
firstOcc = 0
lastOcc = 0
dict = {'ProjectName': ProjectName, 'methodName': methodName,
'methodBody': methodBody,'methodBodyLength':methodBodyLength, 'TotalMN': TotalMN, 'Prefix': Prefix, 'Rank': Rank , 'AllOccurrance': occurrance, 'FirstOccurrance':firstOcc , 'LastOccurrance':lastOcc}
newDataset = newDataset.append(dict, ignore_index= True)
newDataset.to_csv('final_result.csv')
| [
"="
] | = |
960c232f5a94c8236ba800deb22c0043a177002b | 9c50f57a9cb32b44e86a0cdcbf61ead34754b085 | /ๆ็ฉ้ด/pythonๅบ็ก/day12/ๅญฆๅ็ฎก็็ณป็ป.py | f3144e4039c11b48bbaf2522ae173f668aa2d929 | [] | no_license | a1403893559/rg201python | c3f115011981393c86a0150e5281096651712ad4 | 448f04c86e4c7fd30e3a2a4f9121b934ae1d49be | refs/heads/master | 2020-03-15T23:32:17.723403 | 2018-03-18T12:59:43 | 2018-03-18T12:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | stu_a ={
'name':'liwenhao',
'age':'18'
}
stu_b = {
'name':'zhangjiayi',
'age':'1'
}
stu_c = {
'name':'yuanjianbo',
'age':'19'
}
def printStu(stu):
for k,v in stu.items():
print('k=%s,v=%s'%(k,v))
printStu(stu_a)
printStu(stu_b)
printStu(stu_c)
| [
"[email protected]"
] | |
a4984cb32bd61b190a795036bc63f2ad9c115d58 | 6683c316188abc02fc2093dfd78e994bac6cbd44 | /asynchandler/sync.py | e4f1e38bb4b44925b711f259a36cef10184be27d | [] | no_license | dexity/surfsnippets | e19d307d4b66336e88d1c361a00e55df089f79a4 | 00c73ebb33a2036b898c05575250e6bb28256ae7 | refs/heads/master | 2021-08-30T12:06:36.453468 | 2017-12-17T21:31:53 | 2017-12-17T21:31:53 | 114,569,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | #
# Copyright 2012 Alex Dementsov
#
# Uses inet socket to listen on incoming requests to perform blocking request
# handling (e.g. logging).
#
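# Quick manual check (illustrative sketch, not part of the original module): with the
# server below running, a client could connect like this (Python 2, to match the
# print statements used in this file):
#
#   import socket
#   s = socket.create_connection(("127.0.0.1", 8080))   # same HOST/PORT as below
#   s.send("hello")                                      # triggers the blocking make_log()
#   print s.recv(1024)                                   # expect "Doobie Doo"
#   s.close()
#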
import os
import socket
import threading
import time
PORT = 8080
HOST = "127.0.0.1"
SOCK_FLAGS = socket.AI_PASSIVE | socket.AI_ADDRCONFIG
counter = 0 # global variable
def get_inet_socket(backlog=128):
"Blocking socket"
res = socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, SOCK_FLAGS)
af, socktype, proto, canonname, sockaddr = res[0]
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(sockaddr)
sock.listen(backlog)
return sock
def make_log(recv):
"Perform logging"
global counter
counter += 1
print "num = %s" % counter
print recv
time.sleep(1)
def main():
# Create server socket
isock = get_inet_socket()
while True:
# Get data from the inet client
conn, addr = isock.accept()
recv = conn.recv(1024)
# Blocking request handling
make_log(recv)
# Respond to the inet client
conn.send("Doobie Doo")
conn.close()
isock.close()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
103bad2469586a40386703071346fe3f1a1168e1 | 1bdb0da31d14102ca03ee2df44f0ec522b0701a4 | /EmiliaRomagna/HeraER/3-DataReportCollectionCollection.py | 4b9b8c7bbef9659e460f47671de906b26e1f467e | [] | no_license | figuriamoci/Acqua | dc073d90c3c5e5899b22005685847916de1dfd95 | aef22fcd0c80c92441e0e3df2468d7a2f23a848a | refs/heads/master | 2020-12-15T04:00:26.855139 | 2020-06-08T21:17:55 | 2020-06-08T21:17:55 | 234,986,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | ##
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import logging,pandas as pd
import acqua.aqueduct as aq
import acqua.parametri as parm
gestore = "HeraER"
aq.setEnv('EmiliaRomagna//'+gestore)
url = 'https://www.gruppohera.it/gruppo/attivita_servizi/business_acqua/canale_acqua/'
options = webdriver.ChromeOptions()
options.add_argument( '--ignore-certificate-errors' )
options.add_argument( '--incognito' )
options.add_argument( '--headless' )
useThisDictionary = parm.crea_dizionario('Metadata/SynParametri.csv')
parametersAdmitted = parm.getParametersAdmitted('Metadata/SynParametri.csv')
#
locationList = pd.read_csv('Metadata/LocationList.csv')
dataReportCollection = pd.DataFrame()
#locationList = locationList.iloc[57:60]
for i,location in locationList.iterrows():
alias_city = location['alias_city']
alias_address = location['alias_address']
driver = webdriver.Chrome( "chromedriver", options=options )
driver.implicitly_wait( 10 ) # seconds
driver.get( url )
time.sleep( 3 )
#
divWebElement = WebDriverWait( driver, 10 ).until(EC.visibility_of( driver.find_element_by_class_name( "form_scopricosabevi_campi" ) ) )
inputWebElement = divWebElement.find_element_by_class_name( "ui-autocomplete-input" )
    inputWebElement.send_keys( alias_city.split("'")[0].split("ì")[0] ) # workaround: keep only the first part of the name, before any apostrophe (or "ì")
time.sleep( 3 )
    # Unfortunately setting the city is not enough, for two reasons: 1) it does not work with apostrophes, 2) several towns can share the same prefix, so in some cases the town has to be selected from the drop-down list.
try:
popUpWebElement = WebDriverWait( driver, 10 ).until( EC.visibility_of( driver.find_element_by_id( "ui-id-2" ) ) )
optionsWebElement = popUpWebElement.find_elements_by_tag_name("li")
if len(optionsWebElement)>1:
a = {we.text:we for we in optionsWebElement}
driver.execute_script("arguments[0].click();", a[alias_city])
except:
pass
submitWebElement = divWebElement.find_element_by_class_name( "submit" )
driver.execute_script("arguments[0].click();", submitWebElement)
#
try:
time.sleep( 2 )
tableWebElement = WebDriverWait( driver, 10 ).until(EC.visibility_of( driver.find_element_by_id( "scopricosabevi-container" ) ) )
tableHtml = tableWebElement.find_elements_by_tag_name("table")[0].get_attribute('outerHTML')
#
table = pd.read_html( tableHtml, decimal=',', thousands='.' )[0]
table.set_index( table.columns[0], inplace=True )
parms = table.loc[parametersAdmitted].iloc[:,0]
#
premessa = driver.find_element_by_class_name("tdclose")
data_report = premessa.text.split("\n")[1]
#
row = {'alias_city': alias_city, 'alias_address': alias_address, 'data_report': data_report}
stdParms = parm.standardize( useThisDictionary, parms.to_dict() )
row.update( stdParms )
dataReportCollection = dataReportCollection.append( row, ignore_index=True )
logging.info( "Hacked %s (%s/%s)!", alias_city, i + 1, len( locationList ) )
except:
logging.critical( "Skiped %s!", alias_city )
driver.close()
##
dataReportCollection.to_csv('Metadata/DataReportCollection.csv',index=False)
| [
"[email protected]"
] | |
b4a83143c9715fd87929a295483544c5b43b5b4a | 644bcdabf35261e07c2abed75986d70f736cb414 | /python-project/Les_listes/Tri_selection_Test.py | 6b1dccdeb2e57c4958f597688da6975535f8ac5c | [] | no_license | matcianfa/playground-X1rXTswJ | f967ab2c2cf3905becafb6d77e89a31414d014de | 67859b496e407200afb2b1d2b32bba5ed0fcc3f0 | refs/heads/master | 2023-04-03T11:56:15.878757 | 2023-03-24T15:52:37 | 2023-03-24T15:52:37 | 122,226,979 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | # A modifier si besoin
nom_fonction="ma_fonction"
#list of the values to test
# Be careful to put the values to test inside a tuple or a list, even if the function only takes one argument.
valeurs_a_tester=[[[1,5,6]],[[3,2,5,7,10,1]],[[10,9,8,7,6,5,4,3,2,1]],[[10,9,8,7,6,5,4,3,2,1,1,2,3,4,5,6,7,8,9,10]],[[1]]]
#help message if needed
help="N'oublie pas d'utiliser return pour renvoyer le resultat."
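# For reference, the tested module is expected to define the function named above;
# an illustrative (assumed) selection-sort solution would look like:
#   def ma_fonction(liste):
#       for i in range(len(liste)):
#           i_min = min(range(i, len(liste)), key=liste.__getitem__)
#           liste[i], liste[i_min] = liste[i_min], liste[i]
#       return liste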
#------------------------------------
# Imports
import sys
# My toolbox
from ma_bao import *
# Gives the folder and module names (automatically from __file__)
chemin,module=donner_chemin_nom(__file__)
# Check that there are no syntax errors etc. and show them if needed
tester("from {} import *".format(module),globals())
# Rename the student's function to f
f=eval(nom_fonction)
# If the password is right, display the correction
try :
cheat(chemin+module,mdp)
except: pass
# On rรฉcupรจre la fonction solution
exec("from {}_Correction import {} as f_sol".format(module,nom_fonction))
#--------------------------------------
def test():
try:
for valeur in valeurs_a_tester:
rep=f(*valeur)
sol=f_sol(*valeur)
assert str(rep) == str(sol), "En testant les valeurs {} le rรฉsultat obtenu est {} au lieu de {}".format(",".join([str(val) for val in valeur]),str(rep),str(sol))
send_msg("Tests validรฉs","En testant les valeurs {} le rรฉsultat obtenu est bien {}".format(",".join([str(val) for val in valeur]),str(rep)))
success(chemin+module)
except AssertionError as e:
fail()
send_msg("Oops! ", e)
if help:
send_msg("Aide ๐ก", help)
#--------------------------------------
if __name__ == "__main__": test()
| [
"[email protected]"
] | |
a21fbc0faec9c014211bb36d1ae2ae1a5b3b9a45 | 99eb4013a12ddac44042d3305a16edac1c9e2d67 | /shexer/io/graph/yielder/multi_rdflib_triple_yielder.py | 0f89fcce622acfcbe7d035a433bb4d6357b8e2c5 | [
"Apache-2.0"
] | permissive | DaniFdezAlvarez/shexer | cd4816991ec630a81fd9dd58a291a78af7aee491 | 7ab457b6fa4b30f9e0e8b0aaf25f9b4f4fcbf6d9 | refs/heads/master | 2023-05-24T18:46:26.209094 | 2023-05-09T18:25:27 | 2023-05-09T18:25:27 | 132,451,334 | 24 | 2 | Apache-2.0 | 2023-05-03T18:39:57 | 2018-05-07T11:32:26 | Python | UTF-8 | Python | false | false | 1,725 | py | from shexer.io.graph.yielder.multifile_base_triples_yielder import MultifileBaseTripleYielder
from shexer.io.graph.yielder.rdflib_triple_yielder import RdflibParserTripleYielder
from shexer.consts import TURTLE
class MultiRdfLibTripleYielder(MultifileBaseTripleYielder):
def __init__(self, list_of_files, input_format=TURTLE, allow_untyped_numbers=False,
namespaces_dict=None, compression_mode=None, zip_archive_file=None):
super(MultiRdfLibTripleYielder, self).__init__(list_of_files=list_of_files,
allow_untyped_numbers=allow_untyped_numbers)
self._input_format = input_format
self._namespaces_dict = namespaces_dict if namespaces_dict is not None else {}
self._compression_mode = compression_mode
self._zip_archive_file = zip_archive_file
def _yield_triples_of_last_yielder(self, parse_namespaces=True):
for a_triple in self._last_yielder.yield_triples(parse_namespaces):
yield a_triple
def _constructor_file_yielder(self, a_source_file):
return RdflibParserTripleYielder(source=a_source_file,
allow_untyped_numbers=self._allow_untyped_numbers,
input_format=self._input_format,
compression_mode=self._compression_mode,
zip_archive_file=self._zip_archive_file)
@property
def namespaces(self):
return self._namespaces_dict # TODO This is not entirely correct. But this method will be rarely used
# and can have a huge performance cost in case the graphs hadnt been parsed yet
| [
"[email protected]"
] | |
dcce9e815ad8f7b9c9fd8145653e29fbb870d7f7 | 9147fb079b5abdc97d2ff7f3f578b9b4a957dfb9 | /utils/python/linkedQueue.py | 9fa388017779496fed3a8365a930f9f6aa97d32e | [] | no_license | XBOOS/playgournd | db52bd1891d3405116dbf739ea0ae76d322e2b09 | 893b5f9e2be6ce854342f4879d9cd8db87caee2b | refs/heads/master | 2021-01-10T14:34:45.292669 | 2016-03-28T15:09:56 | 2016-03-28T15:09:56 | 44,814,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/env python
# encoding: utf-8
class LinkedQueue:
class _Node:
__slot__ = '_element','_next'
def __init__(self,element,next):
self.element = element
self.next = next
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def __len__(self):
return self.size
def is_empty(self):
return self.size == 0
def first(self):
if self.is_empty():
raise Exception("Queue is empty")
else:
return self.head.element
def enqueue(self,e):
newNode = self._Node(e,None)
if self.is_empty():
self.head = newNode
else:
self.tail.next = newNode
self.tail = newNode
self.size +=1
def dequeue(self):
if self.is_empty():
raise Exception("Queue is empty")
else:
self.size -=1
tmp = self.head.element
self.head = self.head.next
if self.is_empty():
self.tail = None
return tmp
| [
"[email protected]"
] | |
5afa1a4402e9090797402eba39d4c47968047b57 | d536a1e39d4b3d00ee228530490d1b03fe544f6a | /properties_wino.py | 1c28a6534843672fb0d22eda31dddf77f0a7cbc1 | [] | no_license | jeffstorlie/parcels_in_minnesota_update_v02 | dda6460b2e1f63c156d8e853a9cfcb0712b64591 | 8d7de35ddd385f2fd119374c88d8b39a14a4e064 | refs/heads/master | 2021-01-11T11:30:55.407132 | 2016-10-31T18:43:31 | 2016-10-31T18:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,420 | py | from parcels_base_classes import countyEtlParams
class countyEtl(countyEtlParams):
'''Class per county with unique parameters defined.'''
def __init__(self):
self.county_name = 'Winona'
self.cty_fips = r'169'
self.county_id = r'169'
self.cty_abbr = r'WINO'
self.mngeo_web_id = r'Winona 169'
self.sourceZipFile = r'WINO_parcels.zip'
self.sourcePolygons = r'Winona_co_parcels.shp'
self.sourceOwnershipZipFile = r''
self.sourceOwnershipTable = r''
self.joinInField = r''
self.joinJoinField = r''
self.PIN_exists.fieldTransferList = []
self.BLDG_NUM_exists.fieldTransferList = []
self.BLDG_NUM_exists.transferType = ''
self.PREFIX_DIR_exists.fieldTransferList = []
self.PREFIX_DIR_exists.transferType = ''
self.PREFIXTYPE_exists.fieldTransferList = []
self.PREFIXTYPE_exists.transferType = ''
self.STREETNAME_exists.fieldTransferList = []
self.STREETNAME_exists.transferType = ''
self.STREETTYPE_exists.fieldTransferList = []
self.STREETTYPE_exists.transferType = ''
self.SUFFIX_DIR_exists.fieldTransferList = []
self.SUFFIX_DIR_exists.transferType = ''
self.UNIT_INFO_exists.fieldTransferList = []
self.UNIT_INFO_exists.transferType = ''
self.CITY_exists.fieldTransferList = []
self.CITY_exists.transferType = ''
self.CITY_USPS_exists.fieldTransferList = []
self.CITY_USPS_exists.transferType = ''
self.ZIP_exists.fieldTransferList = []
self.ZIP_exists.transferType = ''
self.ZIP4_exists.fieldTransferList = []
self.ZIP4_exists.transferType = ''
self.PLAT_NAME_exists.fieldTransferList = []
self.PLAT_NAME_exists.transferType = ''
self.BLOCK_exists.fieldTransferList = []
self.BLOCK_exists.transferType = ''
self.LOT_exists.fieldTransferList = []
self.LOT_exists.transferType = ''
self.ACRES_POLY_exists.fieldTransferList = []
self.ACRES_DEED_exists.fieldTransferList = []
self.USE1_DESC_exists.fieldTransferList = []
self.USE1_DESC_exists.transferType = ''
self.USE2_DESC_exists.fieldTransferList = []
self.USE2_DESC_exists.transferType = ''
self.USE3_DESC_exists.fieldTransferList = []
self.USE3_DESC_exists.transferType = ''
self.USE4_DESC_exists.fieldTransferList = []
self.USE4_DESC_exists.transferType = ''
self.MULTI_USES_exists.fieldTransferList = []
self.MULTI_USES_exists.transferType = ''
self.LANDMARK_exists.fieldTransferList = []
self.LANDMARK_exists.transferType = ''
self.OWNER_NAME_exists.fieldTransferList = []
self.OWNER_NAME_exists.transferType = ''
self.OWNER_MORE_exists.fieldTransferList = []
self.OWNER_MORE_exists.transferType = ''
self.OWN_ADD_L1_exists.fieldTransferList = []
self.OWN_ADD_L1_exists.transferType = ''
self.OWN_ADD_L2_exists.fieldTransferList = []
self.OWN_ADD_L2_exists.transferType = ''
self.OWN_ADD_L3_exists.fieldTransferList = []
self.OWN_ADD_L3_exists.transferType = ''
self.OWN_ADD_L4_exists.fieldTransferList = []
self.OWN_ADD_L4_exists.transferType = ''
self.TAX_NAME_exists.fieldTransferList = []
self.TAX_NAME_exists.transferType = ''
self.TAX_ADD_L1_exists.fieldTransferList = []
self.TAX_ADD_L1_exists.transferType = ''
self.TAX_ADD_L2_exists.fieldTransferList = []
self.TAX_ADD_L2_exists.transferType = ''
self.TAX_ADD_L3_exists.fieldTransferList = []
self.TAX_ADD_L3_exists.transferType = ''
self.TAX_ADD_L4_exists.fieldTransferList = []
self.TAX_ADD_L4_exists.transferType = ''
self.OWNERSHIP_exists.fieldTransferList = []
self.OWNERSHIP_exists.transferType = ''
self.HOMESTEAD_exists.fieldTransferList = []
self.HOMESTEAD_exists.transferType = ''
self.TAX_YEAR_exists.fieldTransferList = []
self.MARKET_YEAR_exists.fieldTransferList = []
self.EMV_LAND_exists.fieldTransferList = []
self.EMV_BLDG_exists.fieldTransferList = []
self.EMV_TOTAL_exists.fieldTransferList = []
self.TAX_CAPAC_exists.fieldTransferList = []
self.TOTAL_TAX_exists.fieldTransferList = []
self.SPEC_ASSES_exists.fieldTransferList = []
self.TAX_EXEMPT_exists.fieldTransferList = []
self.TAX_EXEMPT_exists.transferType = ''
self.XUSE1_DESC_exists.fieldTransferList = []
self.XUSE1_DESC_exists.transferType = ''
self.XUSE2_DESC_exists.fieldTransferList = []
self.XUSE2_DESC_exists.transferType = ''
self.XUSE3_DESC_exists.fieldTransferList = []
self.XUSE3_DESC_exists.transferType = ''
self.XUSE4_DESC_exists.fieldTransferList = []
self.XUSE4_DESC_exists.transferType = ''
self.DWELL_TYPE_exists.fieldTransferList = []
self.DWELL_TYPE_exists.transferType = ''
self.HOME_STYLE_exists.fieldTransferList = []
self.HOME_STYLE_exists.transferType = ''
self.FIN_SQ_FT_exists.fieldTransferList = []
self.GARAGE_exists.fieldTransferList = []
self.GARAGE_exists.transferType = ''
self.GARAGESQFT_exists.fieldTransferList = []
self.BASEMENT_exists.fieldTransferList = []
self.BASEMENT_exists.transferType = ''
self.HEATING_exists.fieldTransferList = []
self.HEATING_exists.transferType = ''
self.COOLING_exists.fieldTransferList = []
self.COOLING_exists.transferType = ''
self.YEAR_BUILT_exists.fieldTransferList = []
self.NUM_UNITS_exists.fieldTransferList = []
self.SALE_DATE_exists.fieldTransferList = []
self.SALE_DATE_exists.transferType = 'Date'
self.SALE_VALUE_exists.fieldTransferList = []
self.SCHOOL_DST_exists.fieldTransferList = []
self.SCHOOL_DST_exists.transferType = ''
self.WSHD_DIST_exists.fieldTransferList = []
self.WSHD_DIST_exists.transferType = ''
self.GREEN_ACRE_exists.fieldTransferList = []
self.GREEN_ACRE_exists.transferType = ''
self.OPEN_SPACE_exists.fieldTransferList = []
self.OPEN_SPACE_exists.transferType = ''
self.AG_PRESERV_exists.fieldTransferList = []
self.AG_PRESERV_exists.transferType = ''
self.AGPRE_ENRD_exists.fieldTransferList = []
self.AGPRE_ENRD_exists.transferType = 'Date'
self.AGPRE_EXPD_exists.fieldTransferList = []
self.AGPRE_EXPD_exists.transferType = 'Date'
self.PARC_CODE_exists.fieldTransferList = []
self.SECTION_exists.fieldTransferList = []
self.TOWNSHIP_exists.fieldTransferList = []
self.RANGE_exists.fieldTransferList = []
self.RANGE_DIR_exists.fieldTransferList = []
self.LEGAL_DESC_exists.fieldTransferList = []
self.LEGAL_DESC_exists.transferType = ''
self.EDIT_DATE_exists.fieldTransferList = []
self.EDIT_DATE_exists.transferType = 'Date'
self.EXPORT_DATE_exists.fieldTransferList = []
self.EXPORT_DATE_exists.transferType = 'Date'
self.ORIG_PIN_exists.fieldTransferList = []
self.ORIG_PIN_exists.transferType = ''
def returnCountyBase(self):
        return self.county_name
| [
"[email protected]"
] | |
176e51af1e406c546c6b63fd573169f168852da6 | b68d4e7826c29a22b002ff9c10583faeb7a10455 | /uwsgi-tutorial/lib/python3.4/linecache.py | b990adaebbbaf6bf0beca6cd2c87ab63efc6ebe2 | [] | no_license | mikanyman/.pyenv_versions-legacy | ec392821290bd38873b25824c4261b15dc1a5067 | 5a42c7c21e800610f4f5f322d73d1dbd62a081b9 | refs/heads/master | 2022-10-13T10:22:13.956161 | 2017-01-31T20:10:04 | 2017-01-31T20:10:04 | 80,555,789 | 0 | 1 | null | 2022-09-30T13:39:01 | 2017-01-31T19:49:56 | Python | UTF-8 | Python | false | false | 61 | py | /home/mnyman/.pyenv/versions/3.4.1/lib/python3.4/linecache.py | [
"[email protected]"
] | |
198cd3ffee04006604e8671de6d40a54f1429eb8 | 356a5c583fb77c53bf00b38a0d9eca761590bcf1 | /shuwei_fengge/practice_one/Company/match_check/chin_check.py | 6cdd9f0f46b5455f972faf3f52d49dd285b15a22 | [
"MIT"
] | permissive | sunyihuan326/DeltaLab | f7df7915bf4aacb7628f82ada68278c29f5942eb | 3d20fde0763e6167c705b0c06bd033ad953719ed | refs/heads/master | 2021-07-21T17:16:32.567956 | 2018-10-29T06:46:09 | 2018-10-29T06:48:27 | 108,956,765 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # coding:utf-8
'''
Created on 2017/12/27.
@author: chk01
'''
from practice_one.Company.load_material.utils import *
def get_face_feature():
for typ in ['A', 'B', 'C', 'D', 'E']:
        print('start importing type {}'.format(typ))
dir_path = os.listdir(root_dir + '/src/face_' + typ)
m = len(dir_path)
n = 13
X = np.zeros([m, n, 2]) + 999
Y = np.zeros([m, 1]) + 999
for i, sourceDir in enumerate(dir_path):
_id = int(sourceDir.split('.')[0].replace(typ, '')) - 1
full_path = root_dir + '/src/face_' + typ + '/' + sourceDir
landmark72, _, _, _, _ = get_baseInfo(full_path)
landmark72 = landmark72_trans(landmark72)
_data = point2feature_chin(landmark72)
X[_id] = _data
Y[_id] = _id + 1
            print('load--->{}---image {}'.format(typ, _id + 1))
        scio.savemat('../load_material/feature_matrix/chin_' + typ, {"X": X, "Y": Y})
        print('finished importing type {}'.format(typ))
if __name__ == '__main__':
get_face_feature()
| [
"[email protected]"
] | |
36ec8c4c0a90a6281609966b27752ac39c7224de | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py | 3b41cee1b5579abf11141d265940c504e13f7613 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 2,399 | py | # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible_collections.community.general.tests.unit.compat.mock import Mock
# FIXME: These should be done inside of a fixture so that they're only mocked during
# these unittests
sys.modules['hpOneView'] = Mock()
sys.modules['hpOneView.oneview_client'] = Mock()
ONEVIEW_MODULE_UTILS_PATH = 'ansible_collections.community.general.plugins.module_utils.oneview'
from ansible_collections.community.general.plugins.module_utils.oneview import (OneViewModuleException,
OneViewModuleTaskError,
OneViewModuleResourceNotFound,
OneViewModuleBase)
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_ethernet_network import EthernetNetworkModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_ethernet_network_info import EthernetNetworkInfoModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fc_network import FcNetworkModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fc_network_info import FcNetworkInfoModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fcoe_network import FcoeNetworkModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fcoe_network_info import FcoeNetworkInfoModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_network_set import NetworkSetModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_network_set_info import NetworkSetInfoModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_san_manager import SanManagerModule
from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_san_manager_info import SanManagerInfoModule
| [
"[email protected]"
] | |
30f4b31f122eb742591df22a134ba0aa5db77f97 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_036/ch27_2020_03_19_18_10_54_857075.py | f14b6d255fcb55368e95df12e32f95de78723692 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | duvida=True
a = input('Do you have questions? ')
if a == 'no':
    duvida = False
while duvida == True:
    print('Practice more')
    a = input('Do you have questions? ')
    if a == 'no':
        duvida = False
print('See you next time') | [
"[email protected]"
] | |
d91a7976973b3aaecd3a5b8d9a949e02417aa5e6 | 066de66c8264c4c38b8124b67232fc93ab8c84ba | /code/ultrasound_image/erosion_dilation.py | c320fe883543f5852af6357112816ae6908e1a20 | [] | no_license | Monologuethl/Monologuethl | d2c8ecd9c3a79c26014387db13fafd602b346401 | 844e097c29747ddf8b185bd8a36f81c03627869f | refs/heads/master | 2021-09-27T07:59:40.284650 | 2021-09-15T07:17:30 | 2021-09-15T07:17:30 | 149,366,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | # ex2tron's blog:
# http://ex2tron.wang
import cv2
import numpy as np
# 1. Erosion and dilation
path = r'C:\Users\Tong\Desktop\edgs\1.png'
img = cv2.imread(path, 0)
# kernel = np.ones((5, 5), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dilation = cv2.dilate(img, kernel)  # dilation
erosion = cv2.erode(dilation, kernel)  # erosion
cv2.imshow('erosion/dilation', erosion)
cv2.waitKey(0)
#
#
# # 2. Define the structuring element
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # rectangular kernel
# print(kernel)
#
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # elliptical kernel
# print(kernel)
#
# kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))  # cross-shaped kernel
# print(kernel)
#
# # 3. Opening and closing
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # define the structuring element
# Opening
# img = cv2.imread(path, 0)
# opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
#
# cv2.imshow('opening', opening)
# cv2.waitKey(0)
# Closing
# img = cv2.imread(path, 0)
# closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
#
# cv2.imshow('closing', closing)
# cv2.waitKey(0)
#
# # 4. Morphological gradient
# img = cv2.imread(path, 0)
# gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
#
# cv2.imshow('morphological gradient', gradient)
# cv2.waitKey(0)
#
#
# # 5. Top hat
# tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
# cv2.imshow('top hat', tophat)
# cv2.waitKey(0)
#
#
# # 6. Black hat
# blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
# cv2.imshow('black hat', blackhat)
# cv2.waitKey(0)
| [
"[email protected]"
] | |
e6247518456092b0faf2d226a25b043f1ac0f02c | 1ebba24841912613f9c70dffee05270c4f1f4adb | /willie/willie/logger.py | d42b1b8105e094e9f41a3d5715773f794ba2532f | [
"EFL-2.0",
"MIT"
] | permissive | freifunk-darmstadt/ffda-jarvis | 4953af0cd8629c9b9632806eb0a7440fcf94da57 | 127f3333c837c592177f84b361e3c050e00f2d3f | refs/heads/master | 2020-04-06T06:56:21.472931 | 2017-10-23T23:00:57 | 2017-10-23T23:10:03 | 32,585,430 | 0 | 8 | MIT | 2017-12-20T00:46:26 | 2015-03-20T13:32:00 | Python | UTF-8 | Python | false | false | 1,757 | py | # coding=utf8
from __future__ import unicode_literals
import logging
class IrcLoggingHandler(logging.Handler):
def __init__(self, bot, level):
super(IrcLoggingHandler, self).__init__(level)
self._bot = bot
self._channel = bot.config.core.logging_channel
def emit(self, record):
try:
msg = self.format(record)
self._bot.msg(self._channel, msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class ChannelOutputFormatter(logging.Formatter):
def __init__(self):
super(ChannelOutputFormatter, self).__init__(
fmt='[%(filename)s] %(msg)s'
)
def formatException(self, exc_info):
        # logging will throw in a newline between the message and this, but
# that's fine because Willie will strip it back out anyway
return ' - ' + repr(exc_info[1])
def setup_logging(bot):
level = bot.config.core.logging_level or 'WARNING'
logging.basicConfig(level=level)
logger = logging.getLogger('willie')
if bot.config.core.logging_channel:
handler = IrcLoggingHandler(bot, level)
handler.setFormatter(ChannelOutputFormatter())
logger.addHandler(handler)
def get_logger(name=None):
"""Return a logger for a module, if the name is given.
This is equivalent to `logging.getLogger('willie.modules.' + name)` when
name is given, and `logging.getLogger('willie')` when it is not. The latter
case is intended for use in Willie's core; modules should call
`get_logger(__name__)` to get a logger."""
if name:
return logging.getLogger('willie.modules.' + name)
else:
return logging.getLogger('willie')
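# Example (illustrative only, editor's addition): a Willie module would
# typically create its logger as
#     log = get_logger(__name__)
#     log.warning('something unexpected happened')
# while core code calls get_logger() with no argument.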
| [
"[email protected]"
] | |
6df027365ad433140946c4c093f2ee39cfc26526 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1529. Bulb Switcher IV/solution1.py | f1587218df3a9dfc14a2d51b3abd51f8be2498f8 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | class Solution:
def minFlips(self, target: str) -> int:
result = 0
        t = '1'
        for c in target:
            if c == t:
                t = '0' if t == '1' else '1'
                result += 1
return result
| [
"[email protected]"
] | |
08b83b2689775ecf6db9cf7dabdbce19d580cae5 | 3fc4cac282465350d9b2983527140fc735a0d273 | /0916/04_graphSigmoid.py | 040f25ec9ae21cfcc14c0de514865e0b9198d73b | [] | no_license | Orderlee/SBA_STUDY | 2cfeea54d4a9cbfd0c425e1de56324afcc547b81 | 4642546e7546f896fc8b06e9daba25d27c29e154 | refs/heads/master | 2022-12-25T01:08:05.168970 | 2020-09-27T14:57:23 | 2020-09-27T14:57:23 | 299,050,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(weight,x,b=0 ,asc=True):
if asc == True:
return 1/(1+np.exp(-weight * x -b))
else:
return 1/(1 + np.exp(+weight * x + b))
# the arange function is similar to Python's built-in range
x = np.arange(-5.0,5.1,0.1)
weight,bias = 1,0
y1 = sigmoid(weight,x)
mylabel= 'y=' +str(weight) +'*x +' + str(bias)
plt.plot(x,y1, color='g', label=mylabel)
weight,bias = 5,0
y2 = sigmoid(weight,x,bias)
mylabel= 'y=' +str(weight) +'*x +' + str(bias)
plt.plot(x,y2, color='b', label=mylabel)
weight,bias = 5,3
y3 = sigmoid(weight,x,bias)
mylabel= 'y=' +str(weight) +'*x +' + str(bias)
plt.plot(x,y3, color='r', label=mylabel)
weight,bias = 5,3
y4 = sigmoid(weight,x,bias,asc=False)
mylabel= 'y=' +str(weight) +'*x +' + str(bias)
plt.plot(x,y4, color='r', label=mylabel)
plt.axhline(y=0,color='black',linewidth=1, linestyle='dashed')
plt.axhline(y=1,color='black',linewidth=1, linestyle='dashed')
plt.title('sigmoid function')
plt.ylim(-0.1,1.1)
plt.legend(loc='best')
filename='sigmoid_function.png'
plt.savefig(filename)
print(filename + ' file has been saved.')
print('finished') | [
"[email protected]"
] | |
6aff12c0caf1b015d7f25d02d7caf535f388b708 | 6f30245f27a9568155f69648faf148c278136029 | /hhapps/stock/api/models/stocks.py | 523f821e7c018e0a2f210f16c8b01f46550ef3e9 | [] | no_license | r202-coe-psu/hh-apps | 82495ffec7fb09155afa4e8f571051aad824acb4 | a15453b7f502a2a71ccb89ba4c4ebe95ef3ca86f | refs/heads/master | 2021-05-03T05:48:40.766349 | 2017-08-06T22:45:30 | 2017-08-06T22:45:30 | 120,584,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import mongoengine as me
import datetime
from .common import User, Building
class Stock(me.Document):
name = me.StringField(required=True)
description = me.StringField()
tags = me.ListField(me.StringField())
owner = me.EmbeddedDocumentField(User)
building = me.EmbeddedDocumentField(Building)
status = me.StringField(required=True, default='deactivate')
created_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow)
updated_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow,
auto_now=True)
meta = {'collection': 'stocks'}
| [
"[email protected]"
] | |
8ee86e9a15eadb51909cffb7a5aab5e11ece6a78 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-250_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467513/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_24/run_cfg.py | 276345c186bf8da137a00aa8ad103b6b8f80b0b9 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-250_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467513/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-250_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_74_1_st7.root',
'/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-250_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_75_1_riU.root',
'/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-250_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_76_1_eBF.root')
)
| [
"[email protected]"
] | |
8fdca3e58563ff24f711fef8de5dab6419e9d7cf | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pn7QpvW2fW9grvYYE_9.py | 43273e64b5256518234f94d6178e37a7d1fb8af5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py |
def find_fulcrum(lst):
  for i in range(1, len(lst) - 1):
    if sum(lst[:i]) == sum(lst[i+1:]):
      return lst[i]
  return -1
| [
"[email protected]"
] | |
5fb948480b014d14344099331d759dc63ddab101 | 0e5658deaa630a603a7134847518408c09e3a6d0 | /vendor/riffyn-sdk/riffyn_nexus_sdk_v1/models/update_resource_body.py | bc11422adca154c297d4d58893e0fe48cba7194f | [] | no_license | jace-ys/lab-automation | cb0d0d2b88ec64e235cffca8bbf556b22c55ab1e | 27be3a942b111404844f29aa9a0dd957b7fde459 | refs/heads/master | 2023-06-01T20:30:55.557975 | 2021-06-16T07:46:20 | 2021-06-16T08:15:38 | 297,329,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,270 | py | # coding: utf-8
"""
Riffyn Nexus REST API V1
## Vocabulary Before you begin, please familiarize yourself with our [Glossary of Terms](https://help.riffyn.com/hc/en-us/articles/360045503694). ## Getting Started If you'd like to play around with the API, there are several free GUI tools that will allow you to send requests and receive responses. We suggest using the free app [Postman](https://www.getpostman.com/). ## Authentication Begin with a call the [authenticate](#/authentication/authenticate) endpoint using [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) with your `username` and `password` to retrieve either an API Key or an Access Token. For example: curl -X POST -u '<username>' https://api.app.riffyn.com/v1/auth -v You may then use either the API Key or the accessToken for all future requests to the API. For example: curl -H 'access-token: <ACCESS_TOKEN>' https://api.app.riffyn.com/v1/units -v curl -H 'api-key: <API_KEY>' https://api.app.riffyn.com/v1/units -v The tokens' values will be either in the message returned by the `/authenticate` endpoint or in the createApiKey `/auth/api-key` or CreateAccesToken `/auth/access-token` endpoints. The API Key will remain valid until it is deauthorized by revoking it through the Security Settings in the Riffyn Nexus App UI. The API Key is best for running scripts and longer lasting interactions with the API. The Access Token will expire automatically and is best suited to granting applications short term access to the Riffyn Nexus API. Make your requests by sending the HTTP header `api-key: $API_KEY`, or `access-token: $ACCESS_TOKEN`. In Postman, add your preferred token to the headers under the Headers tab for any request other than the original request to `/authenticate`. If you are enrolled in MultiFactor Authentication (MFA) the `status` returned by the `/authenticate` endpoint will be `MFA_REQUIRED`. A `passCode`, a `stateToken`, and a `factorId` must be passed to the [/verify](#/authentication/verify) endpoint to complete the authentication process and achieve the `SUCCESS` status. MFA must be managed in the Riffyn Nexus App UI. ## Paging and Sorting The majority of endpoints that return a list of data support paging and sorting through the use of three properties, `limit`, `offset`, and `sort`. Please see the list of query parameters, displayed below each endpoint's code examples, to see if paging or sorting is supported for that specific endpoint. Certain endpoints return data that's added frequently, like resources. As a result, you may want filter results on either the maximum or minimum creation timestamp. This will prevent rows from shifting their position from the top of the list, as you scroll though subsequent pages of a multi-page response. Before querying for the first page, store the current date-time (in memory, a database, a file...). On subsequent pages you *may* include the `before` query parameter, to limit the results to records created before that date-time. E.g. before loading page one, you store the current date time of `2016-10-31T22:00:00Z` (ISO date format). Later, when generating the URL for page two, you *could* limit the results by including the query parameter `before=1477951200000` (epoch timestamp). ## Postman endpoint examples There is a YAML file with the examples of the request on Riffyn Nexus API [Click here](/v1/collection) to get the file. If you don't know how to import the collection file, [here](https://learning.postman.com/docs/postman/collections/data-formats/#importing-postman-data) are the steps. 
## Client SDKs You may write your own API client, or you may use one of ours. [Click here](/v1/clients) to select your programming language and download an API client. # noqa: E501
OpenAPI spec version: 4.2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
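# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not produced by swagger-codegen): the module
# docstring above documents authenticating against `/v1/auth` with HTTP Basic
# auth and then sending the returned token in an `api-key` or `access-token`
# header.  The helper below only illustrates that flow; the use of the
# `requests` library and the `apiKey` response field name are assumptions.
def _example_get_api_key(username, password):  # pragma: no cover - illustrative only
    import requests  # local import so the generated module gains no hard dependency
    response = requests.post("https://api.app.riffyn.com/v1/auth", auth=(username, password))
    response.raise_for_status()
    return response.json().get("apiKey")  # assumed field name in the auth response
# ---------------------------------------------------------------------------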
class UpdateResourceBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'description': 'str',
'properties': 'list[UpdateResourceBodyProperties]',
'components': 'list[UpdateResourceBodyComponents]',
'is_instrument': 'bool'
}
attribute_map = {
'name': 'name',
'description': 'description',
'properties': 'properties',
'components': 'components',
'is_instrument': 'isInstrument'
}
def __init__(self, name=None, description=None, properties=None, components=None, is_instrument=None): # noqa: E501
"""UpdateResourceBody - a model defined in Swagger""" # noqa: E501
self._name = None
self._description = None
self._properties = None
self._components = None
self._is_instrument = None
self.discriminator = None
self.name = name
if description is not None:
self.description = description
if properties is not None:
self.properties = properties
if components is not None:
self.components = components
if is_instrument is not None:
self.is_instrument = is_instrument
@property
def name(self):
"""Gets the name of this UpdateResourceBody. # noqa: E501
The name of resource. If you do not provide this value, the existing name will be deleted # noqa: E501
:return: The name of this UpdateResourceBody. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UpdateResourceBody.
The name of resource. If you do not provide this value, the existing name will be deleted # noqa: E501
:param name: The name of this UpdateResourceBody. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this UpdateResourceBody. # noqa: E501
A brief description of the new resource. If you do not provide this value, the existing description will be deleted # noqa: E501
:return: The description of this UpdateResourceBody. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this UpdateResourceBody.
A brief description of the new resource. If you do not provide this value, the existing description will be deleted # noqa: E501
:param description: The description of this UpdateResourceBody. # noqa: E501
:type: str
"""
self._description = description
@property
def properties(self):
"""Gets the properties of this UpdateResourceBody. # noqa: E501
The full list of immutable property types and values you would like to assign to the resource. Existing immutable values will be deleted and replaced with these values # noqa: E501
:return: The properties of this UpdateResourceBody. # noqa: E501
:rtype: list[UpdateResourceBodyProperties]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this UpdateResourceBody.
The full list of immutable property types and values you would like to assign to the resource. Existing immutable values will be deleted and replaced with these values # noqa: E501
:param properties: The properties of this UpdateResourceBody. # noqa: E501
:type: list[UpdateResourceBodyProperties]
"""
self._properties = properties
@property
def components(self):
"""Gets the components of this UpdateResourceBody. # noqa: E501
Defines the components of this resource. E.g. The components of coffee are water and coffee grounds. # noqa: E501
:return: The components of this UpdateResourceBody. # noqa: E501
:rtype: list[UpdateResourceBodyComponents]
"""
return self._components
@components.setter
def components(self, components):
"""Sets the components of this UpdateResourceBody.
Defines the components of this resource. E.g. The components of coffee are water and coffee grounds. # noqa: E501
:param components: The components of this UpdateResourceBody. # noqa: E501
:type: list[UpdateResourceBodyComponents]
"""
self._components = components
@property
def is_instrument(self):
"""Gets the is_instrument of this UpdateResourceBody. # noqa: E501
Deprecated - Indicated if the new resource is an instrument. # noqa: E501
:return: The is_instrument of this UpdateResourceBody. # noqa: E501
:rtype: bool
"""
return self._is_instrument
@is_instrument.setter
def is_instrument(self, is_instrument):
"""Sets the is_instrument of this UpdateResourceBody.
Deprecated - Indicated if the new resource is an instrument. # noqa: E501
:param is_instrument: The is_instrument of this UpdateResourceBody. # noqa: E501
:type: bool
"""
self._is_instrument = is_instrument
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UpdateResourceBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateResourceBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ee58c2d78d9dcc0d17e6b124f569983d5e86a0fc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02269/s622276745.py | 706ea0b7997e1bf35c0458a5c0dfaf699bc42e03 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | class HashTable:
@staticmethod
def h1(key):
return key % 1046527
@staticmethod
def h2(key):
return 1 + (key % (1046527 - 1))
def __init__(self, size):
self.size = size
self.values = [None for _ in range(size)]
self.tables = str.maketrans({
'A':'1', 'C':'2', 'G':'3', 'T':'4',
})
def translate_key(self, value):
return int(value.translate(self.tables))
def find(self, value):
count = 0
key = self.translate_key(value)
while True:
h = (HashTable.h1(key) + count * HashTable.h2(key)) % 1046527
if self.values[h] == value:
return True
elif self.values[h] is None:
return False
count += 1
return False
def insert(self, value):
count = 0
key = self.translate_key(value)
while True:
h = (HashTable.h1(key) + count * HashTable.h2(key)) % 1046527
if self.values[h] is None:
self.values[h] = value
break
count += 1
num = int(input())
dictionary = HashTable(1046527)
for _ in range(num):
command, value = input().split()
if command == "insert":
dictionary.insert(value)
elif command == "find":
is_find = dictionary.find(value)
print("yes" if is_find else "no")
| [
"[email protected]"
] | |
9dcf4b2832852c6fe2a69c4925b60ca92503bbfc | 9114d6d9f535ad0fc6149f1ef1c49f7005926cdc | /week4/ji/Step/10/Q1978.py | ed20837262a31ccf27e24743440b0d5488895159 | [] | no_license | HyeonGyuChi/knu_likelion_study | 914c20af54e3973361551a3defa88b14f9731853 | 81ba84bbd00d76b5a2870637cf4cce8b05e15678 | refs/heads/master | 2020-05-03T12:59:01.539664 | 2019-05-21T13:14:10 | 2019-05-21T13:14:10 | 178,641,355 | 2 | 0 | null | 2019-04-06T16:57:07 | 2019-03-31T04:14:57 | null | UTF-8 | Python | false | false | 2,483 | py | '''
Baekjoon step-by-step solving, step 10
https://www.acmicpc.net/problem/1978
'''
'''
A divisor of n is any number that divides n with remainder 0.
A prime is a number whose only divisors are 1 and itself (1 itself is not prime).
There is no single rule that filters primes out directly, so the
'Sieve of Eratosthenes' idea of repeatedly crossing out multiples is used:
to find the primes from 1 to 100, for example, cross out the multiples of 2,
then the multiples of 3, then the multiples of 5, and so on.
In other words, a number survives (is prime) exactly when it cannot be
written as a product involving any of the primes found so far.
Why? Every time a prime is found, all multiples of that prime are removed,
so a number that is still left after removing the multiples of the existing
primes must itself be prime; if it can be written as a product of existing
primes it is not prime, and all of its multiples are excluded as well.
'''
'''
A number is prime if dividing it by every number from 2 up to (but not
including) itself never gives remainder 0.
# Checking 100 numbers up to 1000 that way would take 100 * 999 divisions.
# The approach below needs an extra pass to find max, but each candidate
# between 1 and max is only tested against the primes found so far,
# so far fewer divisions are needed.
# Summary of the approach
# 1. Find the largest input value, max.
# 2. Find all primes between 1 and max.
# 3. Count how many of the inputs are among the primes found in step 2.
'''
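# Editor's note: the write-up above refers to the Sieve of Eratosthenes, while the
# solution below uses trial division by the primes found so far.  For reference, a
# minimal boolean-array sieve (not used by this script) would look roughly like this:
def sieve_of_eratosthenes(limit):
    is_prime = [True] * (limit + 1)
    is_prime[0:2] = [False, False]
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, limit + 1, i):
                is_prime[j] = False
    return [i for i in range(2, limit + 1) if is_prime[i]]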
# 1. Find the largest value, max
count = int(input())  # number of inputs
data = list(map(int, input().split()))  # the input numbers
max = max(data)  # largest of the inputs
# 2. Find the primes between 1 and max
prime = []  # list that stores the primes
# 1 is not prime, so start searching from 2
for num in range(2,max+1) :
    IsPrime = True  # whether num is prime
    for p in prime :
        if num % p == 0 :  # num is a multiple of an existing prime
            IsPrime = False
            break  # stop as soon as one divisor is found
    if IsPrime :  # not divisible by any existing prime, so it is prime
        prime.append(num)  # add it to the list
# 3. Count how many of the inputs are among the primes found between 1 and max
prime_count = 0
for num in data :
if num in prime:
prime_count+= 1
print(prime_count)
''' Code-golf style solution found online > no idea what this means;
input()
print(sum((10103**~-A%A<2)-(A in[1,561,645,946])for A in map(int,input().split())))
''' | [
"[email protected]"
] | |
7cc8fb975e2594868fba5c6a7d293a405155e56f | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /codechef/ltime27_mnmx.py | 2d3c50f204e2d847bf17cc8fb2717afc78c92dad | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | def read_list(t): return [t(x) for x in input().split()]
def read_line(t): return t(input())
def read_lines(t, N): return [t(input()) for _ in range(N)]
for i in range(read_line(int)):
N = read_line(int)
print(min(read_list(int)) * (N-1))
| [
"[email protected]"
] | |
1b76b2e7b6aec0cfb2273733e398fb2a5faa6cba | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/physics/__init__.py | 58134cc0bd994e507cfc140be6c8a6f0fee9425f | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 219 | py | """
A module that helps solving problems in physics
"""
from . import units
from .matrices import mgamma, msigma, minkowski_tensor, mdft
__all__ = [
'units',
'mgamma', 'msigma', 'minkowski_tensor', 'mdft',
]
| [
"[email protected]"
] | |
db382e2598d7f7d28f75a456f6e4a16fa2887c82 | f09c8bd2f4e6eb99d8c90dbd7e36400ca5b86c2b | /test_color_space.py | 928a75d5e812967d8d54bc3a4a53af89e6e62a4a | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | lejeunel/selective-search | 6fa2f15323f7c42bde3801175a65bb9f2c58676b | a2f2ec6cea5c011b2d3763875c9173f2948d4fd4 | refs/heads/master | 2022-10-19T00:23:24.306458 | 2019-10-09T08:38:39 | 2019-10-09T08:38:39 | 99,547,790 | 0 | 0 | MIT | 2022-09-30T19:45:54 | 2017-08-07T07:07:39 | Makefile | UTF-8 | Python | false | false | 3,314 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy
from color_space import *
class TestColorSpace:
def _assert_range(self, img):
assert img.dtype == numpy.uint8
assert img.shape == (10, 10, 3)
assert 0 <= numpy.min(img)
assert 1 < numpy.max(img) <= 255
def setup_method(self, method):
self.I = numpy.ndarray((10, 10, 3), dtype=numpy.uint8)
self.I[:, :, 0] = 50
self.I[:, :, 1] = 100
self.I[:, :, 2] = 150
self.Irand = numpy.random.randint(0, 256, (10, 10, 3)).astype(numpy.uint8)
def test_to_grey_range(self):
self._assert_range(to_grey(self.Irand))
def test_to_grey_value(self):
img = to_grey(self.I)
grey_value = int(0.2125 * 50 + 0.7154 * 100 + 0.0721 * 150)
assert ((img == grey_value).all())
def test_to_Lab_range(self):
self._assert_range(to_Lab(self.Irand))
def test_to_Lab_value(self):
img = to_Lab(self.I)
def test_to_rgI_range(self):
self._assert_range(to_rgI(self.Irand))
def test_to_rgI_value(self):
img = to_rgI(self.I)
grey_value = int(0.2125 * 50 + 0.7154 * 100 + 0.0721 * 150)
assert ((img[:, :, 0] == 50).all())
assert ((img[:, :, 1] == 100).all())
assert ((img[:, :, 2] == grey_value).all())
def test_to_HSV_range(self):
self._assert_range(to_HSV(self.Irand))
def test_to_HSV_value(self):
img = to_HSV(self.I)
h, s, v = 148, 170, 150
assert ((img[:, :, 0] == h).all())
assert ((img[:, :, 1] == s).all())
assert ((img[:, :, 2] == v).all())
def test_to_nRGB_range(self):
self._assert_range(to_nRGB(self.Irand))
def test_to_nRGB_value(self):
img = to_nRGB(self.I)
denom = numpy.sqrt(50 ** 2 + 100 ** 2 + 150 ** 2) / 255.0
r, g, b = 50 / denom, 100 / denom, 150 / denom
assert ((img[:, :, 0] == int(r)).all())
assert ((img[:, :, 1] == int(g)).all())
assert ((img[:, :, 2] == int(b)).all())
def test_to_Hue_range(self):
self._assert_range(to_Hue(self.Irand))
def test_to_Hue_value(self):
img = to_Hue(self.I)
expected_h = 148
assert ((img[:, :, 0] == expected_h).all())
assert ((img[:, :, 1] == expected_h).all())
assert ((img[:, :, 2] == expected_h).all())
def test_convert_color_nonexisting_color(self):
with pytest.raises(KeyError):
convert_color(self.Irand, 'nonexisting-colorspace')
def test_convert_color_give_singlechannel_image(self):
I = numpy.random.randint(0, 255, (10, 10)).astype(numpy.uint8)
assert numpy.array_equal(convert_color(I, 'rgb')[:, :, 0], I)
def test_convert_color_value(self):
assert numpy.array_equal(convert_color(self.Irand, 'rgb'), self.Irand)
assert numpy.array_equal(convert_color(self.Irand, 'lab'), to_Lab(self.Irand))
assert numpy.array_equal(convert_color(self.Irand, 'rgi'), to_rgI(self.Irand))
assert numpy.array_equal(convert_color(self.Irand, 'hsv'), to_HSV(self.Irand))
assert numpy.array_equal(convert_color(self.Irand, 'nrgb'), to_nRGB(self.Irand))
assert numpy.array_equal(convert_color(self.Irand, 'hue'), to_Hue(self.Irand))
| [
"[email protected]"
] | |
25aa1fe53e08944cde76920458b0c7db7238e2d6 | d13aeac99a5af94d8f27ee64c6aa7c354fa56497 | /backend/shiny_smoke_26439/urls.py | 43c4e9f037be6718bda97425918834b20d966782 | [] | no_license | crowdbotics-apps/shiny-smoke-26439 | ec68cde6e144fe64884685542316f8afab1a09cb | 28b1e1e55476dedee990f6c0fd15b1c700cbfdf3 | refs/heads/master | 2023-05-01T00:15:17.114141 | 2021-05-09T15:01:46 | 2021-05-09T15:01:46 | 365,779,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | """shiny_smoke_26439 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Shiny Smoke"
admin.site.site_title = "Shiny Smoke Admin Portal"
admin.site.index_title = "Shiny Smoke Admin"
# swagger
api_info = openapi.Info(
title="Shiny Smoke API",
default_version="v1",
description="API documentation for Shiny Smoke App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
72e94c64794120f05d3a89ff10fff155d3090d7b | 7ae0ffa2168cf6ac22aca623f6d92ff84bf82a9e | /python/p071.py | 62ff3afd861e905e5117e1ffba796abd5081d8bc | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | nttan/Project-Euler-solutions | a4e53783ae5da36cb45ea33bd2f33aed277db627 | 5e7d16559bd9ad072448a35f8edac576081056f6 | refs/heads/master | 2021-01-18T13:40:53.315063 | 2016-01-06T04:49:16 | 2016-01-06T04:49:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | #
# Solution to Project Euler problem 71
# by Project Nayuki
#
# http://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import sys
if sys.version_info.major == 2:
range = xrange
def compute():
maxnumer = 0
maxdenom = 1
for d in range(2, 1000001):
n = d * 3 // 7
if d % 7 == 0:
n -= 1
if n * maxdenom > d * maxnumer:
maxnumer = n
maxdenom = d
return str(maxnumer)
if __name__ == "__main__":
print(compute())
| [
"[email protected]"
] | |
c653396b8ff06da0ed0fbbe3a1a55261e785f72a | df4a7c46c46d1eca6570493b9707bdf64e54f8d3 | /py/226.invert-binary-tree.py | 4f998161a7caf7bd11cb762cee6bb52c513da42f | [] | no_license | CharmSun/my-leetcode | 52a39bf719c507fb7032ed424fe857ba7340aea3 | 5325a56ba8c40d74d9fef2b19bac63a4e2c44a38 | refs/heads/master | 2023-03-29T06:39:49.614264 | 2021-03-28T16:33:52 | 2021-03-28T16:33:52 | 261,364,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #
# @lc app=leetcode id=226 lang=python3
#
# [226] Invert Binary Tree
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # invert via post-order recursion
def invertTree(self, root: TreeNode) -> TreeNode:
if not root:
return None
left_child = self.invertTree(root.left)
right_child = self.invertTree(root.right)
root.left = right_child
root.right = left_child
return root
# @lc code=end
| [
"[email protected]"
] | |
92e956d90cbde985404e992cfb58c3b2353845ee | 6e4e6b64c035881f1cff39db616b0a80e1568c51 | /ABC095/q2.py | 03ae7f4a8faaa3f620ab29a4a57a1a728edb1f6e | [] | no_license | Lischero/Atcoder | f7471a85ee553e3ae791e3e5670468aea1fa53cc | f674d6a20a56eebdafa6d50d5d2d0f4030e5eace | refs/heads/master | 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | # -*- coding:utf-8 -*-
if __name__ == "__main__":
n, x = map(int, input().split())
m = [ int(input()) for _ in range(n) ]
ans = len(m)
singleWeight = sum(m)
surplus = x - singleWeight
if surplus > 0:
ans += surplus//min(m)
surplus -= min(m)*(surplus//min(m))
else:
pass
print(ans)
| [
"[email protected]"
] | |
c0be77f66a52f0549e9eed79e2857478ca5e0776 | c5b9f0fabffb6b2d13c6e350c8187a922709ac60 | /devel/.private/pal_detection_msgs/lib/python2.7/dist-packages/pal_detection_msgs/msg/_Gesture.py | ae0174ce103dd369b02deae542cd4e68ed7ceb15 | [] | no_license | MohamedEhabHafez/Sorting_Aruco_Markers | cae079fdce4a14561f5e092051771d299b06e789 | 0f820921c9f42b39867565441ed6ea108663ef6c | refs/heads/master | 2020-12-09T02:43:00.731223 | 2020-01-15T17:31:29 | 2020-01-15T17:31:29 | 233,154,293 | 0 | 0 | null | 2020-10-13T18:46:44 | 2020-01-11T00:41:38 | Makefile | UTF-8 | Python | false | false | 8,082 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_detection_msgs/Gesture.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class Gesture(genpy.Message):
_md5sum = "1bc7e8240ed437c7df9ff2c69342d63a"
_type = "pal_detection_msgs/Gesture"
_has_header = True #flag to mark the presence of a Header object
_full_text = """## Contains data relative to recognized gestures
Header header
# Gesture identifier
string gestureId
# Position of the hand when the gesture was recognized in the camera frame in m
geometry_msgs/Point position3D
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
__slots__ = ['header','gestureId','position3D']
_slot_types = ['std_msgs/Header','string','geometry_msgs/Point']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,gestureId,position3D
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Gesture, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.gestureId is None:
self.gestureId = ''
if self.position3D is None:
self.position3D = geometry_msgs.msg.Point()
else:
self.header = std_msgs.msg.Header()
self.gestureId = ''
self.position3D = geometry_msgs.msg.Point()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.gestureId
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3d().pack(_x.position3D.x, _x.position3D.y, _x.position3D.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.position3D is None:
self.position3D = geometry_msgs.msg.Point()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.gestureId = str[start:end].decode('utf-8')
else:
self.gestureId = str[start:end]
_x = self
start = end
end += 24
(_x.position3D.x, _x.position3D.y, _x.position3D.z,) = _get_struct_3d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.gestureId
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3d().pack(_x.position3D.x, _x.position3D.y, _x.position3D.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.position3D is None:
self.position3D = geometry_msgs.msg.Point()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.gestureId = str[start:end].decode('utf-8')
else:
self.gestureId = str[start:end]
_x = self
start = end
end += 24
(_x.position3D.x, _x.position3D.y, _x.position3D.z,) = _get_struct_3d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
| [
"[email protected]"
] | |
422fd25c0af34230a5b3f5b6c60c72f3edeca6ac | 98aee99dcb9a10f5aac6817388261d46015706a2 | /app.py | f9ca46a33b70689729092c39216f55b96f63f4f2 | [] | no_license | anselmo-2010/Inventar-Flask-urok_47- | e403db991a56240e775f8de020f0a2bfc6b29085 | 83f6cf0f67c312c5bcda6625e617e3b3c7fad8bb | refs/heads/main | 2023-06-11T18:42:46.761072 | 2021-07-02T14:34:04 | 2021-07-02T14:34:04 | 372,032,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def homepage():
f = open('goods.txt', 'r', encoding='utf-8')
txt = f.readlines()
return render_template('index.html', goods = txt)
@app.route('/add/', methods=["POST"])
def add():
good = request.form["good"]
f = open('goods.txt', 'a+', encoding='utf-8')
f.write(good + "\n")
f.close()
return """
    <h1>Inventory updated</h1>
    <a href='/'>Home</a>
"""
| [
"[email protected]"
] | |
bf156e026fa3ba1cc2248d87172ffa47bf378cbe | 109a830aad476305f029274d75e28bec8b54f597 | /mainapp/migrations/0002_somemodel.py | f1f20acd554c87122b21108f39125efa4850890d | [] | no_license | Dapucla/EP | 53b156088046abfd6833eba95dc4393ebeb93f4e | 9368032b4b289b20ec1bdf0033d3fe199223d200 | refs/heads/master | 2023-06-19T08:02:55.984888 | 2021-07-11T22:52:24 | 2021-07-11T22:52:24 | 330,009,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Generated by Django 3.1.5 on 2021-01-06 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SomeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
],
),
]
| [
"[email protected]"
] | |
63b0aea12c879c660f7baaf98db4273cad84fa67 | a7cfae2264e31df40294ec4f1f0686f1450ee75a | /pikachuwechat/modules/core/analysisfriends.py | d070994ae2536c058c12926f6b97bb0c5a6b3f2e | [
"Apache-2.0"
] | permissive | CharlesPikachu/pikachuwechat | 5e991127ac2d30c802fc50cba63b4fc14367428a | 056974fbe733843a9390172aba823c6474b2b857 | refs/heads/master | 2023-05-23T07:52:54.688351 | 2022-07-20T14:59:11 | 2022-07-20T14:59:11 | 169,863,170 | 45 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | '''
Function:
    WeChat friend analysis
Author:
    Charles
WeChat public account:
    Charles的皮卡丘
'''
import os
import itchat
from ..utils import Logger, checkDir
from pyecharts.charts import Pie, Map
from pyecharts import options as opts
'''WeChat friend analysis'''
class AnalysisFriends():
    func_name = 'WeChat friend analysis'
logger_handle = Logger(func_name+'.log')
def __init__(self, **kwargs):
self.options = kwargs
self.savedir = kwargs.get('savedir', 'results')
checkDir(self.savedir)
    '''Entry point for external callers'''
def run(self):
        # Log in to WeChat
try: itchat.auto_login(hotReload=True)
except: itchat.auto_login(hotReload=True, enableCmdQR=True)
        # Fetch the info of all friends
AnalysisFriends.logger_handle.info('run getFriendsInfo...')
friends_info = self.getFriendsInfo()
        # Analyze the friends' location distribution
AnalysisFriends.logger_handle.info('run analysisArea...')
self.analysisArea(friends_info=friends_info)
        # Analyze the friends' gender distribution
AnalysisFriends.logger_handle.info('run analysisSex...')
self.analysisSex(friends_info=friends_info)
    '''Analyze the friends' location distribution'''
    def analysisArea(self, title='Friends location distribution', friends_info=None):
        area_infos = {'Unknown': 0}
        for item in friends_info.get('province'):
            if not item: area_infos['Unknown'] += 1
            else:
                if item in area_infos: area_infos[item] += 1
                else: area_infos[item] = 1
map_ = Map(init_opts=dict(theme='purple-passion', page_title=title))
map_.add(title, data_pair=tuple(zip(area_infos.keys(), area_infos.values())), maptype='china')
map_.set_global_opts(
title_opts=opts.TitleOpts(title=title),
visualmap_opts=opts.VisualMapOpts(max_=200),
)
map_.render(os.path.join(self.savedir, '%s.html' % title))
    '''Analyze the friends' gender distribution'''
    def analysisSex(self, title='Friends gender distribution', friends_info=None):
        sex_infos = {'Male': 0, 'Female': 0, 'Unknown': 0}
        for item in friends_info.get('sex'):
            if item == 0: sex_infos['Unknown'] += 1
            elif item == 1: sex_infos['Male'] += 1
            elif item == 2: sex_infos['Female'] += 1
pie = Pie(init_opts=dict(theme='westeros', page_title=title)).add(title, data_pair=tuple(zip(sex_infos.keys(), sex_infos.values())), rosetype='area')
pie.set_global_opts(title_opts=opts.TitleOpts(title=title))
pie.render(os.path.join(self.savedir, '%s.html' % title))
    '''Fetch the required WeChat friends info'''
def getFriendsInfo(self):
friends = itchat.get_friends()
friends_info = dict(
province=self.getKeyInfo(friends, "Province"),
city=self.getKeyInfo(friends, "City"),
nickname=self.getKeyInfo(friends, "Nickname"),
sex=self.getKeyInfo(friends, "Sex"),
signature=self.getKeyInfo(friends, "Signature"),
remarkname=self.getKeyInfo(friends, "RemarkName"),
pyquanpin=self.getKeyInfo(friends, "PYQuanPin")
)
return friends_info
    '''Get the value of a given key for every friend'''
def getKeyInfo(self, friends, key):
return list(map(lambda friend: friend.get(key), friends)) | [
"[email protected]"
] | |
966f59944c7e49128f1479f400e957dbb60a6017 | 1b15b42087d58002432daff45fafb7eb4d0ca2d8 | /733_flood_fill_4.py | 321b4905637e4356c76b96e3ed8d65a27e4642e2 | [] | no_license | georgebzhang/Python_LeetCode | 2b92be66880eaf4642a603897386622dc81fbaf3 | c1703358ceeed67e3e85de05eda74447f31176a2 | refs/heads/master | 2020-04-26T01:38:33.750580 | 2019-06-21T21:51:13 | 2019-06-21T21:51:13 | 173,209,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | from collections import deque
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
dirs = ((-1, 0), (1, 0), (0, -1), (0, 1))
def neighbors(i0, j0):
result = []
for di, dj in dirs:
i, j = i0 + di, j0 + dj
if 0 <= i < N and 0 <= j < M and image[j][i] == color:
result.append((i, j))
return result
def dfs(i, j):
if (i, j) in visited:
return
visited.add((i, j))
image[j][i] = newColor
for n in neighbors(i, j):
dfs(*n)
M, N = len(image), len(image[0])
color = image[sr][sc]
visited = set()
dfs(sc, sr)
return image
def print_image(self, image):
for row in image:
print(row)
def print_ans(self, ans):
self.print_image(ans)
def test(self):
image = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
sr = 1
sc = 1
newColor = 2
self.print_image(image)
print()
ans = self.floodFill(image, sr, sc, newColor)
self.print_ans(ans)
if __name__ == '__main__':
s = Solution()
s.test()
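
# Editor's sketch (not part of the submitted solution): the `deque` import at the
# top is never used by the DFS above; an equivalent BFS flood fill -- one plausible
# reason for that import -- could look like this:
def flood_fill_bfs(image, sr, sc, new_color):
    rows, cols = len(image), len(image[0])
    color = image[sr][sc]
    if color == new_color:
        return image
    q = deque([(sr, sc)])
    while q:
        r, c = q.popleft()
        if 0 <= r < rows and 0 <= c < cols and image[r][c] == color:
            image[r][c] = new_color
            q.extend([(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)])
    return image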
| [
"[email protected]"
] | |
873f1fc4b4c80fdde0de3cbc54f6538b5a07d1dc | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/meta/SendInvitesWindowMeta.py | fa3db686b6a0c9f2e14f6b560eccda5cbbab4bca | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,344 | py | # 2017.05.04 15:24:37 Stลednรญ Evropa (letnรญ ฤas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/SendInvitesWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class SendInvitesWindowMeta(AbstractWindowView):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends AbstractWindowView
"""
def showError(self, value):
self._printOverrideError('showError')
def setOnlineFlag(self, value):
self._printOverrideError('setOnlineFlag')
def sendInvites(self, accountsToInvite, comment):
self._printOverrideError('sendInvites')
def getAllAvailableContacts(self):
self._printOverrideError('getAllAvailableContacts')
def as_onReceiveSendInvitesCooldownS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_onReceiveSendInvitesCooldown(value)
def as_setDefaultOnlineFlagS(self, onlineFlag):
if self._isDAAPIInited():
return self.flashObject.as_setDefaultOnlineFlag(onlineFlag)
def as_setInvalidUserTagsS(self, tags):
"""
:param tags: Represented by Vector.<String> (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setInvalidUserTags(tags)
def as_setWindowTitleS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_setWindowTitle(value)
def as_onContactUpdatedS(self, contact):
if self._isDAAPIInited():
return self.flashObject.as_onContactUpdated(contact)
def as_onListStateChangedS(self, isEmpty):
if self._isDAAPIInited():
return self.flashObject.as_onListStateChanged(isEmpty)
def as_enableDescriptionS(self, isEnabled):
if self._isDAAPIInited():
return self.flashObject.as_enableDescription(isEnabled)
def as_enableMassSendS(self, isEnabled, addAllTooltip):
if self._isDAAPIInited():
return self.flashObject.as_enableMassSend(isEnabled, addAllTooltip)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\meta\SendInvitesWindowMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:37 Central European Summer Time
| [
"[email protected]"
] | |
67e2f761958dc0dc7fd2fdf6192a2e8992a00d81 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/op_cmp_int-174.py | 6a4dcd0a82e8a93715c662e6aaa1d9e59d55555d | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | x:int = 42
y:int = 7
print(x == y)
print(x != y)
print(x < y)
print(x <= y)
print(x > y)
print(x >= y)
print(x == x)
print(x != x)
print(x < x)
print(x <= x)
print(x > x)
$Exp
| [
"[email protected]"
] | |
1251b3e38bcfdfb9156e1e5fb3bf21bd2970de42 | a2d3f2787cd26f2bf90f30ba9516d1675a69f8be | /emission/tests/storageTests/TestUsefulQueries.py | 3e66eb5cfee060b7bc51368d76aa66184a92430b | [
"BSD-3-Clause"
] | permissive | njriasan/e-mission-server | 318833ba06cb7f40ddb7b8d2ac3da4d049e7c846 | 23224ddcfd29f31c13f75d819d9ad8530aea052f | refs/heads/master | 2020-05-02T11:02:00.528836 | 2019-03-27T19:21:31 | 2019-03-27T19:21:31 | 177,915,408 | 1 | 0 | BSD-3-Clause | 2019-03-27T04:01:32 | 2019-03-27T04:01:31 | null | UTF-8 | Python | false | false | 4,761 | py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
from datetime import datetime
import logging
# Our imports
from emission.core.get_database import get_section_db
import emission.storage.decorations.useful_queries as tauq
class UsefulQueriesTests(unittest.TestCase):
def setUp(self):
get_section_db().remove({"_id": "foo_1"})
get_section_db().remove({"_id": "foo_2"})
get_section_db().remove({"_id": "foo_3"})
def tearDown(self):
get_section_db().remove({"_id": "foo_1"})
get_section_db().remove({"_id": "foo_2"})
get_section_db().remove({"_id": "foo_3"})
self.assertEqual(get_section_db().find({'_id': 'foo_1'}).count(), 0)
self.assertEqual(get_section_db().find({'_id': 'foo_2'}).count(), 0)
self.assertEqual(get_section_db().find({'_id': 'foo_3'}).count(), 0)
def testGetAllSections(self):
get_section_db().insert({"_id": "foo_1", "trip_id": "bar"})
get_section_db().insert({"_id": "foo_2", "trip_id": "bar"})
get_section_db().insert({"_id": "foo_3", "trip_id": "baz"})
self.assertEqual(len(tauq.get_all_sections("foo_1")), 2)
def testGetAllSectionsForUserDay(self):
dt1 = datetime(2015, 1, 1, 1, 1, 1)
dt2 = datetime(2015, 1, 1, 2, 1, 1)
dt3 = datetime(2015, 1, 1, 3, 1, 1)
get_section_db().insert({"_id": "foo_1",
"type":"move",
"trip_id": "trip_1",
"section_id": 3,
"section_start_datetime": dt1,
"section_end_datetime": dt2})
get_section_db().insert({"_id": "foo_2",
"type":"place",
"trip_id": "trip_2",
"section_start_datetime": dt2,
"section_end_datetime": dt3})
get_section_db().insert({"_id": "foo_3",
"type": "move",
"trip_id": "trip_3",
"section_id": 0,
"section_start_datetime": dt3})
self.assertEqual(tauq.get_trip_before("foo_3")["_id"], "foo_1")
def testGetTripBefore(self):
dt1 = datetime(2015, 1, 1, 1, 1, 1)
dt2 = datetime(2015, 1, 1, 2, 1, 1)
dt3 = datetime(2015, 1, 2, 3, 1, 1)
get_section_db().insert({"_id": "foo_1",
"user_id": "test_user",
"type":"move",
"section_id": 3,
"section_start_datetime": dt1,
"section_end_datetime": dt2})
get_section_db().insert({"_id": "foo_2",
"user_id": "test_user",
"type":"place",
"section_start_datetime": dt2,
"section_end_datetime": dt3})
get_section_db().insert({"_id": "foo_3",
"user_id": "test_user",
"type": "move",
"section_id": 0,
"section_start_datetime": dt3})
secList = tauq.get_all_sections_for_user_day("test_user", 2015, 1, 1)
self.assertEqual(len(secList), 1)
self.assertEqual(secList[0]._id, "foo_1")
def testGetBounds(self):
dt1 = datetime(2015, 1, 1, 1, 1, 1)
dt2 = datetime(2015, 1, 1, 2, 1, 1)
dt3 = datetime(2015, 1, 2, 3, 1, 1)
sectionJsonList = []
sectionJsonList.append({"_id": "foo_1",
"user_id": "test_user",
"type":"move",
"section_id": 3,
"section_start_datetime": dt1,
"section_end_datetime": dt2,
"section_start_point": {"coordinates": [1,2], "type": "Point"},
"section_end_point": {"coordinates": [3,4], "type": "Point"}})
sectionJsonList.append({"_id": "foo_2",
"user_id": "test_user",
"type":"place",
"section_start_datetime": dt2,
"section_end_datetime": dt3,
"section_start_point": {"coordinates": [5,6], "type": "Point"},
"section_end_point": {"coordinates": [7,8], "type": "Point"}})
sectionJsonList.append({"_id": "foo_3",
"user_id": "test_user",
"type": "move",
"section_id": 0,
"section_start_datetime": dt3,
"section_start_point": {"coordinates": [9,10], "type": "Point"},
"section_end_point": {"coordinates": [11,12], "type": "Point"}})
bounds = tauq.get_bounds(sectionJsonList)
self.assertEqual(bounds[0].lat, 2)
self.assertEqual(bounds[0].lon, 1)
self.assertEqual(bounds[1].lat, 12)
self.assertEqual(bounds[1].lon, 11)
if __name__ == '__main__':
import emission.tests.common as etc
etc.configLogging()
unittest.main()
| [
"[email protected]"
] | |
566bc16353fd71873f7a98faf1a9959d315c2f00 | 118546c7bf7fe3063ed68e1c6270b33ed500c3c9 | /thread/ex03.py | 29313ac54622434d7c81d0b0c0d100abbc3d47d8 | [] | no_license | yoonah95/Python_practice | 83b1070f1c95d57a9ea81d2ec3898521f98544f4 | 1e8fbded66e789ba77b3af5499520b8e8e01a6a1 | refs/heads/master | 2022-06-12T20:55:38.490142 | 2020-05-08T02:20:20 | 2020-05-08T02:20:20 | 256,125,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py |
import threading
import queue
class _Operation(threading.Thread):
def __init__(self,sem,*args,**kwds):
self.sem = sem
self.method = kwds.pop('target')
super().__init__(targer=self.wrappedTarget , args=args,kwds=kwds,daemon=True)
def wrappedTarget(self,*args,**kwds):
self.method()
if isinstance(self.sem,threading.Semaphore):
self.sem.release()
class OperationQueue:
def __init__(self,numberOfConcurrentTask=1):
self.queue = queue.Queue()
self.sem = threading.Semaphore(numberOfConcurrentTask)
def add(self,method,*args,**kwds):
task = _Operation(self.sem,method,*args,**kwds)
self.queue.put(task)
def mainloop(self):
while True:
t = self.queue.get()
self.sem.acquire()
t.start()
def start(self,run_async=False):
t = threading.Thread(target=self.mainloop,daemon=True)
t.start()
if not run_async:
t.join()
def foo(n):
for i in range(n):
print(i)
time.sleep(0.25)
q = OperationQueue(3)
q.start(True)
for _ in range(100):
q.add(foo,random.randrange(2,40))
time.sleep(40)
| [
"[email protected]"
] | |
884dc27b316f6a8a853f77c0600bfa03921dc5a5 | e95fb0661cbdcd490b44503462fbd20bd40b2771 | /sfrCore/helpers/__init__.py | cd35adffb541bf9b7fd3f97c31217cef216f753f | [] | no_license | mwbenowitz/sfr-core-test | 785139cd12c86c058d7ec13344a46337dda7b982 | 6b848fcef6dd122e6e11efed1d7f6a6dcfd5a1ed | refs/heads/master | 2020-05-19T23:40:07.644881 | 2019-05-07T13:38:21 | 2019-05-07T13:38:21 | 185,272,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from sfrCore.helpers.errors import DataError, DBError
from sfrCore.helpers.logger import createLog | [
"[email protected]"
] | |
02d41a37e03a917b39eb8dbf16d9c74ed1b6566d | 7baa4fe99adf5d05f40d25a117adc7c022ab76f7 | /examples/greedy_planner_example.py | d31d9e0f841a0f79cbfe9115b4d96586ce3a3972 | [
"BSD-3-Clause"
] | permissive | yonetaniryo/planning_python | 2c75b2f462c18b027a1b65bffa4f18a946a5a29f | 3d7d3c06cc577445a9b5b423f2907f5efa830a0f | refs/heads/master | 2022-06-09T04:18:37.326931 | 2020-05-11T04:27:56 | 2020-05-11T04:27:56 | 262,938,335 | 0 | 0 | BSD-3-Clause | 2020-05-11T04:29:39 | 2020-05-11T04:29:38 | null | UTF-8 | Python | false | false | 3,289 | py | #!/usr/bin/env python
"""A minimal example that loads an environment from a png file, runs greedy planner and returns the path.
The search process and final path are rendered
Author: Mohak Bhardwaj
Date: 28 October, 2017
"""
import sys
sys.path.insert(0, "..")
import matplotlib.pyplot as plt
import time
from planning_python.environment_interface.env_2d import Env2D
from planning_python.state_lattices.common_lattice.xy_analytic_lattice import XYAnalyticLattice
from planning_python.cost_functions.cost_function import PathLengthNoAng, UnitCost
from planning_python.heuristic_functions.heuristic_function import EuclideanHeuristicNoAng, ManhattanHeuristicNoAng
from planning_python.data_structures.planning_problem import PlanningProblem
from planning_python.planners.greedy_planner import GreedyPlanner
import os
#Step1: Set some problem parameters
x_lims = [0, 200] # low(inclusive), upper(exclusive) extents of world in x-axis
y_lims = [0, 200] # low(inclusive), upper(exclusive) extents of world in y-axis
start = (0, 0) #start state(world coordinates)
goal = (199,199) #goal state(world coordinates)
visualize = True
#Step 2: Load environment from file
envfile = os.path.abspath("../../motion_planning_datasets/single_bugtrap/train/1.png")
env_params = {'x_lims': [0, 200], 'y_lims': [0, 200]}
e = Env2D()
e.initialize(envfile, env_params)
#Step 3: Create lattice to overlay on environment
lattice_params = dict()
lattice_params['x_lims'] = [0, 200] # Useful to calculate number of cells in lattice
lattice_params['y_lims'] = [0, 200] # Useful to calculate number of cells in lattice
lattice_params['resolution'] = [1, 1] # Useful to calculate number of cells in lattice + conversion from discrete to continuous space and vice-versa
lattice_params['origin'] = start # Used for conversion from discrete to continuous and vice-versa.
lattice_params['rotation'] = 0 # Can rotate lattice with respect to world
lattice_params['connectivity'] = 'eight_connected' #Lattice connectivity (can be four or eight connected for xylattice)
lattice_params['path_resolution'] = 1 #Resolution for defining edges and doing collision checking (in meters)
l = XYAnalyticLattice(lattice_params)
#Step 4: Create cost and heuristic objects
cost_fn = PathLengthNoAng() #Penalize length of path
heuristic_fn = EuclideanHeuristicNoAng()
#(Additionally, you can precalculate edges and costs on lattice for speed-ups)
l.precalc_costs(cost_fn) #useful when lattice remains same across problems
#Step 5: Create a planning problem
prob_params = {'heuristic_weight': 1.0}
start_n = l.state_to_node(start)
goal_n = l.state_to_node(goal)
prob = PlanningProblem(prob_params)
prob.initialize(e, l, cost_fn, heuristic_fn, start_n, goal_n, visualize=visualize)
#Step 6: Create Planner object and ask it to solve the planning problem
planner = GreedyPlanner()
planner.initialize(prob)
path, path_cost, num_expansions, plan_time, came_from, cost_so_far, c_obs = planner.plan()
print('Path: ', path)
print('Path Cost: ', path_cost)
print('Number of Expansions: ', num_expansions)
print('Time taken: ', plan_time)
e.initialize_plot(start, goal)
e.plot_path(path, 'solid', 'red', 3)
plt.show()
| [
"[email protected]"
] | |
c42fa53fdbb01814855b836a54b9f8b8a3f3465c | 3db5e39d9bbe1c86229a26e7d19e3ceb37f902e3 | /Baekjoon/backtracking/6603_๋ก๋.py | 8ac6e5866cc031be2dcc4ef32624dfba585f1c2b | [] | no_license | sweetrain096/rain-s_python | 5ca2fe5e7f97a681b6e75e64264687a723be1976 | eb285eb50eeebfaa2b4a4d7816314e2073faab00 | refs/heads/master | 2021-07-19T16:06:01.389283 | 2020-05-29T14:56:16 | 2020-05-29T14:56:16 | 162,240,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | import sys
sys.stdin = open("6603_input.txt")
def dfs(start):
now = data[start]
visited[now] = 1
result.append(now)
# print(result)
if len(result) == 6:
print(' '.join(map(str, result)))
for i in range(start + 1, total + 1):
if not visited[data[i]]:
dfs(i)
visited[now] = 0
result.pop()
while True:
data = list(map(int, input().split()))
total = data[0]
if not total:
break
result = []
for start in range(1, total - 6 + 2):
visited = [0] * 50
dfs(start)
print()
# import sys
# sys.stdin = open("6603_input.txt")
#
# def dfs(start):
# start = data[start]
# visited[start] = 1
# result.append(start)
# # print(result)
# if len(result) == 6:
#
# print(result)
# for i in range(start + 1, total + 1):
# if not visited[data[i]]:
# dfs(i)
# visited[start] = 0
# result.pop()
#
#
# while True:
# data = list(map(int, input().split()))
# total = data[0]
# if not total:
# break
# print(data)
# result = []
# visited = [0] * 50
# for start in range(1, total - 6 + 2):
# dfs(start)
#
#
# # def dfs(start, visited):
# # now = data[start]
# # print(now)
# # if not visited[now]:
# # print(visited)
# # visited[now] = 1
# # result.append(now)
# #
# # if len(result) == 6:
# # print("result", result)
# # return
# #
# # for i in range(start, total + 1):
# # next = data[i]
# # if not visited[next] and len(result) < 6:
# #
# # # print(' '.join(map(str, result)))
# # print(result)
# #
# # dfs(i, visited)
# # t = result.pop()
# # visited[t] = 0
# # print("pop", t, result) | [
"[email protected]"
] | |
ab2d48cc4933834d30c23e0ad66a2bad6a4eeaf9 | 11cb48f645b2a051a822e4a3a9dbdad87ff0f2f7 | /meals/migrations/0005_auto_20200419_1329.py | 368ff57c85ccb43d20b95a5fc929bbab8de69f96 | [] | no_license | arpan-shrestha/Django-resturent | 54e3c77e6a23c780e4346e479683d97769cc80a5 | 30fa9a155b8377b16f95b7cade303b8dccf73bee | refs/heads/master | 2022-07-16T14:53:16.032576 | 2020-05-19T11:29:21 | 2020-05-19T11:29:21 | 261,817,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 2.2.2 on 2020-04-19 13:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meals', '0004_meals_category'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'category', 'verbose_name_plural': ' categories'},
),
]
| [
"[email protected]"
] | |
64e7f9d24119950e166280edbb3d7e26fed07c9e | ec9aa6dd7405d5483e9ae09f700bb718f10cb4b5 | /backend/home/migrations/0001_load_initial_data.py | 74093808df7d90106c11cd7f5251a25ef8971ab4 | [] | no_license | crowdbotics-apps/whatsa-29124 | 0c41ef1e063ff30cb95496a56f3398128dd90c2a | d3defa32ca7ad10787f4db6384426c428b3af230 | refs/heads/master | 2023-06-28T04:04:21.757506 | 2021-07-24T20:30:05 | 2021-07-24T20:30:05 | 389,195,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "whatsa-29124.botics.co"
site_params = {
"name": "Whatsa",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
0f1aede94dec481182ebc9f1c26b4fc0290fea06 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/python/kernel_tests/conv_ops_3d_test.py | f4616fd661f989c1c3e4939a3d062b0260f8572e | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 18,347 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu:
if not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16]
else:
return [dtypes.float64, dtypes.float32, dtypes.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, dtype, use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing numbers from 0 to 1.
# We keep the input tensor values fairly small to avoid overflowing float16
# during the conv3d.
x1 = [f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
results = []
for data_format, use_gpu in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
dtype,
use_gpu=use_gpu)
results.append(result)
with self.test_session() as sess:
values = sess.run(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
tol = 1e-6
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol)
def testConv3D1x1x1Filter(self):
expected_output = [
0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259,
0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926,
1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148,
6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148,
9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222,
10.70023148
]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
def testConv3DStrides(self):
expected_output = [
0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095,
0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095,
0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095,
0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095,
1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,
1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095,
2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381,
2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095,
1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095,
3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,
3.45238095, 3.46488095, 1.98452381
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016,
1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016,
1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159,
4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016,
4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,
4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074,
9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [
0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778,
0.92592593, 1.
]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122,
0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551,
2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714,
1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082,
0.3691691, 0.37244898, 0.125
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [
0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898,
2.784257
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[1.5625, 1.875])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
for data_type in self._DtypesToTest(use_gpu=use_gpu):
# TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if data_type == dtypes.float64:
tolerance = 1e-8
elif data_type == dtypes.float32:
tolerance = 5e-3
elif data_type == dtypes.float16:
tolerance = 1e-3
with self.test_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
new_strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
new_strides = strides
conv = nn_ops.conv3d(
input_tensor,
filter_tensor,
new_strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape))
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(
orig_input_tensor, input_shape, conv, output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(
filter_tensor, filter_shape, conv, output_shape)
if data_type != dtypes.float16:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
6a20354acc76f8a3df9db14e09babcd7f6f2ce58 | 336f11ee8934581f05ab620c5324c601ba864b05 | /jb_adaptive_python/Problems/Positive number/Programming/tests.py | 323f0943b1fcf1048069f4ec166be163d4f07337 | [] | no_license | ancient-clever/sandbox | 01adeee2638a23533965cf57ca873a30e7dfad3d | 87dec3bf8860a67a36154ee5d7c826d919d3111b | refs/heads/master | 2022-05-17T04:49:54.703068 | 2020-01-19T17:44:27 | 2020-01-19T17:44:27 | 206,946,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from test_helper import check_samples
if __name__ == '__main__':
check_samples(samples=[["7","YES"]]) | [
"[email protected]"
] | |
faac230832135bc3a080bd66e534c0ecf5539e37 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/encoding/filters/crop/crop_api.py | eb68ce39513a7783814c8a4597f68194bf384fc7 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 3,178 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.crop_filter import CropFilter
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.filters.crop.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.filters.crop.crop_filter_list_query_params import CropFilterListQueryParams
class CropApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(CropApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, crop_filter, **kwargs):
# type: (CropFilter, dict) -> CropFilter
"""Create Crop Filter
:param crop_filter: The Crop Filter to be created
:type crop_filter: CropFilter, required
:return: Crop details
:rtype: CropFilter
"""
return self.api_client.post(
'/encoding/filters/crop',
crop_filter,
type=CropFilter,
**kwargs
)
def delete(self, filter_id, **kwargs):
# type: (string_types, dict) -> BitmovinResponse
"""Delete Crop Filter
:param filter_id: Id of the Crop Filter.
:type filter_id: string_types, required
:return:
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/filters/crop/{filter_id}',
path_params={'filter_id': filter_id},
type=BitmovinResponse,
**kwargs
)
def get(self, filter_id, **kwargs):
# type: (string_types, dict) -> CropFilter
"""Crop Filter Details
:param filter_id: Id of the Crop Filter.
:type filter_id: string_types, required
:return: Crop details
:rtype: CropFilter
"""
return self.api_client.get(
'/encoding/filters/crop/{filter_id}',
path_params={'filter_id': filter_id},
type=CropFilter,
**kwargs
)
def list(self, query_params=None, **kwargs):
# type: (CropFilterListQueryParams, dict) -> CropFilter
"""List Crop Filters
:param query_params: Query parameters
:type query_params: CropFilterListQueryParams
:return: List of Crop Filters
:rtype: CropFilter
"""
return self.api_client.get(
'/encoding/filters/crop',
query_params=query_params,
pagination_response=True,
type=CropFilter,
**kwargs
)
| [
"[email protected]"
] | |
c76c48ac8d93e255cb9cc8603a3d286394a5bc90 | f7a252b63b16a8f21d6921fd1f5c20075fec4cc9 | /helpers/hadoop/wikipedia/words/merger.py | 4d6c877c43697dbe39766419075eae9710f21503 | [] | no_license | zymITsky/twittomatic | 5803b4c2db5f3c0ee1b65af86171b2c5f9b2c797 | 396c0800b594a85fbcb54e772b3bc60837ed3eab | refs/heads/master | 2020-06-01T10:44:38.887254 | 2013-02-14T20:28:17 | 2013-02-14T20:28:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import gzip
import heapq
import glob
def merge_files(inputfiles):
files = []
for filename in inputfiles:
files.append(iter(gzip.open(filename, 'r')))
iterator = iter(heapq.merge(*files))
line = iterator.next()
prevanchor, prevcounter = line.rstrip('\n').rsplit('\t', 1)
prevcounter = int(prevcounter)
for line in iterator:
anchor, counter = line.rstrip('\n').rsplit('\t', 1)
if anchor == prevanchor:
prevcounter += int(counter)
else:
print "%s\t%s" % (prevanchor, prevcounter)
prevanchor = anchor
prevcounter = int(counter)
print "%s\t%s" % (prevanchor, prevcounter)
files = glob.glob('*.tsv.gz')
merge_files(files)
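
# Editor's note (not part of the original script): heapq.merge assumes every input
# iterable is already sorted; it then lazily yields one merged, sorted stream, which
# is why each .tsv.gz shard must be pre-sorted by anchor. For example:
#
#   list(heapq.merge([1, 3, 5], [2, 4, 6]))  ->  [1, 2, 3, 4, 5, 6]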
| [
"[email protected]"
] | |
daf32e5a38af751296be03149defa4907bcdf104 | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /TriAquae/TriAquae/hosts/migrations/0005_initial.py | efecc6c51129f1eea7487c7975b55216d1d798b0 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,357 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Devinfo'
db.create_table(u'hosts_devinfo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('Triaquae_Hostname', self.gf('django.db.models.fields.CharField')(default='Null', max_length=50)),
('System_Hostname', self.gf('django.db.models.fields.CharField')(default='Null', max_length=50)),
('System_Ip', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Device_Type', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Device_Model', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('System_Kernel', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('System_Version', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('System_Mac', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('Physical_Memory', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('System_Swap', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Memory_Slots_Number', self.gf('django.db.models.fields.CharField')(default='Null', max_length=30)),
('Memory_Slots_All', self.gf('django.db.models.fields.CharField')(default='Null', max_length=2000)),
('Logical_Cpu_Cores', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Physical_Cpu_Cores', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Physical_Cpu_Model', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Physical_Cpu_MHz', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('Hard_Disk', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Ethernet_Interface', self.gf('django.db.models.fields.CharField')(default='Null', max_length=364)),
('System_Hostid', self.gf('django.db.models.fields.CharField')(default='Null', max_length=30)),
('Device_Sn', self.gf('django.db.models.fields.CharField')(default='Null', max_length=164)),
('Asset_Number', self.gf('django.db.models.fields.CharField')(default='Null', max_length=164)),
('Note1', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('Note2', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('Note3', self.gf('django.db.models.fields.CharField')(default='Null', max_length=256)),
('Check_Time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'hosts', ['Devinfo'])
# Adding model 'Check_Devinfo'
db.create_table(u'hosts_check_devinfo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('Triaquae_Hostname', self.gf('django.db.models.fields.CharField')(max_length=50)),
('Change_Type', self.gf('django.db.models.fields.CharField')(max_length=64)),
('Old_Value', self.gf('django.db.models.fields.CharField')(max_length=64)),
('New_Value', self.gf('django.db.models.fields.CharField')(default='Null', max_length=64)),
('Change_Time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'hosts', ['Check_Devinfo'])
# Adding model 'Idc'
db.create_table(u'hosts_idc', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
))
db.send_create_signal(u'hosts', ['Idc'])
# Adding model 'Group'
db.create_table(u'hosts_group', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
))
db.send_create_signal(u'hosts', ['Group'])
# Adding model 'IP'
db.create_table(u'hosts_ip', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hostname', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('ip', self.gf('django.db.models.fields.IPAddressField')(unique=True, max_length=15)),
('idc', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hosts.Idc'], null=True, blank=True)),
('port', self.gf('django.db.models.fields.IntegerField')(default='22')),
('os', self.gf('django.db.models.fields.CharField')(default='linux', max_length=20)),
('alert_limit', self.gf('django.db.models.fields.IntegerField')(default=5)),
('snmp_alert_limit', self.gf('django.db.models.fields.IntegerField')(default=5)),
('asset_collection', self.gf('django.db.models.fields.BooleanField')(default=True)),
('status_monitor_on', self.gf('django.db.models.fields.BooleanField')(default=True)),
('snmp_on', self.gf('django.db.models.fields.BooleanField')(default=True)),
('snmp_version', self.gf('django.db.models.fields.CharField')(default='2c', max_length=10)),
('snmp_community_name', self.gf('django.db.models.fields.CharField')(default='public', max_length=50)),
('snmp_security_level', self.gf('django.db.models.fields.CharField')(default='auth', max_length=50)),
('snmp_auth_protocol', self.gf('django.db.models.fields.CharField')(default='MD5', max_length=50)),
('snmp_user', self.gf('django.db.models.fields.CharField')(default='triaquae_snmp', max_length=50)),
('snmp_pass', self.gf('django.db.models.fields.CharField')(default='my_pass', max_length=50)),
('system_load_warning', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('system_load_critical', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('cpu_idle_warning', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('cpu_idle_critical', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('mem_usage_warning', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('mem_usage_critical', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
))
db.send_create_signal(u'hosts', ['IP'])
# Adding M2M table for field group on 'IP'
m2m_table_name = db.shorten_name(u'hosts_ip_group')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('ip', models.ForeignKey(orm[u'hosts.ip'], null=False)),
('group', models.ForeignKey(orm[u'hosts.group'], null=False))
))
db.create_unique(m2m_table_name, ['ip_id', 'group_id'])
# Adding model 'RemoteUser'
db.create_table(u'hosts_remoteuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
))
db.send_create_signal(u'hosts', ['RemoteUser'])
# Adding model 'TriaquaeUser'
db.create_table(u'hosts_triaquaeuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal(u'hosts', ['TriaquaeUser'])
# Adding M2M table for field remoteuser on 'TriaquaeUser'
m2m_table_name = db.shorten_name(u'hosts_triaquaeuser_remoteuser')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('triaquaeuser', models.ForeignKey(orm[u'hosts.triaquaeuser'], null=False)),
('remoteuser', models.ForeignKey(orm[u'hosts.remoteuser'], null=False))
))
db.create_unique(m2m_table_name, ['triaquaeuser_id', 'remoteuser_id'])
# Adding M2M table for field group on 'TriaquaeUser'
m2m_table_name = db.shorten_name(u'hosts_triaquaeuser_group')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('triaquaeuser', models.ForeignKey(orm[u'hosts.triaquaeuser'], null=False)),
('group', models.ForeignKey(orm[u'hosts.group'], null=False))
))
db.create_unique(m2m_table_name, ['triaquaeuser_id', 'group_id'])
# Adding M2M table for field ip on 'TriaquaeUser'
m2m_table_name = db.shorten_name(u'hosts_triaquaeuser_ip')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('triaquaeuser', models.ForeignKey(orm[u'hosts.triaquaeuser'], null=False)),
('ip', models.ForeignKey(orm[u'hosts.ip'], null=False))
))
db.create_unique(m2m_table_name, ['triaquaeuser_id', 'ip_id'])
# Adding model 'AuthByIpAndRemoteUser'
db.create_table(u'hosts_authbyipandremoteuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('authtype', self.gf('django.db.models.fields.CharField')(max_length=100)),
('ip', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hosts.IP'], null=True, blank=True)),
('remoteUser', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hosts.RemoteUser'], null=True, blank=True)),
))
db.send_create_signal(u'hosts', ['AuthByIpAndRemoteUser'])
# Adding unique constraint on 'AuthByIpAndRemoteUser', fields ['ip', 'remoteUser']
db.create_unique(u'hosts_authbyipandremoteuser', ['ip_id', 'remoteUser_id'])
# Adding model 'ServerStatus'
db.create_table(u'hosts_serverstatus', (
('host', self.gf('django.db.models.fields.IPAddressField')(max_length=15, primary_key=True)),
('hostname', self.gf('django.db.models.fields.CharField')(max_length=100)),
('host_status', self.gf('django.db.models.fields.CharField')(default='Unkown', max_length=10)),
('ping_status', self.gf('django.db.models.fields.CharField')(default='Unkown', max_length=100)),
('last_check', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('host_uptime', self.gf('django.db.models.fields.CharField')(default='Unkown', max_length=50)),
('attempt_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('breakdown_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('up_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('snmp_alert_count', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('availability', self.gf('django.db.models.fields.CharField')(default=0, max_length=20)),
))
db.send_create_signal(u'hosts', ['ServerStatus'])
# Adding model 'AlertTemp'
db.create_table(u'hosts_alerttemp', (
('host', self.gf('django.db.models.fields.IPAddressField')(max_length=15, primary_key=True)),
('snmp_status', self.gf('django.db.models.fields.CharField')(max_length=20)),
('snmp_data', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'hosts', ['AlertTemp'])
# Adding model 'OpsLog'
db.create_table(u'hosts_opslog', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('start_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('finish_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('log_type', self.gf('django.db.models.fields.CharField')(max_length=50)),
('tri_user', self.gf('django.db.models.fields.CharField')(max_length=30)),
('run_user', self.gf('django.db.models.fields.CharField')(max_length=30)),
('cmd', self.gf('django.db.models.fields.TextField')()),
('total_task', self.gf('django.db.models.fields.IntegerField')()),
('success_num', self.gf('django.db.models.fields.IntegerField')()),
('failed_num', self.gf('django.db.models.fields.IntegerField')()),
('track_mark', self.gf('django.db.models.fields.IntegerField')(unique=True)),
('note', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'hosts', ['OpsLog'])
# Adding model 'OpsLogTemp'
db.create_table(u'hosts_opslogtemp', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('user', self.gf('django.db.models.fields.CharField')(max_length=30)),
('ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('event_type', self.gf('django.db.models.fields.CharField')(max_length=50)),
('cmd', self.gf('django.db.models.fields.TextField')()),
('event_log', self.gf('django.db.models.fields.TextField')()),
('result', self.gf('django.db.models.fields.CharField')(default='unknown', max_length=30)),
('track_mark', self.gf('django.db.models.fields.IntegerField')(blank=True)),
('note', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
))
db.send_create_signal(u'hosts', ['OpsLogTemp'])
# Adding model 'TriConfig'
db.create_table(u'hosts_triconfig', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('function', self.gf('django.db.models.fields.CharField')(max_length=50)),
('function_info', self.gf('django.db.models.fields.CharField')(max_length=50)),
('username', self.gf('django.db.models.fields.CharField')(max_length=50)),
('password', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal(u'hosts', ['TriConfig'])
def backwards(self, orm):
# Removing unique constraint on 'AuthByIpAndRemoteUser', fields ['ip', 'remoteUser']
db.delete_unique(u'hosts_authbyipandremoteuser', ['ip_id', 'remoteUser_id'])
# Deleting model 'Devinfo'
db.delete_table(u'hosts_devinfo')
# Deleting model 'Check_Devinfo'
db.delete_table(u'hosts_check_devinfo')
# Deleting model 'Idc'
db.delete_table(u'hosts_idc')
# Deleting model 'Group'
db.delete_table(u'hosts_group')
# Deleting model 'IP'
db.delete_table(u'hosts_ip')
# Removing M2M table for field group on 'IP'
db.delete_table(db.shorten_name(u'hosts_ip_group'))
# Deleting model 'RemoteUser'
db.delete_table(u'hosts_remoteuser')
# Deleting model 'TriaquaeUser'
db.delete_table(u'hosts_triaquaeuser')
# Removing M2M table for field remoteuser on 'TriaquaeUser'
db.delete_table(db.shorten_name(u'hosts_triaquaeuser_remoteuser'))
# Removing M2M table for field group on 'TriaquaeUser'
db.delete_table(db.shorten_name(u'hosts_triaquaeuser_group'))
# Removing M2M table for field ip on 'TriaquaeUser'
db.delete_table(db.shorten_name(u'hosts_triaquaeuser_ip'))
# Deleting model 'AuthByIpAndRemoteUser'
db.delete_table(u'hosts_authbyipandremoteuser')
# Deleting model 'ServerStatus'
db.delete_table(u'hosts_serverstatus')
# Deleting model 'AlertTemp'
db.delete_table(u'hosts_alerttemp')
# Deleting model 'OpsLog'
db.delete_table(u'hosts_opslog')
# Deleting model 'OpsLogTemp'
db.delete_table(u'hosts_opslogtemp')
# Deleting model 'TriConfig'
db.delete_table(u'hosts_triconfig')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hosts.alerttemp': {
'Meta': {'object_name': 'AlertTemp'},
'host': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'primary_key': 'True'}),
'snmp_data': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'snmp_status': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'hosts.authbyipandremoteuser': {
'Meta': {'unique_together': "(('ip', 'remoteUser'),)", 'object_name': 'AuthByIpAndRemoteUser'},
'authtype': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hosts.IP']", 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'remoteUser': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hosts.RemoteUser']", 'null': 'True', 'blank': 'True'})
},
u'hosts.check_devinfo': {
'Change_Time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'Change_Type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'Meta': {'object_name': 'Check_Devinfo'},
'New_Value': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Old_Value': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'Triaquae_Hostname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'hosts.devinfo': {
'Asset_Number': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '164'}),
'Check_Time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'Device_Model': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Device_Sn': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '164'}),
'Device_Type': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Ethernet_Interface': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '364'}),
'Hard_Disk': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Logical_Cpu_Cores': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Memory_Slots_All': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '2000'}),
'Memory_Slots_Number': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '30'}),
'Meta': {'object_name': 'Devinfo'},
'Note1': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'Note2': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'Note3': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'Physical_Cpu_Cores': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Physical_Cpu_MHz': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'Physical_Cpu_Model': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Physical_Memory': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'System_Hostid': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '30'}),
'System_Hostname': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '50'}),
'System_Ip': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'System_Kernel': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'System_Mac': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '256'}),
'System_Swap': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'System_Version': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '64'}),
'Triaquae_Hostname': ('django.db.models.fields.CharField', [], {'default': "'Null'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'hosts.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'hosts.idc': {
'Meta': {'object_name': 'Idc'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'hosts.ip': {
'Meta': {'object_name': 'IP'},
'alert_limit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'asset_collection': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cpu_idle_critical': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'cpu_idle_warning': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['hosts.Group']", 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hosts.Idc']", 'null': 'True', 'blank': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'mem_usage_critical': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'mem_usage_warning': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'os': ('django.db.models.fields.CharField', [], {'default': "'linux'", 'max_length': '20'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': "'22'"}),
'snmp_alert_limit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'snmp_auth_protocol': ('django.db.models.fields.CharField', [], {'default': "'MD5'", 'max_length': '50'}),
'snmp_community_name': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '50'}),
'snmp_on': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'snmp_pass': ('django.db.models.fields.CharField', [], {'default': "'my_pass'", 'max_length': '50'}),
'snmp_security_level': ('django.db.models.fields.CharField', [], {'default': "'auth'", 'max_length': '50'}),
'snmp_user': ('django.db.models.fields.CharField', [], {'default': "'triaquae_snmp'", 'max_length': '50'}),
'snmp_version': ('django.db.models.fields.CharField', [], {'default': "'2c'", 'max_length': '10'}),
'status_monitor_on': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'system_load_critical': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'system_load_warning': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'hosts.opslog': {
'Meta': {'object_name': 'OpsLog'},
'cmd': ('django.db.models.fields.TextField', [], {}),
'failed_num': ('django.db.models.fields.IntegerField', [], {}),
'finish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'run_user': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'success_num': ('django.db.models.fields.IntegerField', [], {}),
'total_task': ('django.db.models.fields.IntegerField', [], {}),
'track_mark': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'tri_user': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'hosts.opslogtemp': {
'Meta': {'object_name': 'OpsLogTemp'},
'cmd': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_log': ('django.db.models.fields.TextField', [], {}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'result': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '30'}),
'track_mark': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'hosts.remoteuser': {
'Meta': {'object_name': 'RemoteUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'hosts.serverstatus': {
'Meta': {'object_name': 'ServerStatus'},
'attempt_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'availability': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '20'}),
'breakdown_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'host': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'primary_key': 'True'}),
'host_status': ('django.db.models.fields.CharField', [], {'default': "'Unkown'", 'max_length': '10'}),
'host_uptime': ('django.db.models.fields.CharField', [], {'default': "'Unkown'", 'max_length': '50'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'last_check': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ping_status': ('django.db.models.fields.CharField', [], {'default': "'Unkown'", 'max_length': '100'}),
'snmp_alert_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'hosts.triaquaeuser': {
'Meta': {'object_name': 'TriaquaeUser'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['hosts.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['hosts.IP']", 'null': 'True', 'blank': 'True'}),
'remoteuser': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['hosts.RemoteUser']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'hosts.triconfig': {
'Meta': {'object_name': 'TriConfig'},
'function': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'function_info': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['hosts'] | [
"[email protected]"
] | |
77c86e7b6456e7ab33c161d6d8ab304d6d09e17b | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/contrib/framework/python/framework/tensor_util_test.py | ed3a7989a6ca1353a9b8069ec10dfdff872950bd | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 16,283 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
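# The test cases below exercise the tf.contrib.framework helpers imported above:
# local_variable, reduce_sum_n, assert_scalar_int, with_shape/with_same_shape,
# and remove_squeezable_dimensions.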
class LocalVariabletest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
d7cd476ca8f69a0ba757cc642b513abdbddb7e8d | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/classification/SqueezeNet1_1/main.py | 3852a80149bfa320d4bf1d54f10c4af570924c6c | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 19,790 | py | # BSD 3-Clause License
# Copyright (c) Soumith Chintala 2016,
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.npu
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from apex import amp
import apex
import warnings
warnings.filterwarnings('ignore')
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='DIR', default='/opt/npu/imagenet',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='squeezenet1_1',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
                        ' (default: squeezenet1_1)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--npu', default=0, type=int,
help='NPU id to use.')
# apex
parser.add_argument('--amp', default=False, action='store_true',
help='use amp to train the model')
parser.add_argument('--loss-scale', default=1024., type=float,
                    help='static loss scale used in amp (default: 1024)')
parser.add_argument('--opt-level', default='O2', type=str,
                    help='apex amp optimization level (default: O2)')
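# Illustrative single-NPU launch (the dataset path and device id below are assumptions,
# not part of the original script):
#   python main.py --data /path/to/imagenet --npu 0 --batch-size 256 --amp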
cur_step = 0
def main():
args = parser.parse_args()
if args.npu is None:
args.npu = 0
global CALCULATE_DEVICE
global best_acc1
best_acc1 = 0
CALCULATE_DEVICE = "npu:{}".format(args.npu)
# CALCULATE_DEVICE = "nup:1"
torch.npu.set_device(CALCULATE_DEVICE)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
# ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = torch.npu.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
global cur_step
# args.gpu = gpu
args.gpu = None
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.distributed:
print("args.gpu", args.gpu)
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
# model = torch.nn.DataParallel(model).cuda()
            # Move the model to the NPU for training (translated from the original Chinese comment)
# print("mv to npu")
model = model.to(CALCULATE_DEVICE)
# print("mv success")
    # Move the loss function to the NPU for computation (translated from the original Chinese comment)
criterion = nn.CrossEntropyLoss().to(CALCULATE_DEVICE)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.amp:
print("=> use amp...")
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.npu is None:
checkpoint = torch.load(args.resume)
elif args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.npu is not None:
# best_acc1 = best_acc1.to(args.npu)
best_acc1 = best_acc1.to("npu:{}".format(args.npu))
elif args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if not args.amp:
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
else:
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
'amp': amp.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
# if torch.cuda.is_available():
# target = target.cuda(args.gpu, non_blocking=True)
target = target.to(torch.int32)
images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
print("batch_size:", args.batch_size, 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(
args.batch_size/batch_time.avg))
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
            # Move the batch to the NPU and cast the target to int32 (translated from the original Chinese comment)
target = target.to(torch.int32)
images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
# shutil.copyfile(filename, 'model_best_acc%.4f_epoch%d.pth.tar' % (state['best_acc1'], state['epoch']))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e9de1fbae42006866704b12710ee0caac1dc22ea | 65a735524f36356c0d4870012df19b4ec655558b | /Coding the Matrix/matrix/hw0/hw0.py | c3a7336dcf67cb96888ca72baefab72715e6b9b9 | [] | no_license | zjsxzy/Courses | 7047a871d6acb9748ef956bbdfd7570431c76e37 | aacbd4c81cc3af2d0a74cc1d5c08931130e491fc | refs/heads/master | 2020-03-30T07:13:54.812252 | 2013-10-29T02:16:40 | 2013-10-29T02:16:40 | 10,140,554 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # Please fill out this stencil and submit using the provided submission script.
## Problem 1
def myFilter(L, num):
return [x for x in L if x % num != 0]
## Problem 2
def myLists(L):
return [list(range(1, x + 1)) for x in L]
## Problem 3
def myFunctionComposition(f, g):
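    # Composition "g after f": each key k of f maps to g[f[k]], assuming every value of f is a key of g.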
return {k:g[v] for k, v in f.items()}
## Problem 4
# Please only enter your numerical solution.
complex_addition_a = 5 + 3j
complex_addition_b = 1j
complex_addition_c = -1 + 0.001j
complex_addition_d = 0.001 + 9j
## Problem 5
GF2_sum_1 = 1
GF2_sum_2 = 0
GF2_sum_3 = 0
## Problem 6
def mySum(L):
ret = 0
for x in L:
ret += x
return ret;
## Problem 7
def myProduct(L):
ret = 1
for x in L:
ret *= x
return ret;
## Problem 8
def myMin(L):
ret = L[0]
for x in L:
if x < ret:
ret = x
return ret
## Problem 9
def myConcat(L):
ret = ''
for x in L:
ret += x
return ret
## Problem 10
def myUnion(L):
ret = set()
for x in L:
ret |= x
return ret
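# Illustrative sanity checks (not part of the submitted stencil):
#   myFilter([1, 2, 3, 4, 5, 6], 2) == [1, 3, 5]
#   mySum([1, 2, 3]) == 6
#   myUnion([{1, 2}, {2, 3}]) == {1, 2, 3}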
| [
"[email protected]"
] | |
963ea31f26d0a3ff242a9c5e5d625f37f427e6ea | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part000711.py | ebaaff510bb65f65923cd0f3844acbce5b59094d | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher112273(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher112273._instance is None:
CommutativeMatcher112273._instance = CommutativeMatcher112273()
return CommutativeMatcher112273._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 112272
return
yield
from collections import deque | [
"[email protected]"
] | |
1d71d22b4e7c4676ddbfd83f684686ed3e859183 | 8ab8a1c524030a95f2cba68a02ae036dd2c65d78 | /lib/apps.py | 5d96408738acefdd49b044567e9485b21cc466f5 | [] | no_license | marekventur/emfcamp-2018-app-library | 2ffce3136c789c56bb45acecfb1ca33f4ac06a46 | 32e278d4c99936a70c28d23ae52270b7eff26a51 | refs/heads/master | 2020-03-22T16:40:46.762618 | 2018-07-16T22:04:42 | 2018-07-16T22:05:25 | 140,343,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,358 | py | """Model and Helpers for TiLDA apps and the App Library API"""
___license___ = "MIT"
___dependencies___ = ["http"]
import os
import ure
import http_client
import filesystem
import gc
ATTRIBUTE_MATCHER = ure.compile("^\s*###\s*([^:]*?)\s*:\s*(.*)\s*$") # Yeah, regex!
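# ATTRIBUTE_MATCHER extracts "### Key: Value" header lines from the top of an app's main.py (see App.attributes).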
CATEGORY_ALL = "all"
CATEGORY_NOT_SET = "uncategorised"
class App:
"""Models an app and provides some helper functions"""
def __init__(self, folder_name, api_information = None):
self.folder_name = self.name = folder_name.lower()
self.user = EMF_USER
if USER_NAME_SEPARATOR in folder_name:
[self.user, self.name] = folder_name.split(USER_NAME_SEPARATOR, 1)
self.user = self.user.lower()
self.name = self.name.lower()
self._attributes = None # Load lazily
self.api_information = api_information
@property
def folder_path(self):
return "apps/" + self.folder_name
@property
def main_path(self):
return self.folder_path + "/main.py"
@property
def loadable(self):
return filesystem.is_file(self.main_path) and os.stat(self.main_path)[6] > 0
@property
def description(self):
"""either returns a local attribute or uses api_information"""
if self.api_information and "description" in self.api_information:
return self.api_information["description"]
return self.get_attribute("description") or ""
@property
def files(self):
"""returns a list of file dicts or returns False if the information is not available"""
if self.api_information and "files" in self.api_information:
return self.api_information["files"]
return False
@property
def category(self):
return self.get_attribute("Category", CATEGORY_NOT_SET).lower()
@property
def title(self):
return self.get_attribute("appname") or self.name
@property
def user_and_title(self):
if self.user == EMF_USER:
return self.name
else:
return "%s by %s" % (self.title, self.user)
def matches_category(self, category):
"""returns True if provided category matches the category of this app"""
category = category.lower()
return category == CATEGORY_ALL or category == self.category
@property
def attributes(self):
"""Returns all attribues of this app
The result is cached for the lifetime of this object
"""
if self._attributes == None:
self._attributes = {}
if self.loadable:
with open(self.main_path) as file:
for line in file:
match = ATTRIBUTE_MATCHER.match(line)
if match:
self._attributes[match.group(1).strip().lower()] = match.group(2).strip()
else:
break
return self._attributes
def get_attribute(self, attribute, default=None):
"""Returns the value of an attribute, or a specific default value if attribute is not found"""
attribute = attribute.lower() # attributes are case insensitive
if attribute in self.attributes:
return self.attributes[attribute]
else:
return default
def fetch_api_information(self):
"""Queries the API for information about this app, returns False if app is not publicly listed"""
with http_client.get("http://api.badge.emfcamp.org/api/app/%s/%s" % (self.user, self.name)) as response:
if response.status == 404:
return False
self.api_information = response.raise_for_status().json()
return self.api_information
def __str__(self):
return self.user_and_title
def __repr__(self):
return "<App %s>" % (self.folder_name)
def app_by_name_and_user(name, user):
"""Returns an user object"""
if user.lower() == EMF_USER:
return App(name)
else:
return App(user + USER_NAME_SEPARATOR + name)
def app_by_api_response(response):
if response["user"].lower() == EMF_USER:
return App(response["name"], response)
else:
return App(response["user"] + USER_NAME_SEPARATOR + response["name"], response)
def get_local_apps(category=CATEGORY_ALL):
"""Returns a list of apps that can be found in the apps folder"""
apps = [App(folder_name) for folder_name in os.listdir("apps") if filesystem.is_dir("apps/" + folder_name)]
return [app for app in apps if app.matches_category(category)]
_public_apps_cache = None
def fetch_public_app_api_information(uncached=False):
"""Returns a dict category => list of apps
Uses cached version unless the uncached parameter is set
"""
global _public_apps_cache
if not _public_apps_cache or uncached:
response = {}
for category, apps in http_client.get("http://api.badge.emfcamp.org/api/apps").raise_for_status().json().items():
response[category] = [app_by_api_response(app) for app in apps]
_public_apps_cache = response
return _public_apps_cache
def get_public_app_categories(uncached=False):
"""Returns a list of all categories used on the app library"""
return list(fetch_public_app_api_information(uncached).keys())
def get_public_apps(category=CATEGORY_ALL, uncached=False):
"""Returns a list of all public apps in one category"""
category = category.lower()
api_information = fetch_public_app_api_information(uncached)
return api_information[category] if category in api_information else []
_category_cache = None
def get_local_app_categories(uncached=False):
"""Returns a list of all app categories the user's apps are currently using
Uses cached version unless the uncached parameter is set
"""
global _category_cache
if not _category_cache or uncached:
_category_cache = ["all"]
for app in get_local_apps():
if app.category not in _category_cache:
_category_cache.append(app.category)
return _category_cache
def empty_local_app_cache():
"""If you're tight on memory you can clean up the local cache"""
global _public_apps_cache, _category_cache
_public_apps_cache = None
_category_cache = None
gc.collect()
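# Illustrative usage (the category names below are assumptions):
#   local_games = get_local_apps("games")     # installed apps in the "games" category
#   public_sound = get_public_apps("sound")   # apps listed on the public app library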
| [
"[email protected]"
] | |
b4bd250abb60b9c3eab25cfc0aa1a5fdef2c81f4 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/gkeonprem/v1/get_vmware_node_pool.py | 1fa08dd4c899a8a1f1fb9cc13f36d274c7f4d2c1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 9,983 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVmwareNodePoolResult',
'AwaitableGetVmwareNodePoolResult',
'get_vmware_node_pool',
'get_vmware_node_pool_output',
]
@pulumi.output_type
class GetVmwareNodePoolResult:
def __init__(__self__, annotations=None, config=None, create_time=None, delete_time=None, display_name=None, etag=None, name=None, node_pool_autoscaling=None, on_prem_version=None, reconciling=None, state=None, status=None, uid=None, update_time=None):
if annotations and not isinstance(annotations, dict):
raise TypeError("Expected argument 'annotations' to be a dict")
pulumi.set(__self__, "annotations", annotations)
if config and not isinstance(config, dict):
raise TypeError("Expected argument 'config' to be a dict")
pulumi.set(__self__, "config", config)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if delete_time and not isinstance(delete_time, str):
raise TypeError("Expected argument 'delete_time' to be a str")
pulumi.set(__self__, "delete_time", delete_time)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if node_pool_autoscaling and not isinstance(node_pool_autoscaling, dict):
raise TypeError("Expected argument 'node_pool_autoscaling' to be a dict")
pulumi.set(__self__, "node_pool_autoscaling", node_pool_autoscaling)
if on_prem_version and not isinstance(on_prem_version, str):
raise TypeError("Expected argument 'on_prem_version' to be a str")
pulumi.set(__self__, "on_prem_version", on_prem_version)
if reconciling and not isinstance(reconciling, bool):
raise TypeError("Expected argument 'reconciling' to be a bool")
pulumi.set(__self__, "reconciling", reconciling)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if status and not isinstance(status, dict):
raise TypeError("Expected argument 'status' to be a dict")
pulumi.set(__self__, "status", status)
if uid and not isinstance(uid, str):
raise TypeError("Expected argument 'uid' to be a str")
pulumi.set(__self__, "uid", uid)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter
def annotations(self) -> Mapping[str, str]:
"""
Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter
def config(self) -> 'outputs.VmwareNodeConfigResponse':
"""
The node configuration of the node pool.
"""
return pulumi.get(self, "config")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time at which this node pool was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="deleteTime")
def delete_time(self) -> str:
"""
The time at which this node pool was deleted. If the resource is not deleted, this must be empty
"""
return pulumi.get(self, "delete_time")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The display name for the node pool.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> str:
"""
This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> str:
"""
Immutable. The resource name of this node pool.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodePoolAutoscaling")
def node_pool_autoscaling(self) -> 'outputs.VmwareNodePoolAutoscalingConfigResponse':
"""
Node pool autoscaling config for the node pool.
"""
return pulumi.get(self, "node_pool_autoscaling")
@property
@pulumi.getter(name="onPremVersion")
def on_prem_version(self) -> str:
"""
Anthos version for the node pool. Defaults to the user cluster version.
"""
return pulumi.get(self, "on_prem_version")
@property
@pulumi.getter
def reconciling(self) -> bool:
"""
If set, there are currently changes in flight to the node pool.
"""
return pulumi.get(self, "reconciling")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the node pool.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def status(self) -> 'outputs.ResourceStatusResponse':
"""
ResourceStatus representing the detailed VMware node pool state.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def uid(self) -> str:
"""
The unique identifier of the node pool.
"""
return pulumi.get(self, "uid")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> str:
"""
The time at which this node pool was last updated.
"""
return pulumi.get(self, "update_time")
class AwaitableGetVmwareNodePoolResult(GetVmwareNodePoolResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVmwareNodePoolResult(
annotations=self.annotations,
config=self.config,
create_time=self.create_time,
delete_time=self.delete_time,
display_name=self.display_name,
etag=self.etag,
name=self.name,
node_pool_autoscaling=self.node_pool_autoscaling,
on_prem_version=self.on_prem_version,
reconciling=self.reconciling,
state=self.state,
status=self.status,
uid=self.uid,
update_time=self.update_time)
def get_vmware_node_pool(location: Optional[str] = None,
project: Optional[str] = None,
vmware_cluster_id: Optional[str] = None,
vmware_node_pool_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVmwareNodePoolResult:
"""
Gets details of a single VMware node pool.
"""
__args__ = dict()
__args__['location'] = location
__args__['project'] = project
__args__['vmwareClusterId'] = vmware_cluster_id
__args__['vmwareNodePoolId'] = vmware_node_pool_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('google-native:gkeonprem/v1:getVmwareNodePool', __args__, opts=opts, typ=GetVmwareNodePoolResult).value
return AwaitableGetVmwareNodePoolResult(
annotations=pulumi.get(__ret__, 'annotations'),
config=pulumi.get(__ret__, 'config'),
create_time=pulumi.get(__ret__, 'create_time'),
delete_time=pulumi.get(__ret__, 'delete_time'),
display_name=pulumi.get(__ret__, 'display_name'),
etag=pulumi.get(__ret__, 'etag'),
name=pulumi.get(__ret__, 'name'),
node_pool_autoscaling=pulumi.get(__ret__, 'node_pool_autoscaling'),
on_prem_version=pulumi.get(__ret__, 'on_prem_version'),
reconciling=pulumi.get(__ret__, 'reconciling'),
state=pulumi.get(__ret__, 'state'),
status=pulumi.get(__ret__, 'status'),
uid=pulumi.get(__ret__, 'uid'),
update_time=pulumi.get(__ret__, 'update_time'))
@_utilities.lift_output_func(get_vmware_node_pool)
def get_vmware_node_pool_output(location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
vmware_cluster_id: Optional[pulumi.Input[str]] = None,
vmware_node_pool_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVmwareNodePoolResult]:
"""
Gets details of a single VMware node pool.
"""
...
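def _example_lookup():
    # Editor's illustrative sketch (not generated by the Pulumi SDK Generator):
    # how this data source might be invoked from inside a running Pulumi
    # program. The identifiers below ("my-project", "my-cluster", "my-pool")
    # are hypothetical placeholders, not real resources.
    return get_vmware_node_pool(
        location="us-west1",
        project="my-project",
        vmware_cluster_id="my-cluster",
        vmware_node_pool_id="my-pool",
    )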
| [
"[email protected]"
] | |
581f5adec9da3ee40c34877b24662132f1a72437 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /fixtures/schema_validation.py | 6b0ead9186f4e6aa218c3c747c04b35ab07587e3 | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 505 | py | import pytest
from jsonschema import ValidationError
def invalid_schema(func):
def inner(self, *args, **kwargs):
with pytest.raises(ValidationError):
func(self)
return inner
def invalid_schema_with_error_message(message):
def decorator(func):
def inner(self, *args, **kwargs):
with pytest.raises(ValidationError) as excinfo:
func(self)
assert excinfo.value.message == message
return inner
return decorator
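# Illustrative usage sketch (editor's addition, not part of the original
# fixture). The decorators wrap test methods whose body performs schema
# validation; the test class and run_validation() below are hypothetical.
#
#     class SchemaTest(TestCase):
#         @invalid_schema
#         def test_rejects_bad_payload(self):
#             self.run_validation({"bad": "payload"})
#
#         @invalid_schema_with_error_message("'name' is a required property")
#         def test_requires_name(self):
#             self.run_validation({})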
| [
"[email protected]"
] | |
61aed9016adcb0356666e3b2b263240642d9d902 | 48981ef531729e6c133f9e01252a7dc6f4251629 | /modules/modules broken.py | 181b3ed252c0f2a552a10fc9aa6e27da7c813baf | [] | no_license | sharland/python_scripts | f778f3c2dfc2f19592e94472ec9afd5c77ed4052 | 702b543a77ee5361aa73d5068510e1df937210c4 | refs/heads/master | 2021-09-08T13:26:54.123689 | 2021-08-30T21:24:58 | 2021-08-30T21:24:58 | 17,588,312 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | #! /usr/local/bin/python3
from math import sqrt  # needed: sqrt() is not a builtin, it lives in the math module
x = 49
y = sqrt(x)
print(y)
| [
"[email protected]"
] | |
50896b220232ff3cad259b8616dcaf5801e1c0fe | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/tests/zd/CB_ZD_Verify_AAA_Server_GUI_CLI_Get.py | edf805d0ccb40058c8caa5ef9eb27ed5e1fc115a | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | # Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""
Description:
@author: Cherry Cheng
@contact: [email protected]
@since: Feb 2012
Prerequisite (Assumptions about the state of the test bed/DUT):
1. Build under test is loaded on the AP and Zone Director
Required components:
Test parameters:
Test procedure:
1. Config:
- initialize test parameters
2. Test:
- Compare aaa servers between GUI get and CLI get
3. Cleanup:
- N/A
Result type: PASS/FAIL
Results: PASS: Data between gui get and cli get are same
FAIL: If any item is incorrect
Messages: If FAIL the test script returns a message related to the criterion that is not satisfied
"""
import logging
from RuckusAutoTest.models import Test
from RuckusAutoTest.components.lib.zdcli import aaa_servers as cas
class CB_ZD_Verify_AAA_Server_GUI_CLI_Get(Test):
required_components = []
parameters_description = {}
def config(self, conf):
self._initTestParameters(conf)
def test(self):
logging.debug("GUI: %s" % self.gui_get_server_list)
logging.debug("CLI: %s" % self.cli_get_server_list)
self._verify_server_gui_cli_get()
if self.errmsg:
return self.returnResult('FAIL', self.errmsg)
else:
self.passmsg = "The servers information are same between GUI get and CLI get"
return self.returnResult('PASS', self.passmsg)
def cleanup(self):
pass
def _initTestParameters(self, conf):
self.gui_get_server_list = self.carrierbag['zdgui_server_info_list']
self.cli_get_server_list = self.carrierbag['zdcli_server_info_list']
self.errmsg = ''
self.passmsg = ''
def _verify_server_gui_cli_get(self):
logging.info('Verify the AAA server settings between GUI get and CLI get')
try:
err_msg = cas.verify_server_cfg_gui_cli_get(self.gui_get_server_list, self.cli_get_server_list)
if err_msg:
self.errmsg = err_msg
except Exception, ex:
self.errmsg = ex.message | [
"[email protected]"
] | |
434f7560833bf09a40483a71d5430a62a3834174 | 95f3e72dfdd6e7194c8cdad6529f891141d1cc68 | /pyatv/mrp/protobuf/SendCommandResultMessage_pb2.pyi | f1d7710eb0bdb3adb65d89fb6d8f556b29319296 | [
"MIT"
] | permissive | Lee-View/pyatv | 87939b2ce7c2d5d383090c64c4f0f15b03d040cb | 5f46dacccea8e107d0407c95432eda611980ef81 | refs/heads/master | 2021-01-02T11:19:36.315363 | 2020-02-10T18:36:16 | 2020-02-10T19:02:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
FieldDescriptor as google___protobuf___descriptor___FieldDescriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
class SendCommandResultMessage(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
errorCode = ... # type: builtin___int
handlerReturnStatus = ... # type: builtin___int
handlerReturnStatusDatas = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[builtin___bytes]
def __init__(self,
*,
errorCode : typing___Optional[builtin___int] = None,
handlerReturnStatus : typing___Optional[builtin___int] = None,
handlerReturnStatusDatas : typing___Optional[typing___Iterable[builtin___bytes]] = None,
) -> None: ...
@classmethod
def FromString(cls, s: builtin___bytes) -> SendCommandResultMessage: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
if sys.version_info >= (3,):
def HasField(self, field_name: typing_extensions___Literal[u"errorCode",u"handlerReturnStatus"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"errorCode",u"handlerReturnStatus",u"handlerReturnStatusDatas"]) -> None: ...
else:
def HasField(self, field_name: typing_extensions___Literal[u"errorCode",b"errorCode",u"handlerReturnStatus",b"handlerReturnStatus"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"errorCode",b"errorCode",u"handlerReturnStatus",b"handlerReturnStatus",u"handlerReturnStatusDatas",b"handlerReturnStatusDatas"]) -> None: ...
sendCommandResultMessage = ... # type: google___protobuf___descriptor___FieldDescriptor
| [
"[email protected]"
] | |
ecb7ed4701df3ff2fdfa90258ae280604ab871d2 | 9c7e75720740422044747387907b2678360b7241 | /setup.py | 302fc4bf2efd1912fe8516e65300143060032d8b | [
"MIT"
] | permissive | sourabhtkd/django-log-viewer | cc84c861ff9f10e3d6c498ebce8c93ef5c10b2cf | a8216d3572d5209a50175a9004a4f59fe2227492 | refs/heads/master | 2023-06-12T12:41:17.348747 | 2021-07-07T01:18:06 | 2021-07-07T01:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import (setup, find_packages)
__version__ = '1.1.3'
setup(
name='django-log-viewer',
version=__version__,
packages=find_packages(exclude=["*demo"]),
include_package_data=True,
zip_safe=False,
description='Django log viewer',
url='https://github.com/agusmakmun/django-log-viewer',
download_url='https://github.com/agusmakmun/django-log-viewer/tarball/v%s' % __version__,
keywords=['django log viewer'],
long_description=open('README.rst').read(),
license='MIT',
author='Agus Makmun (Summon Agus)',
author_email='[email protected]',
classifiers=[
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Environment :: Web Environment',
]
)
| [
"[email protected]"
] | |
0be80f1a5029ac315fad63e0b82ca6c1af7b3388 | 3b479163504e51abdd08596f13abe40d0f84c8e6 | /ISSUE-11/SOLUTION-5/obstacles.py | 1cfadd86988aec43ee6811a3b29b2b626d8707de | [] | no_license | kantal/WPC | c864bf7f1a0ce63aef264309dfb9fce7587895bd | 46648436ee2dac2b727a1382c4e13a9a0ccb8bdf | refs/heads/master | 2021-06-10T14:08:57.441613 | 2021-02-16T12:00:11 | 2021-02-16T12:00:11 | 16,383,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,141 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
2013-06-02 by kantal59
License: LGPL
OLIMEX WPC#11:
Let's have a maze with size X x Y where X and Y are integers (3..100).
The maze is defined with strings, where in the string 'W' is wall, '@' is obstacle and ' ' is air.
Write code which finds the largest (by area) group of connected obstacles in the maze.
Python3 required.
The algorithm counts '@' cells that are neighbours. Input is read from a file, which must contain SPACES instead of TABS, or from the built-in test data.
The main algorithm consists of 8 lines of code; the rest is just glitter.
For the graphics demo (with a fixed canvas size), 'GUI' must be set to True. The 'tkinter' module is required.
'''
#GUI= False
GUI= True
from os import sys
#---------------------------------
# THE ALGORITHM
#---------------------------------
def neighbours( obi, obstacles ):
''' The algorithm.
'''
yield obi
nb= [ (x,y) for (x,y) in obstacles if (x,y) in [ (obi[0]+i, obi[1]+j) for i in range(-1,2) for j in range(-1,2) ] ]
for p in nb:
obstacles.remove(p)
for p in nb:
# neighbours( p, obstacles) <-- yielded values of recursive generator must be propagated explicitly
for others in neighbours( p, obstacles):
yield others
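def _demo_neighbours():
    ''' Editor's illustrative sketch (not part of the original WPC#11 entry):
        how neighbours() groups a few hypothetical obstacle coordinates.
        (0,0) and (0,1) touch, (5,5) is isolated, so the largest block has size 2.
    '''
    demo = [(0, 0), (0, 1), (5, 5)]
    blocks = []
    while demo:
        blocks.append([p for p in neighbours(demo.pop(), demo)])
    return max(len(b) for b in blocks)  # -> 2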
#----------------------------------
# DATA READING
#----------------------------------
def load_data( fname, obstacles):
''' Input
'''
row=0
fd=open(fname,'r')
for line in fd:
pos= line.find('@')
while pos!=-1:
obstacles.append( (row,pos) )
#---- graphics begins
if GUI:
chgcolor( (row,pos), "red")
#---- graphics ends
pos= line.find('@',pos+1)
#---- graphics begins
if GUI:
line=line.upper()
pos= line.find('W')
while pos!=-1:
chgcolor( (row,pos), "black")
pos= line.find('W',pos+1)
#---- graphics ends
row+=1
fd.close()
#----------------------------------
# GRAPHICS
#----------------------------------
if GUI:
from tkinter import *
from tkinter import ttk
scell=10; xmaze=300; ymaze=300; title='OLIMEX WPC#11'
it=None
def chgcolor( t, color):
oid= can.find_closest( t[1]*scell, t[0]*scell)
can.itemconfigure( oid, fill=color )
def step( points,bls,mx):
global it
if it==None:
it=iter(points)
try:
chgcolor( next(it),"green")
win.after( 500, step, points,bls,mx)
except StopIteration:
win.title( title+" ---> the largest block size: {0}".format(mx))
for b in bls:
if len(b)==mx:
for p in b:
chgcolor( p,"yellow")
win= Tk()
hsc = ttk.Scrollbar(win, orient=HORIZONTAL)
vsc = ttk.Scrollbar(win, orient=VERTICAL)
can= Canvas( win, width=500, height=400, scrollregion=(0, 0, scell*ymaze, scell*xmaze), bg='gray', yscrollcommand=vsc.set, xscrollcommand=hsc.set )
hsc['command'] = can.xview
vsc['command'] = can.yview
can.grid(column=0, row=0, sticky=(N,W,E,S))
hsc.grid(column=0, row=1, sticky=(W,E))
vsc.grid(column=1, row=0, sticky=(N,S))
ttk.Sizegrip(win).grid(column=1, row=1, sticky=(S,E))
win.grid_columnconfigure(0, weight=1)
win.grid_rowconfigure(0, weight=1)
for i in range(0,xmaze):
x=i*scell
for j in range(0,ymaze):
y=j*scell
can.create_rectangle( x,y, x+scell, y+scell)
win.title(title)
#----------------------------------
# TESTING
#----------------------------------
def main():
''' Test
'''
#--- Input
stones=[]
if len(sys.argv)!=2:
print("\nUsage: $> obstacles.py maze_file\n- using test data:\n")
stones= [ (3,3),(3,4),(4,2),(4,3),(5,7),(6,1),(6,6),(6,7) ]
#stones= [ (3,3),(3,4),(4,2),(4,3),(5,7),(6,1),(6,6),(6,7), (4,4),(4,5),(4,6),(3,6) ]
#---- graphics begins
if GUI:
for p in stones:
chgcolor( p, "red")
#---- graphics ends
else:
print("- using the file: {0}".format(sys.argv[1]))
load_data( sys.argv[1], stones)
#--- Calculation
blocks=[]
while stones:
p=stones.pop()
blocks.append( [ a for a in neighbours( p, stones) ] )
#--- Output
l=0
for a in blocks:
if len(a) > l:
l=len(a)
print("size: {0} -->{1}".format( len(a),a))
print("The largest size: {0}".format(l) )
#---- graphics begins
if GUI:
pnts=[ p for bl in blocks for p in bl ]
win.after( 1000, step, pnts, blocks, l)
#---- graphics ends
# LET'S GO !
if GUI:
#---- graphics begins
win.after( 100, main)
win.mainloop()
#---- graphics ends
else:
main()
| [
"[email protected]"
] | |
1a9931e21fe1e127a78ef5b4276c981c27471124 | 16a5c9c9f0d7519a6808efc61b592b4b614102cf | /Python/70.py | 966783c4a0677185ef2fd95c25677aa959c88acf | [] | no_license | kevin851066/Leetcode | c1d86b2e028526231b80c6d4fb6d0be7ae8d39e5 | 885a9af8a7bee3c228c7ae4e295dca810bd91d01 | refs/heads/main | 2023-08-10T16:50:12.426440 | 2021-09-28T15:23:26 | 2021-09-28T15:23:26 | 336,277,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | class Solution:
def climbStairs(self, n):
'''
        :type n: int
        :rtype: int
        '''
        # Fibonacci-style DP: ways(i) = ways(i-1) + ways(i-2)
        arr = [1, 2]
for i in range(2, n):
arr.append(arr[i-1]+arr[i-2])
return arr[n-1] | [
"[email protected]"
] | |
fd2c623806b24a1c36d209d183e759acff4bf4fc | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/nlp/data/question_answering_squad/__init__.py | 9e3250071955216f6abc505e6181fb59931baa8d | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 610 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
"[email protected]"
] | |
b425bac67bd61149de100ef3f07563c01ca8f0b6 | e0fbc96bec9e83bc3fc3482e432bd2c6b6ad05a6 | /MRPT/vtz/monoxides/FeO/mrpt.py | 9647bda9cf436383cf636a27b17a5e16e9757669 | [
"MIT"
] | permissive | mussard/share_data_benchmark | fe2cbd95879e069be2475d39b191de4f04e140ee | c02bfa4017b9008800cabe47d7c7959f82c26060 | refs/heads/master | 2020-03-11T21:25:00.264437 | 2019-04-29T00:28:13 | 2019-04-29T00:28:13 | 130,264,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | import json
from pyscf import gto,scf,mcscf, fci, lo, ci, cc
from pyscf.scf import ROHF, UHF,ROKS
import numpy as np
import pandas as pd
# THIS IS WHERE IT STARTS ====================================
df=json.load(open("../../../trail.json"))
spins={'ScO':1, 'TiO':2, 'VO':3, 'CrO':4, 'MnO':5, 'FeO':4, 'CuO':1}
nd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,4)}
cas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}
re={'ScO':1.668, 'TiO':1.623, 'VO':1.591, 'CrO':1.621, 'MnO':1.648, 'FeO':1.616, 'CuO':1.725}
datacsv={}
for nm in ['basis','charge','method','molecule','pseudopotential',
'totalenergy','totalenergy-stocherr','totalenergy-syserr']:
datacsv[nm]=[]
basis='vtz'
element='Fe'
mol=gto.Mole()
mol.ecp={}
mol.basis={}
for el in [element,'O']:
mol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])
mol.basis[el]=gto.basis.parse(df[el][basis])
mol.charge=0
mol.spin=spins[element+'O']
mol.build(atom="%s 0. 0. 0.; O 0. 0. %g"%(element,re[element+'O']),verbose=4)
m=ROHF(mol)
m.level_shift=1000.0
dm=m.from_chk("../../../../HF/monoxides/"+element+basis+"0.chk")
hf=m.kernel(dm)
m.analyze()
from pyscf.shciscf import shci
mc = shci.SHCISCF(m, 9, 4+cas[element])
#mc.fcisolver.conv_tol = 1e-14
mc.fcisolver.mpiprefix="srun -n20"
mc.fcisolver.num_thrds=12
mc.verbose = 4
cas=mc.kernel()[0]
from pyscf.icmpspt import icmpspt
pt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\
pttype="MRLCC",\
third_order=True,\
fully_ic=True,\
do_dm4=True)
datacsv['basis'].append(basis)
datacsv['charge'].append(0)
datacsv['method'].append('MRPT')
datacsv['molecule'].append(element)
datacsv['pseudopotential'].append('trail')
datacsv['totalenergy'].append(cas+pt)
datacsv['totalenergy-stocherr'].append(0.0)
datacsv['totalenergy-syserr'].append(0.0)
pd.DataFrame(datacsv).to_csv(element+".csv",index=False)
| [
"[email protected]"
] | |
d4875149298a8f42184376266bdf807f665ffb6b | 19dedf819f54bf905b2f68053ea75a654578b69e | /manimlib/mobject/number_line.py | 40577194a33b901704314043b50494077895ad7f | [
"MIT"
] | permissive | gear/ganim | 784eb88cdbc7e0dfdd1123344bb5c73a170d1a56 | 6a84bbc37580b79de28fe3f25c314f5f828d9705 | refs/heads/master | 2022-11-29T23:39:45.363480 | 2019-12-20T07:46:23 | 2019-12-20T07:46:23 | 229,211,899 | 0 | 1 | MIT | 2022-11-22T02:58:38 | 2019-12-20T07:20:07 | Python | UTF-8 | Python | false | false | 6,218 | py | import operator as op
from manimlib.constants import *
from manimlib.mobject.geometry import Line
from manimlib.mobject.numbers import DecimalNumber
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.utils.bezier import interpolate
from manimlib.utils.config_ops import digest_config
from manimlib.utils.config_ops import merge_dicts_recursively
from manimlib.utils.simple_functions import fdiv
from manimlib.utils.space_ops import normalize
class NumberLine(Line):
CONFIG = {
"color": LIGHT_GREY,
"x_min": -FRAME_X_RADIUS,
"x_max": FRAME_X_RADIUS,
"unit_size": 1,
"include_ticks": True,
"tick_size": 0.1,
"tick_frequency": 1,
# Defaults to value near x_min s.t. 0 is a tick
# TODO, rename this
"leftmost_tick": None,
# Change name
"numbers_with_elongated_ticks": [0],
"include_numbers": False,
"numbers_to_show": None,
"longer_tick_multiple": 2,
"number_at_center": 0,
"number_scale_val": 0.75,
"label_direction": DOWN,
"line_to_number_buff": MED_SMALL_BUFF,
"include_tip": False,
"tip_width": 0.25,
"tip_height": 0.25,
"decimal_number_config": {
"num_decimal_places": 0,
},
"exclude_zero_from_default_numbers": False,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
start = self.unit_size * self.x_min * RIGHT
end = self.unit_size * self.x_max * RIGHT
Line.__init__(self, start, end, **kwargs)
self.shift(-self.number_to_point(self.number_at_center))
self.init_leftmost_tick()
if self.include_tip:
self.add_tip()
if self.include_ticks:
self.add_tick_marks()
if self.include_numbers:
self.add_numbers()
def init_leftmost_tick(self):
if self.leftmost_tick is None:
self.leftmost_tick = op.mul(
self.tick_frequency,
np.ceil(self.x_min / self.tick_frequency)
)
def add_tick_marks(self):
tick_size = self.tick_size
self.tick_marks = VGroup(*[
self.get_tick(x, tick_size)
for x in self.get_tick_numbers()
])
big_tick_size = tick_size * self.longer_tick_multiple
self.big_tick_marks = VGroup(*[
self.get_tick(x, big_tick_size)
for x in self.numbers_with_elongated_ticks
])
self.add(
self.tick_marks,
self.big_tick_marks,
)
def get_tick(self, x, size=None):
if size is None:
size = self.tick_size
result = Line(size * DOWN, size * UP)
result.rotate(self.get_angle())
result.move_to(self.number_to_point(x))
result.match_style(self)
return result
def get_tick_marks(self):
return VGroup(
*self.tick_marks,
*self.big_tick_marks,
)
def get_tick_numbers(self):
u = -1 if self.include_tip else 1
return np.arange(
self.leftmost_tick,
self.x_max + u * self.tick_frequency / 2,
self.tick_frequency
)
def number_to_point(self, number):
alpha = float(number - self.x_min) / (self.x_max - self.x_min)
return interpolate(
self.get_start(), self.get_end(), alpha
)
def point_to_number(self, point):
start_point, end_point = self.get_start_and_end()
full_vect = end_point - start_point
unit_vect = normalize(full_vect)
def distance_from_start(p):
return np.dot(p - start_point, unit_vect)
proportion = fdiv(
distance_from_start(point),
distance_from_start(end_point)
)
return interpolate(self.x_min, self.x_max, proportion)
def n2p(self, number):
"""Abbreviation for number_to_point"""
return self.number_to_point(number)
def p2n(self, point):
"""Abbreviation for point_to_number"""
return self.point_to_number(point)
def get_unit_size(self):
return (self.x_max - self.x_min) / self.get_length()
def default_numbers_to_display(self):
if self.numbers_to_show is not None:
return self.numbers_to_show
numbers = np.arange(
np.floor(self.leftmost_tick),
np.ceil(self.x_max),
)
if self.exclude_zero_from_default_numbers:
numbers = numbers[numbers != 0]
return numbers
def get_number_mobject(self, number,
number_config=None,
scale_val=None,
direction=None,
buff=None):
number_config = merge_dicts_recursively(
self.decimal_number_config,
number_config or {},
)
if scale_val is None:
scale_val = self.number_scale_val
if direction is None:
direction = self.label_direction
buff = buff or self.line_to_number_buff
num_mob = DecimalNumber(number, **number_config)
num_mob.scale(scale_val)
num_mob.next_to(
self.number_to_point(number),
direction=direction,
buff=buff
)
return num_mob
def get_number_mobjects(self, *numbers, **kwargs):
if len(numbers) == 0:
numbers = self.default_numbers_to_display()
return VGroup(*[
self.get_number_mobject(number, **kwargs)
for number in numbers
])
def get_labels(self):
return self.get_number_mobjects()
def add_numbers(self, *numbers, **kwargs):
self.numbers = self.get_number_mobjects(
*numbers, **kwargs
)
self.add(self.numbers)
return self
class UnitInterval(NumberLine):
CONFIG = {
"x_min": 0,
"x_max": 1,
"unit_size": 6,
"tick_frequency": 0.1,
"numbers_with_elongated_ticks": [0, 1],
"number_at_center": 0.5,
"decimal_number_config": {
"num_decimal_places": 1,
}
}
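# Editor's illustrative sketch (not part of the original module): constructing
# a NumberLine and converting between numbers and points. Rendering requires a
# manim Scene; this only exercises the coordinate helpers documented above.
#
#   line = NumberLine(x_min=-3, x_max=3, include_numbers=True)
#   p = line.n2p(1.5)      # screen point for the number 1.5
#   x = line.p2n(p)        # -> 1.5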
| [
"[email protected]"
] | |
38e4789e280a8928eeebee517516c384dbfba205 | 0b41847069aa825496ba80bb2d776cdba7cf4bc1 | /src/face++.py | 5630b0edd20d049360ae037b5b6a7e1334fa5b73 | [] | no_license | josephding23/Facial | b30ba17cf8138b8b7631080983d770ce01339d67 | 9ce4332ec84a0d1edd8256014baacb57b38a432b | refs/heads/master | 2021-10-28T04:49:54.055836 | 2019-04-22T06:52:03 | 2019-04-22T06:52:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | # -*- coding: utf-8 -*-
import urllib.request
import urllib.error
import time
import json
http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
key = "JiR6-u4F8zPb63yuIRi3x7q8V_3kGd0M"
secret = "L81Va5ynggdIrrs107c9h9WnqW7I1YDs"
filepath = r"E:/Fotografica/92414d2ccbe975e049634c084f0d6.jpg"
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_key')
data.append(key)
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_secret')
data.append(secret)
data.append('--%s' % boundary)
fr = open(filepath, 'rb')
data.append('Content-Disposition: form-data; name="%s"; filename=" "' % 'image_file')
data.append('Content-Type: %s\r\n' % 'application/octet-stream')
data.append(fr.read())
fr.close()
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_landmark')
data.append('1')
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_attributes')
data.append(
"gender,age,smiling,headpose,facequality,blur,eyestatus,emotion,ethnicity,beauty,mouthstatus,eyegaze,skinstatus")
data.append('--%s--\r\n' % boundary)
for i, d in enumerate(data):
if isinstance(d, str):
data[i] = d.encode('utf-8')
http_body = b'\r\n'.join(data)
# build http request
req = urllib.request.Request(url=http_url, data=http_body)
# header
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
# post data to server
resp = urllib.request.urlopen(req, timeout=5)
# get response
qrcont = resp.read()
# if you want to load as json, you should decode first,
    # for example: json.loads(qrcont.decode('utf-8'))
result = qrcont.decode('utf-8')
result_json = json.loads(result)
print(json.dumps(result_json, indent=2))
except urllib.error.HTTPError as e:
result = e.read().decode('utf-8')
print(json.dumps(result, indent=2)) | [
"[email protected]"
] | |
779966906bddd6b8136f5b50be49cdd1090534ee | b9f5bed67a7e2afefe3fd759c78eeed4d10881f6 | /django_app/introduction_to_models/migrations/0001_initial.py | bf3e3ef02e026969fdfab0bb88d03d964bd76890 | [] | no_license | jmnghn/Django-Documentation-Practice | 672c1cc9e37ffeff5f08029f45bf580de923422b | 2106881c415e6e39ba14dc5ba6aa9ac1430de327 | refs/heads/master | 2022-09-29T17:30:36.860197 | 2017-06-08T05:09:12 | 2017-06-08T05:09:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-05 18:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('shirt_size', models.CharField(max_length=1)),
],
),
]
| [
"[email protected]"
] | |
102e1c8facf01163849e905d9a46995b66eebf28 | c50d716910e4d51ebfc24ca8a50abe4842b63b5d | /train_tp_sort_wiki.py | a37081f7b92bac6d3cbbd87f54ed4657078c4ad5 | [
"MIT"
] | permissive | SkyrookieYu/manga_ordering | b504d1a78e26351adb7d00a1ab8a4929854cc4b7 | 8957e9efc0bed68636bc9898dda056963e214663 | refs/heads/main | 2023-03-22T14:12:16.399591 | 2021-03-17T09:17:40 | 2021-03-17T09:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | from transformers import Trainer, TrainingArguments
from data.dataloaders import PairwiseWikiData
from models.tpsortmodels import base_order_model
train_set = PairwiseWikiData("jawiki/japanese_wiki_paragraphs.json", divisions=10)
model = base_order_model()
training_args = TrainingArguments(
output_dir="ckpt",
num_train_epochs=3,
per_device_train_batch_size=40,
learning_rate=5e-6,
warmup_steps=500,
weight_decay=0.01,
logging_dir='./logs',
logging_steps=len(train_set) // 200 + 1,
save_steps=len(train_set) // 200 + 1,
save_total_limit=2,
dataloader_num_workers=4
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_set
)
trainer.train() | [
"[email protected]"
] | |
6d6e7b7b4ea0bb64f56f91ae028971b202e5b4a5 | eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6 | /ccpnmr2.4/python/ccp/format/nmrStar/bmrb/File.py | f38ee8715476d4ac2bb74448b38a52e00f6a6ea3 | [] | no_license | edbrooksbank/ccpnmr2.4 | cfecb0896dcf8978d796e6327f7e05a3f233a921 | f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c | refs/heads/master | 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 | HTML | UTF-8 | Python | false | false | 17,274 | py | """
Classes for dealing with STAR syntax
"""
__author__ = "$Author: jurgenfd $"
___revision__ = "$Revision: 1.8.4.2 $"
___date__ = "$Date: 2011-04-06 09:00:38 $"
"""
$Log: not supported by cvs2svn $
Revision 1.8.4.1 2010/02/11 12:44:33 wimvranken
Fix to deal with loop_ only chemical shift files - these are now modified on-the-fly in order to read them.
Revision 1.8 2007/07/09 12:22:27 wimvranken
Merge with branch 4.
Revision 1.7.4.1.4.2 2007/06/13 08:35:52 wimvranken
Removed copyright - is not CCPN
Revision 1.7.4.1.4.1 2006/10/25 10:57:09 wimvranken
Fixed problem that shows up in windows only (\ to continue end of line)
Revision 1.7.4.1 2006/07/21 17:29:01 wimvranken
Now only contain full path imports
Revision 1.7 2005/11/03 14:06:39 wimvranken
Replaced var text_lenght by text_length, added extra error message printout
Revision 1.6 2005/06/27 16:43:41 wb104
Updated licenses.
Revision 1.5 2004/12/15 17:57:20 tjs23
TJS: Updated licenses.
Revision 1.4 2003/08/08 12:45:54 wfv20
Changed preferred quote, added comment writing support
Revision 1.3 2003/07/14 09:34:32 wfv20
Modified so list references are not carried through class initialization
Revision 1.2 2003/07/10 16:13:31 wfv20
Changed to new universal setup
Revision 1.1 2003/07/01 12:56:18 wfv20
Jurgen Doreleijers modified Python nmrStar reader. Bug fixes and additions (notably for reading nmrView star files) have been made.
Revision 1.1.1.1 2001/11/02 20:16:40 jurgen
Initial package capable of read/write access to STAR files without nested loops
"""
## Standard modules
import os
## BMRB modules
#import ccp.format.nmrStar.bmrb as STAR # TODO: check
from ccp.format.nmrStar.bmrb.SaveFrame import * #@UnusedWildImport
from ccp.format.nmrStar.bmrb.TagTable import * #@UnusedWildImport
from ccp.format.nmrStar.bmrb.Text import * #@UnusedWildImport
from ccp.format.nmrStar.bmrb.Utils import * #@UnusedWildImport
## CCP stuff
from memops.universal.Io import getTopDirectory
if os.name != 'posix':
tempdir = None # The module tempfile will pick one
else:
tempdir = '/tmp'
# No changes required after this line
###############################################################################
"""
STAR file
Only methods for reading and writing are currently implemented.
datanodes is a list of possibly mixed saveframes and tagtables
"""
class File (Lister):
def __init__( self,
title = 'general_star_file_title',
filename = '',
datanodes = None,
flavor = None, # Call set_flavor when changing
preferred_quote = '"', # Put somewhere else?
verbosity = 2
):
self.title = title
self.filename = filename
if datanodes:
self.datanodes = datanodes
else:
self.datanodes = []
self.flavor = flavor
self.verbosity = verbosity
"Simple checks on integrity"
def check_integrity( self, recursive = 1 ):
if recursive:
for datanode in self.datanodes:
if datanode.check_integrity( recursive = 1):
print "ERROR: integrity check failed for Saveframe"
return 1
if self.verbosity >= 9:
print 'Checked integrity of File (%2s datanodes, recurs.=%s) : OK [%s]' % (
len(self.datanodes), recursive, self.title )
"Returns the STAR text representation"
def star_text(self, flavor = None):
if flavor == None:
flavor = self.flavor
str = 'data_%s\n' % self.title
# Data node objects can be of type SaveFrame OR TagTable only
# Data node object can now also contain comment information
# these comments are printed before the saveframe (Wim 2003/08/05)
for datanode in self.datanodes:
str = str + datanode.comment
str = str + datanode.star_text( flavor = flavor)
return str
"""
    Reads an NMR-STAR formatted file using
the filename attribute.
Added option to pass text in straight away - this is for handling crappy loop_ only files so
I can fix them in memory (Wim 11/02/10).
"""
def read (self, strip_comments=1, nmrView_type = 0, text = ""):
if not text:
if not self.filename:
print 'ERROR: no filename in STARFile with title:', self.title
return 1
text = open(self.filename,'r').read()
if self.parse( text=text, strip_comments=strip_comments, nmrView_type = nmrView_type):
print "ERROR: couldn't parse file"
return 1
return 0
"""
- Parses text into save frames and tagtables.
    - Input text should start at the given position with a non-whitespace character
    - Appends a list of datanodes (save frames or tagtables)
"""
def parse (self, text='', strip_comments=1, nmrView_type = 0):
if self.verbosity > 1:
print 'Parsing STAR file:', self.filename
"""
'"Begin at the beginning," the King said, gravely,
"and go on till you come to the end; then stop."' (LC)
"""
## Collapse the semicolon block for ease of parsing
## Very expensive to do
## Timed at: xx seconds for a xx Mb file with xx semicolon blocks
text = semicolon_block_collapse( text )
## Now it's easy to strip comments
if strip_comments:
text = comments_strip( text )
## For nmrView 'nmrStar' also compress { } into {}
## Wim 05/03/2003
if nmrView_type:
text = nmrView_compress( text )
## TITLE
match_data_tag = re.search(r'\s*data_(\S+)\s+', text, 0 )
if not match_data_tag:
print "Warning: Found no 'data_title' string in file's text."
print "Warning: Your file is not valid NMR-STAR - to attempt reading"
print "Warning: this file a data_title tag was added automatically."
text = "data_autoTitle\n" + text
match_data_tag = re.search(r'\s*data_(\S+)\s+', text, 0 )
self.title = match_data_tag.group(1)
pos = match_data_tag.end()
## Four quick searches for possible continuations
next_sf_begin = None # SAVE FRAME BEGIN
next_sf_end = None # SAVE FRAME END
next_free_tt = None # FREE TAGTABLE
next_loop_tt = None # LOOP TAGTABLE
sf_open = None # When a saveframe is open
text_length = len(text)
## Only break when parsed to the eof
while pos < text_length:
if self.verbosity >= 9:
print 'Parse text from position:%s : [%s]' % (
pos, text[pos:pos+10] )
match_save_begin_nws = pattern_save_begin_nws.search(text,pos,pos+len('save_1'))
if match_save_begin_nws:
if match_save_begin_nws.start() == pos:
next_sf_begin = 1
if not (next_sf_begin):
match_save_end_nws = pattern_save_end_nws.search(text,pos,pos+len('save_ '))
if match_save_end_nws:
if match_save_end_nws.start() == pos:
next_sf_end = 1
if not (next_sf_begin or next_sf_end):
match_tag_name_nws = pattern_tag_name_nws.search(text,pos,pos+len(' _X'))
if match_tag_name_nws:
if match_tag_name_nws.start() == pos:
next_free_tt = 1
if not (next_sf_begin or next_sf_end or next_free_tt):
match_tagtable_loop_nws = pattern_tagtable_loop_nws.search(text,pos,pos+len('loop_ '))
if match_tagtable_loop_nws:
if match_tagtable_loop_nws.start() == pos:
next_loop_tt = 1
## Just checking
if not ( next_sf_begin or next_sf_end or next_free_tt or next_loop_tt ):
print 'ERROR: No new item found in data_nodes_parse.'
print 'Items looked for are a begin or end of a saveframe, or'
print 'a begin of a tagtable(free or looped).'
print
print "At text:"
print text[pos:pos+70]
print "Preceded by:"
print text[pos-200:pos]
return None
## SAVE FRAME BEGIN
if next_sf_begin:
if sf_open:
print "ERROR: Found the beginning of a saveframe but"
print "ERROR: saveframe before is still open(not closed;-)"
return None
match_save_begin = pattern_save_begin.search( text, pos )
if not match_save_begin:
print "ERROR: Code error (no second match on sf begin)";
return None
if match_save_begin.start() != pos:
print "ERROR: Code error (wrong second match on sf begin)";
return None
self.datanodes.append( SaveFrame( tagtables = [], # Need resetting
verbosity = self.verbosity ) )
self.datanodes[-1].title = match_save_begin.group(1)
sf_open = 1
next_sf_begin = None
pos = match_save_begin.end()
continue
## SAVE FRAME END
if next_sf_end:
if not sf_open:
print "ERROR: Found the end of a saveframe but"
print "ERROR: saveframe was not open"
return None
match_save_end = pattern_save_end.search( text, pos )
if not match_save_end:
print "ERROR: Code error (no second match on sf end)";
return None
if match_save_end.start() != pos:
print "ERROR: Code error (wrong second match on sf end)";
return None
sf_open = None
next_sf_end = None
pos = match_save_end.end()
continue
## FREE or LOOP TAGTABLE
if next_free_tt:
free = 1
next_free_tt = None
else: # next_loop_tt must be true as this was checked before
if not next_loop_tt:
print 'ERROR: code bug in File.parse()'
return None
free = None
next_loop_tt = None
match_tagtable_loop = pattern_tagtable_loop.search( text, pos )
if not match_tagtable_loop:
print 'ERROR: Code error, no second match on tagtable_loop'
return None
if match_tagtable_loop.start() != pos:
print "ERROR: Code error (wrong second match on tagtable_loop)"
return None
pos = match_tagtable_loop.end()
if sf_open:
dn = self.datanodes[-1].tagtables # Insert in last saveframes' tagtables
else:
dn = self.datanodes
dn.append(
TagTable( free = free,
tagnames = [],
tagvalues = [],
verbosity = verbosity ) )
tt = dn[-1] # Just to be verbose for the beloved reader
pos = tt.parse( text=text, pos=pos )
if pos == None:
print "ERROR: In parsing tagtable"
return None
if self.verbosity >=9:
print 'Parsed tagtable up to pos: [%s]' % pos
if self.verbosity >= 9:
print 'Parsed: [%s] datanodes (top level count only)' % len( self.datanodes )
if self.check_integrity( recursive = 0):
print "ERROR: integrity not ok"
return 1
# Save some memory
text = ''
return 0
"""
Writes the object to a STAR formatted file using
the filename attribute.
"""
def write (self):
if not self.filename:
print 'ERROR: no filename in STARFile with title:', self.title
return 1
open(self.filename,'w').write( self.star_text() )
if self.verbosity > 1:
print 'Written STAR file:', self.filename
"""
Tries to reformat a file on disk with the filename given in the
attribute of this object.
    Runs Steve Mading's (BMRB) formatNMRSTAR program if available.
NOTE: this does NOT do anything with the datanodes of this object!
"""
def formatNMRSTAR( self,
comment_file_str_dir = '/bmrb/lib',
):
if self.verbosity >= 9:
print "Attempting to reformat STAR file using external program if available"
if os.name != 'posix':
print "WARNING: No external program available on non-posix systems for reformatting STAR files"
return 1
## Try command and check for non-zero exit status
## Note that these commands are only valid on Unix
## Standard error is thrown on the bit bucket.
cmd = "%s < %s 2>/dev/null" % ('formatNMRSTAR', self.filename)
pipe = os.popen( cmd )
output = pipe.read()
## The program exit status is available by the following construct
## The status will be the exit number (in one of the bytes)
## unless the program executed successfully in which case it will
## be None.
status = pipe.close()
if self.verbosity >= 9:
print "Got status:", status
## Success
if ( status == None ):
try:
open(self.filename, 'w').write(output)
except IOError:
print 'ERROR: Could not open the file for writing', self.filename
return 1
if self.verbosity >= 9:
print "Reformatted STAR file:", self.filename
return 0
else:
if self.verbosity :
print "WARNING: Not pretty printing STAR file", self.filename
return 1
################################################################################
if __name__ == '__main__':
if 1:
# 0 is only errors, 1 is warnings as well, 2 is normal and 9 is debug
STAR.verbosity = 2
strf = File( verbosity=STAR.verbosity)
if os.name == 'posix':
pathname = '.'
else:
pathname = r'C:\Temp'
## filename = 'local/test_nopound.txt'
## filename = 'local/block_21921.txt'
topDir = getTopDirectory()
#filename = os.path.join(topDir, '../rawdata/8/1/1/info.general/bmr5106.str')
filename = os.path.join(topDir, '../reference/nmrStar/1b4c.restraints')
#filename = os.path.join(topDir, '../rawdata/2/2/1/info.general/bmr5307.str')
strf.filename = filename
## def myfunc():
if strf.read():
print "ERROR: In read. Exiting program"
# strf.filename
# strf.formatNMRSTAR: REFORMATS FILE! Don't use!!
# strf.attrnames: bulk output of everything in file
# strf.flavor: mmCIF, ... (in principle only nmrStar works?)
# strf.title: pdb code?
#
# strf.datanodes: list of saveframes
#
# .title: saveframe name
        # .tagtables: list of tags in saveframe
# .tagnames: list of tagnames
# .tagvalues: list of list of values for tagnames
# .free: if None, is a LOOP tagtable! If 1, is a list of simple tags
#
# time.time()
# strf.read()
# time.time()
## profile.run( 'myfunc()' )
## print "Exiting program (test done)"
## sys.exit(0)
## strf.flavor = 'mmCIF'
#strf.filename = strf.filename + '_new1.str'
#if strf.write():
# print "ERROR: In write. Exiting program"
| [
"[email protected]"
] | |
c4f5690140458bce0d536cfefba6636c3535c9ad | fb3439159a8bd88fd380b0e3fea435ddd4e6e45a | /doge/common/utils.py | 4bbfede249a45b70aa72dc2edbd57b2757e10776 | [
"Apache-2.0"
] | permissive | qyt2018/doge | 6ab3aca5424dfa258fef6ea0db0b5f451c0cd430 | 54ba17f9e997e468ab0d7af15e5ef7f45b19e3d6 | refs/heads/master | 2020-06-28T04:36:44.014286 | 2018-09-30T09:56:31 | 2018-09-30T09:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | # coding: utf-8
import time
from importlib import import_module
from gsocketpool.pool import Pool
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
raise ImportError(msg)
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
raise ImportError(msg)
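# Illustrative example (editor's addition): import_string('os.path.join')
# returns the os.path.join callable; a dotted path that cannot be resolved,
# or one with no dot at all, raises ImportError.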
def time_ns():
s, n = ("%.20f" % time.time()).split('.')
return int(s) * 1e9 + int(n[:9])
def str_to_host(s):
h, p = s.split(":")
return (str(h), int(p))
class ConnPool(Pool):
def _create_connection(self):
conn = self._factory(**self._options)
# conn.open()
return conn | [
"[email protected]"
] | |
36812d07aa4a93feac66304a289b2e735c02b1bc | ead692cceee6635fc3b11ea6da2069614883c4ef | /holocron/models/resnet.py | a1e0b25551b6b4f972b42f26d7f4a0ac22a549d2 | [
"MIT"
] | permissive | aiedward/Holocron | c6036c8d95efbc2f26a05d8f83f45fd38c22422e | 7206d08a9f5338fc3f08834fb64171fbd2195e1a | refs/heads/master | 2023-04-11T08:52:56.203299 | 2021-04-04T22:55:52 | 2021-04-04T22:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,651 | py | import sys
from collections import OrderedDict
import torch.nn as nn
from torch import Tensor
from holocron.nn import init, GlobalAvgPool2d
from .utils import conv_sequence, load_pretrained_params
from typing import Dict, Any, List, Optional, Callable, Union, Type
__all__ = ['BasicBlock', 'Bottleneck', 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50d']
default_cfgs: Dict[str, Dict[str, Any]] = {
'resnet18': {'block': 'BasicBlock', 'num_blocks': [2, 2, 2, 2],
'url': None},
'resnet34': {'block': 'BasicBlock', 'num_blocks': [3, 4, 6, 3],
'url': None},
'resnet50': {'block': 'Bottleneck', 'num_blocks': [3, 4, 6, 3],
'url': 'https://github.com/frgfm/Holocron/releases/download/v0.1.2/resnet50_256-5e6206e0.pth'},
'resnet101': {'block': 'Bottleneck', 'num_blocks': [3, 4, 23, 3],
'url': None},
    'resnet152': {'block': 'Bottleneck', 'num_blocks': [3, 8, 36, 3],
'url': None},
'resnext50_32x4d': {'block': 'Bottleneck', 'num_blocks': [3, 4, 6, 3],
'url': None},
'resnext101_32x8d': {'block': 'Bottleneck', 'num_blocks': [3, 4, 23, 3],
'url': None},
'resnet50d': {'block': 'Bottleneck', 'num_blocks': [3, 4, 6, 3],
'url': 'https://github.com/frgfm/Holocron/releases/download/v0.1.2/resnet50d_224-499c0b54.pth'},
}
class _ResBlock(nn.Module):
expansion: int = 1
def __init__(
self,
convs: List[nn.Module],
downsample: Optional[nn.Module] = None,
act_layer: Optional[nn.Module] = None
) -> None:
super().__init__()
# Main branch
self.conv = nn.Sequential(*convs)
# Shortcut connection
self.downsample = downsample
if isinstance(act_layer, nn.Module):
self.activation = act_layer
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv(x)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
if hasattr(self, 'activation'):
out = self.activation(out)
return out
class BasicBlock(_ResBlock):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
conv_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any
) -> None:
super().__init__(
[*conv_sequence(inplanes, planes, act_layer, norm_layer, drop_layer, conv_layer, kernel_size=3,
stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation, **kwargs),
*conv_sequence(planes, planes, None, norm_layer, drop_layer, conv_layer, kernel_size=3,
stride=1, padding=dilation, groups=groups, bias=False, dilation=dilation, **kwargs)],
downsample, act_layer)
class Bottleneck(_ResBlock):
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
conv_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any
) -> None:
width = int(planes * (base_width / 64.)) * groups
super().__init__(
[*conv_sequence(inplanes, width, act_layer, norm_layer, drop_layer, conv_layer, kernel_size=1,
stride=1, bias=False, **kwargs),
*conv_sequence(width, width, act_layer, norm_layer, drop_layer, conv_layer, kernel_size=3,
stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation, **kwargs),
*conv_sequence(width, planes * self.expansion, None, norm_layer, drop_layer, conv_layer, kernel_size=1,
stride=1, bias=False, **kwargs)],
downsample, act_layer)
class ChannelRepeat(nn.Module):
def __init__(self, chan_repeats: int = 1) -> None:
super().__init__()
self.chan_repeats = chan_repeats
def forward(self, x: Tensor) -> Tensor:
repeats = [1] * x.ndim # type: ignore[attr-defined]
# Repeat the tensor along the channel dimension
repeats[1] = self.chan_repeats
return x.repeat(*repeats)
class ResNet(nn.Sequential):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
num_blocks: List[int],
planes: List[int],
num_classes: int = 10,
in_channels: int = 3,
zero_init_residual: bool = False,
width_per_group: int = 64,
conv_layer: Optional[Callable[..., nn.Module]] = None,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
deep_stem: bool = False,
stem_pool: bool = True,
avg_downsample: bool = False,
num_repeats: int = 1,
block_args: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None
) -> None:
if conv_layer is None:
conv_layer = nn.Conv2d
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if act_layer is None:
act_layer = nn.ReLU(inplace=True)
self.dilation = 1
in_planes = 64
# Deep stem from ResNet-C
if deep_stem:
_layers = [*conv_sequence(in_channels, in_planes // 2, act_layer, norm_layer, drop_layer, conv_layer,
kernel_size=3, stride=2, padding=1, bias=False),
*conv_sequence(in_planes // 2, in_planes // 2, act_layer, norm_layer, drop_layer, conv_layer,
kernel_size=3, stride=1, padding=1, bias=False),
*conv_sequence(in_planes // 2, in_planes, act_layer, norm_layer, drop_layer, conv_layer,
kernel_size=3, stride=1, padding=1, bias=False)]
else:
_layers = conv_sequence(in_channels, in_planes, act_layer, norm_layer, drop_layer, conv_layer,
kernel_size=7, stride=2, padding=3, bias=False)
if stem_pool:
_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# Optional tensor repetitions along channel axis (mainly for TridentNet)
if num_repeats > 1:
_layers.append(ChannelRepeat(num_repeats))
# Consecutive convolutional blocks
stride = 1
# Block args
if block_args is None:
block_args = dict(groups=1)
if not isinstance(block_args, list):
block_args = [block_args] * len(num_blocks)
for _num_blocks, _planes, _block_args in zip(num_blocks, planes, block_args):
_layers.append(self._make_layer(block, _num_blocks, in_planes, _planes, stride, width_per_group,
act_layer=act_layer, norm_layer=norm_layer, drop_layer=drop_layer,
avg_downsample=avg_downsample, num_repeats=num_repeats,
block_args=_block_args))
in_planes = block.expansion * _planes
stride = 2
super().__init__(OrderedDict([
('features', nn.Sequential(*_layers)),
('pool', GlobalAvgPool2d(flatten=True)),
('head', nn.Linear(num_repeats * in_planes, num_classes))]))
# Init all layers
init.init_module(self, nonlinearity='relu')
# Init shortcut
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
m.convs[2][1].weight.data.zero_() # type: ignore[index, union-attr]
elif isinstance(m, BasicBlock):
m.convs[1][1].weight.data.zero_() # type: ignore[index, union-attr]
@staticmethod
def _make_layer(
block: Type[Union[BasicBlock, Bottleneck]],
num_blocks: int,
in_planes: int,
planes: int,
stride: int = 1,
width_per_group: int = 64,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
conv_layer: Optional[Callable[..., nn.Module]] = None,
avg_downsample: bool = False,
num_repeats: int = 1,
block_args: Optional[Dict[str, Any]] = None
) -> nn.Sequential:
downsample = None
if stride != 1 or in_planes != planes * block.expansion:
# Downsampling from ResNet-D
if avg_downsample:
downsample = nn.Sequential(nn.AvgPool2d(stride, ceil_mode=True, count_include_pad=False),
*conv_sequence(num_repeats * in_planes,
num_repeats * planes * block.expansion,
None, norm_layer, drop_layer, conv_layer,
kernel_size=1, stride=1, bias=False))
else:
downsample = nn.Sequential(*conv_sequence(num_repeats * in_planes,
num_repeats * planes * block.expansion,
None, norm_layer, drop_layer, conv_layer,
kernel_size=1, stride=stride, bias=False))
if block_args is None:
block_args = {}
layers = [block(in_planes, planes, stride, downsample, base_width=width_per_group,
act_layer=act_layer, norm_layer=norm_layer, drop_layer=drop_layer, **block_args)]
for _ in range(num_blocks - 1):
layers.append(block(block.expansion * planes, planes, 1, None, base_width=width_per_group,
act_layer=act_layer, norm_layer=norm_layer, drop_layer=drop_layer, **block_args))
return nn.Sequential(*layers)
def _resnet(arch: str, pretrained: bool, progress: bool, **kwargs: Any) -> ResNet:
#ย Retrieve the correct block type
block = sys.modules[__name__].__dict__[default_cfgs[arch]['block']]
# Build the model
model = ResNet(block, default_cfgs[arch]['num_blocks'], [64, 128, 256, 512], **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]['url'], progress)
return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-18 from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet18', pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-34 from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet34', pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-50 from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet50', pretrained, progress, **kwargs)
def resnet50d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-50-D from
`"Bag of Tricks for Image Classification with Convolutional Neural Networks"
<https://arxiv.org/pdf/1812.01187.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet50d', pretrained, progress, deep_stem=True, avg_downsample=True, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-101 from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet101', pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-152 from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _resnet('resnet152', pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNeXt-50 from
`"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
kwargs['width_per_group'] = 4
block_args = dict(groups=32)
return _resnet('resnext50_32x4d', pretrained, progress, block_args=block_args, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNeXt-101 from
`"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
kwargs['width_per_group'] = 8
block_args = dict(groups=32)
return _resnet('resnext101_32x8d', pretrained, progress, block_args=block_args, **kwargs)
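if __name__ == '__main__':
    # Quick sanity check (an added sketch, not part of the original module): build a
    # randomly initialised classifier and run a dummy forward pass; num_classes keeps
    # whatever default this module defines.
    import torch
    model = resnet18(pretrained=False)
    out = model(torch.rand(1, 3, 224, 224))
    print(out.shape)  # -> torch.Size([1, num_classes])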
| [
"[email protected]"
] | |
de06ca34f13b2eff76f8484c8cfac851d34f872f | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-core/Examples/Scripts/signal-demo.py | afb61eef5d6ca835e627d2ec0eb4ac0224ec8008 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | #!/usr/bin/python
from PyObjCTools import Signals
Signals.dumpStackOnFatalSignal()
import os
import signal
## all this does is set up an interesting stack to
## to show that a backtrace really is being
## generated. Try commenting out the
## Signals.dumpStackOnFatalSignal() line above and run
## the script again.
def badness():
os.kill(os.getpid(), signal.SIGQUIT)
class Foo:
def baz(self):
badness()
def bar(self):
self.baz()
Foo().bar()
| [
"[email protected]"
] | |
c63083f09d069c34cd302857d6198be2df11afbb | ec5ec1bcfb3f82048fd2703eed6e6fbbcd352bb1 | /add_digits.py | caac8cd0f8e2fffc17fe8a9f08b53af07dcbde30 | [] | no_license | claytonjwong/Sandbox-Python | 5e9da26374b178831ca60beaabfbdd2f423032a2 | b155895c90169ec97372b2517f556fd50deac2bc | refs/heads/master | 2021-06-21T15:37:47.918514 | 2017-08-06T23:58:15 | 2017-08-06T23:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | """
258. Add Digits
Given a non-negative integer num, repeatedly add all its digits until the result has only one digit.
For example:
Given num = 38, the process is like: 3 + 8 = 11, 1 + 1 = 2. Since 2 has only one digit, return it.
"""
class Solution(object):
def addDigits(self, num):
"""
:type num: int
:rtype: int
"""
sum = 0
while True:
#
# add up each decimal position from right-to-left
#
sum += num % 10
num //= 10
#
# when there are no decimal positions left,
# see if the sum is greater than a single decimal digit
# if so, then reset num to sum and reset sum to 0
#
# return sum when there are no decimal positions left
# and sum is a single decimal digit
#
if num == 0 and sum >= 10:
num = sum
sum = 0
elif num == 0 and sum < 10:
break
return sum
def main():
solution = Solution()
# import pdb
# pdb.set_trace()
print ( str ( " 2 == " + str ( solution.addDigits(38) )) )
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5a0895c4e62c422e5190c377fea47fb15b49962b | e3b5e20bcb560a3c37c09f728b9340b1715c1818 | /venv/chartsHelper.py | 6e45eeba0e71efe34c8f587f1e3b0a11ef91c189 | [
"MIT"
] | permissive | 180Studios/LoginApp | 63bc50b1f91e7221c7581627ab166eeb01758f5c | 66ff684a81b23d8f45eef2c56be19a2afd95ab29 | refs/heads/master | 2022-12-24T00:33:08.481826 | 2020-02-03T05:14:41 | 2020-02-03T05:14:41 | 144,414,562 | 0 | 1 | MIT | 2022-12-08T01:38:26 | 2018-08-11T19:57:44 | Python | UTF-8 | Python | false | false | 5,043 | py | from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
import numpy, pandas
from tinydb import *
from tinydb.operations import decrement
import re
from datetime import datetime
import config
# init_notebook_mode(connected=True)
# logfile = "log.json"
import os
# Fall back to the repo-local log file when the absolute path is missing
# (a plain assignment never raises, so the original try/except could never fall back).
if os.path.exists("/Users/kylenahas/Desktop/180LoginV1/db/log.json"):
    logfile = "/Users/kylenahas/Desktop/180LoginV1/db/log.json"
else:
    logfile = "./log.json"
def simple_chart():
trace0 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"],
y=[3, 1, 6, 3, 2, 5],
name="Punchcard Members"
)
trace1 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"],
y=[2, 3, 4, 1, 5, 1],
name="Monthly Members"
)
data = [trace0, trace1]
layout = go.Layout(
barmode='stack'
)
fig = go.Figure(data=data, layout=layout)
return fig
class chartsHelper:
    def __init__(self, log=None):
        # Use the module-level default when no path is given (the original set the
        # default and then immediately overwrote self.log with None).
        self.log = log if log else logfile
        try:
            self.logDB = TinyDB(self.log)
        except FileNotFoundError:
            print("Invalid logfile")
# self.get_entries_of_member_type()
def get_entries_of_member_type(self):
member_query = Query()
print(self.logDB.all())
def calculate_attendence(self, start_date=None, period=None):
# zeros = numpy.zeros((7,), dtype=int)
# self.attendance = {}
# # self.attendance = {k:numpy.zeros((7,), dtype=int) for k in config.member_types.keys()} # https://stackoverflow.com/a/483833
# for member_type_k in config.member_types.keys():
# # print(member_type_k)
# self.attendance.update({member_type_k: zeros})
self.attendance = { "punchcard": [0, 0, 0, 0, 0, 0, 0, 0],
"monthly": [0, 0, 0, 0, 0, 0, 0, 0],
"annual": [0, 0, 0, 0, 0, 0, 0, 0],
"student": [0, 0, 0, 0, 0, 0, 0, 0],
"student_annual": [0, 0, 0, 0, 0, 0, 0, 0],
"volunteer": [0, 0, 0, 0, 0, 0, 0, 0],
"trial": [0, 0, 0, 0, 0, 0, 0, 0],
"organization": [0, 0, 0, 0, 0, 0, 0, 0] }
for entry in self.logDB:
# print(entry)
dt = datetime.strptime(entry['log_time'], '%Y-%m-%d %H:%M:%S.%f')
wd = dt.weekday()
member_type_str = "punchcard"
try:
member_type_str = entry['member_type_str']
except:
pass
self.attendance[member_type_str][wd] += 1
return self.attendance
def create_attendence_chart(self):
trace0 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['punchcard'],
name="Punchcard Members"
)
trace1 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['monthly'],
name="Monthly Members"
)
trace2 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['annual'],
name="Annual Members"
)
trace3 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['student'],
name="Student Members"
)
trace4 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['volunteer'],
name="Volunteer Members"
)
trace5 = go.Bar(
x=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
y=self.attendance['organization'],
name="Organization Members"
)
data = [trace0, trace1, trace2, trace3, trace4, trace5]
layout = go.Layout(
barmode='stack'
)
fig = go.Figure(data=data, layout=layout)
return fig
def panda_tests(self):
# print(self.logDB.all())
pandas.set_option('display.max_colwidth', -1)
pandas.set_option('display.max_columns', None)
df = pandas.DataFrame(self.logDB.all())
# for entry in self.logDB:
# df.append(entry, ignore_index=True)
# pd = pandas.read_json(self.logDB.all(), orient='index')
df['log_time'] = pandas.to_datetime(df['log_time'])
df['weekday'] = df['log_time'].apply(lambda x: x.isoweekday())
df.set_index("log_time", inplace=True)
print(df.columns)
print(df.head(10))
print(df.groupby("id").count())
if __name__ == '__main__':
ch = chartsHelper(log="/Users/kylenahas/Desktop/180LoginV1/db/log-mar19.json")
# ch.calculate_attendence()
# plot(ch.create_attendence_chart())
ch.panda_tests() | [
"[email protected]"
] | |
780bb620af4b7428a5557874d2bdfa66ea855a23 | f159aeec3408fe36a9376c50ebb42a9174d89959 | /155.Min-Stack.py | b754b734b48a4ed9bde782e3396b70e1bcdc3b49 | [
"MIT"
] | permissive | mickey0524/leetcode | 83b2d11ab226fad5da7198bb37eeedcd8d17635a | fc5b1744af7be93f4dd01d6ad58d2bd12f7ed33f | refs/heads/master | 2023-09-04T00:01:13.138858 | 2023-08-27T07:43:53 | 2023-08-27T07:43:53 | 140,945,128 | 27 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | # https://leetcode.com/problems/min-stack/
#
# algorithms
# Easy (34.63%)
# Total Accepted: 248,646
# Total Submissions: 718,028
from collections import deque
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.data_stack = deque()
self.min_stack = deque()
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.data_stack.append(x)
if len(self.min_stack) == 0:
self.min_stack.append(x)
elif x < self.min_stack[-1]:
self.min_stack.append(x)
else:
self.min_stack.append(self.min_stack[-1])
def pop(self):
"""
:rtype: void
"""
self.data_stack.pop()
self.min_stack.pop()
def top(self):
"""
:rtype: int
"""
return self.data_stack[-1]
def getMin(self):
"""
:rtype: int
"""
return self.min_stack[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin() | [
"[email protected]"
] | |
0af0dc31c6a079c69e9a0f69496bf6df0961e7c6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03340/s906430612.py | 4d68664a5cd6e13092a6416dfbf996907ce79293 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | n = int(input())
A = list(map(int, input().split()))
xsum = A[0]
asum = A[0]
ans = 0
left, right = 0, 0
while True:
if xsum == asum:
ans += right - left + 1
right += 1
if right == n:
break
asum += A[right]
xsum ^= A[right]
else:
asum -= A[left]
xsum ^= A[left]
left += 1
print(ans)
| [
"[email protected]"
] | |
36ee40b2872c91989b80744c751f87cf778217c5 | 1617bd9db97c0989679ea3fe8ab25506332443bf | /runserver.py | de6eb2856c62d5469ec80b592ab245ba4da84ea7 | [] | no_license | florije1988/flaskapp | 3da7ca00d36121148b1ebd3fe8417d796a474b91 | 7f5eed9c2c1e3c6d2eeb6f1b457770106b2bf254 | refs/heads/master | 2021-01-25T12:14:14.984931 | 2015-09-04T07:29:45 | 2015-09-04T07:29:45 | 41,535,709 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | # -*- coding: utf-8 -*-
__author__ = 'florije'
from intro_to_flask import app
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
a8cb9b7088c18e01114c6ddf432676df64f9ca0e | 7255a09821deba655309b74927091ac9ab8b6075 | /example/MCD_DA/segmentation/adapt_trainer_onestep.synthia.deeplabv2.lr1e-3.epoch20.poly.py | ac6415fdf38e55029e0766adcb7b4d2fdf57a1ef | [
"MIT"
] | permissive | summer1719/pytorchgo | 2814141d6fc0b5c3369d2b9d37e1140e410b25ec | 1ffd561a53d583ca4098297e585e786e472ddd1a | refs/heads/master | 2020-04-25T20:45:04.203802 | 2019-01-10T15:03:31 | 2019-01-10T15:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,715 | py | from __future__ import division
import os,shutil
import torch
from tqdm import tqdm
from PIL import Image
from tensorboard_logger import configure, log_value
from torch.autograd import Variable
from torch.utils import data
from torchvision.transforms import Compose, Normalize, ToTensor
from argmyparse import fix_img_shape_args
from datasets import ConcatDataset, get_dataset, check_src_tgt_ok
from loss import get_prob_distance_criterion
from models.model_util import get_models, get_optimizer
from transform import ToLabel, Scale, RandomSizedCrop, RandomHorizontalFlip, RandomRotation
from util import mkdir_if_not_exist, save_dic_to_json, check_if_done
import argparse
from pytorchgo.utils import logger
from pytorchgo.loss import CrossEntropyLoss2d_Seg
import numpy as np
cityscapes_image_shape = (2048, 1024)
is_debug = 0
# from visualize import LinePlotter
# set_debugger_org_frc() #TODO interesting tool
#parser = get_da_mcd_training_parser()
torch.backends.cudnn.benchmark=True
parser = argparse.ArgumentParser(description='PyTorch Segmentation Adaptation')
parser.add_argument('--savename', type=str, default="normal", help="save name(Do NOT use '-')")
parser.add_argument('--epochs', type=int, default=20,
help='number of epochs to train (default: 10)')
# ---------- Define Network ---------- #
parser.add_argument('--net', type=str, default="deeplabv2", help="network structure",
choices=['fcnresnet', 'psp', 'segnet', 'fcnvgg',
"drn_c_26", "drn_c_42", "drn_c_58", "drn_d_22",
"drn_d_38", "drn_d_54", "drn_d_105"])
parser.add_argument('--res', type=str, default='50', metavar="ResnetLayerNum",
choices=["18", "34", "50", "101", "152"], help='which resnet 18,50,101,152')
parser.add_argument("--is_data_parallel", action="store_true",
help='whether you use torch.nn.DataParallel')
# ---------- Hyperparameters ---------- #
parser.add_argument('--opt', type=str, default="sgd", choices=['sgd', 'adam'],
help="network optimizer")
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate (default: 0.001)')
parser.add_argument("--adjust_lr", default=True,
help='whether you change lr')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum sgd (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=2e-5,
help='weight_decay (default: 2e-5)')
parser.add_argument('--batch_size', type=int, default=1,
help="batch_size")
# ---------- Optional Hyperparameters ---------- #
parser.add_argument('--augment', type=bool ,default=False,
help='whether you use data-augmentation or not')
# ---------- Input Image Setting ---------- #
parser.add_argument("--input_ch", type=int, default=3,
choices=[1, 3, 4])
parser.add_argument('--train_img_shape', default=(1024, 512), nargs=2, metavar=("W", "H"),
help="W H")
# ---------- Whether to Resume ---------- #
parser.add_argument("--resume", type=str, default=None, metavar="PTH.TAR",
help="model(pth) path")
parser.add_argument('--src_dataset', type=str, default='synthia', choices=["gta", "city", "city16", "synthia"])
parser.add_argument('--tgt_dataset', type=str, default='city16', choices=["gta", "city", "city16", "synthia"])
parser.add_argument('--src_split', type=str, default='train',
help="which split('train' or 'trainval' or 'val' or something else) is used ")
parser.add_argument('--tgt_split', type=str, default='train',
help="which split('train' or 'trainval' or 'val' or something else) is used ")
parser.add_argument('--method', type=str, default="MCD", help="Method Name")
parser.add_argument('--num_k', type=int, default=4,
help='how many steps to repeat the generator update')
parser.add_argument("--num_multiply_d_loss", type=int, default=1)
parser.add_argument('--d_loss', type=str, default="diff",
choices=['mysymkl', 'symkl', 'diff'],
help="choose from ['mysymkl', 'symkl', 'diff']")
parser.add_argument('--uses_one_classifier', default=False,
help="adversarial dropout regularization")
parser.add_argument('--gpu', type=str,default='5',
help="")
parser.add_argument("--n_class", type=int, default=16)
parser.add_argument("--use_f2", type=bool, default=True)
logger.auto_set_dir()
args = parser.parse_args()
#args = add_additional_params_to_args(args)
args = fix_img_shape_args(args)
check_src_tgt_ok(args.src_dataset, args.tgt_dataset)
weight = torch.ones(args.n_class)
from pytorchgo.utils.pytorch_utils import set_gpu
set_gpu(args.gpu)
args.start_epoch = 0
resume_flg = True if args.resume else False
start_epoch = 0
if args.resume:
logger.info("=> loading checkpoint '{}'".format(args.resume))
if not os.path.exists(args.resume):
raise OSError("%s does not exist!" % args.resume)
indir, infn = os.path.split(args.resume)
old_savename = args.savename
args.savename = infn.split("-")[0]
logger.info("savename is %s (original savename %s was overwritten)" % (args.savename, old_savename))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint["epoch"]
args = checkpoint['args']
# -------------------------------------- #
model_g, model_f1, model_f2 = get_models(net_name=args.net, res=args.res, input_ch=args.input_ch,
n_class=args.n_class, method=args.method,
is_data_parallel=args.is_data_parallel)
optimizer_g = get_optimizer(model_g.parameters(), lr=args.lr, momentum=args.momentum, opt=args.opt,
weight_decay=args.weight_decay)
optimizer_f = get_optimizer(list(model_f1.parameters()) + list(model_f2.parameters()), lr=args.lr, opt=args.opt,
momentum=args.momentum, weight_decay=args.weight_decay)
model_g.load_state_dict(checkpoint['g_state_dict'])
model_f1.load_state_dict(checkpoint['f1_state_dict'])
if not args.uses_one_classifier:
model_f2.load_state_dict(checkpoint['f2_state_dict'])
optimizer_g.load_state_dict(checkpoint['optimizer_g'])
optimizer_f.load_state_dict(checkpoint['optimizer_f'])
logger.info("=> loaded checkpoint '{}'".format(args.resume))
else:
model_g, model_f1, model_f2 = get_models(net_name=args.net, res=args.res, input_ch=args.input_ch,
n_class=args.n_class,
method=args.method, uses_one_classifier=args.uses_one_classifier,
is_data_parallel=args.is_data_parallel)
optimizer_g = get_optimizer(model_g.parameters(), lr=args.lr, momentum=args.momentum, opt=args.opt,
weight_decay=args.weight_decay)
optimizer_f = get_optimizer(list(model_f1.parameters()) + list(model_f2.parameters()), opt=args.opt,
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.uses_one_classifier:
logger.warn ("f1 and f2 are same!")
model_f2 = model_f1
from pytorchgo.utils.pytorch_utils import model_summary,optimizer_summary
model_summary([model_g, model_f1, model_f2])
optimizer_summary([optimizer_g,optimizer_f])
mode = "%s-%s2%s-%s_%sch" % (args.src_dataset, args.src_split, args.tgt_dataset, args.tgt_split, args.input_ch)
if args.net in ["fcn", "psp"]:
model_name = "%s-%s-%s-res%s" % (args.method, args.savename, args.net, args.res)
else:
model_name = "%s-%s-%s" % (args.method, args.savename, args.net)
outdir = os.path.join(logger.get_logger_dir(), mode)
# Create Model Dir
pth_dir = os.path.join(outdir, "pth")
mkdir_if_not_exist(pth_dir)
# Create Model Dir and Set TF-Logger
tflog_dir = os.path.join(outdir, "tflog", model_name)
mkdir_if_not_exist(tflog_dir)
configure(tflog_dir, flush_secs=5)
# Save param dic
if resume_flg:
json_fn = os.path.join(args.outdir, "param-%s_resume.json" % model_name)
else:
json_fn = os.path.join(outdir, "param-%s.json" % model_name)
check_if_done(json_fn)
save_dic_to_json(args.__dict__, json_fn)
train_img_shape = tuple([int(x) for x in args.train_img_shape])
from pytorchgo.augmentation.segmentation import SubtractMeans, PIL2NP, RGB2BGR
logger.warn("if you choose deeplab or other weight file, please remember to change the image transform list...")
img_transform = Compose([#notice the order!!!
Scale(train_img_shape, Image.BILINEAR),
PIL2NP(),
RGB2BGR(),
SubtractMeans(),
ToTensor(),
#Normalize([.485, .456, .406], [.229, .224, .225]), must delete!!!
])
label_transform = Compose([
Scale(train_img_shape, Image.NEAREST),
ToLabel(),
])
val_img_transform = Compose([
Scale(train_img_shape, Image.BILINEAR),
PIL2NP(),
RGB2BGR(),
SubtractMeans(),
ToTensor(),
])
val_label_transform = Compose([Scale(cityscapes_image_shape, Image.NEAREST),# notice here, training, validation size difference, this is very tricky.
])
src_dataset = get_dataset(dataset_name=args.src_dataset, split=args.src_split, img_transform=img_transform,
label_transform=label_transform, test=False, input_ch=args.input_ch)
tgt_dataset = get_dataset(dataset_name=args.tgt_dataset, split=args.tgt_split, img_transform=img_transform,
label_transform=label_transform, test=False, input_ch=args.input_ch)
train_loader = torch.utils.data.DataLoader(
ConcatDataset(
src_dataset,
tgt_dataset
),
batch_size=args.batch_size, shuffle=True,
pin_memory=True)
if torch.cuda.is_available():
model_g.cuda()
model_f1.cuda()
model_f2.cuda()
weight = weight.cuda()
def get_validation_miou(model_g, model_f1, model_f2, quick_test=1e10):
if is_debug == 1:
quick_test = 2
logger.info("proceed test on cityscapes val set...")
model_g.eval()
model_f1.eval()
model_f2.eval()
target_loader = data.DataLoader(get_dataset(dataset_name="city16", split="val",
img_transform=val_img_transform,label_transform=val_label_transform, test=True, input_ch=3),
batch_size=1, pin_memory=True)
from tensorpack.utils.stats import MIoUStatistics
stat = MIoUStatistics(args.n_class)
interp = torch.nn.Upsample(size=(cityscapes_image_shape[1], cityscapes_image_shape[0]), mode='bilinear')
for index, (origin_imgs, labels, paths) in tqdm(enumerate(target_loader)):
if index > quick_test: break
path = paths[0]
imgs = Variable(origin_imgs.cuda(), volatile=True)
feature = model_g(imgs)
outputs = model_f1(feature)
if args.use_f2:
outputs += model_f2(feature)
pred = interp(outputs)[0, :].data.max(0)[1].cpu()
feed_predict = np.squeeze(np.uint8(pred.numpy()))
feed_label = np.squeeze(np.asarray(labels.numpy()))
stat.feed(feed_predict, feed_label)
logger.info("tensorpack IoU16: {}".format(stat.mIoU_beautify))
logger.info("tensorpack mIoU16: {}".format(stat.mIoU))
model_g.train()
model_f1.train()
model_f2.train()
return stat.mIoU
criterion_d = get_prob_distance_criterion(args.d_loss)
model_g.train()
model_f1.train()
model_f2.train()
best_mIoU = 0
for epoch in tqdm(range(start_epoch, args.epochs)):
d_loss_per_epoch = 0
c_loss_per_epoch = 0
from pytorchgo.utils.learning_rate import update_learning_rate_poly
cur_lr = update_learning_rate_poly(optimizer_g, args.lr, epoch, args.epochs)
cur_lr = update_learning_rate_poly(optimizer_f, args.lr, epoch, args.epochs)
for ind, batch_data in tqdm(enumerate(train_loader),total=len(train_loader), desc="epoch {}/{}".format(epoch, args.epochs)):
if is_debug ==1 and ind > 3:break
if is_debug==2 and ind > 200:break
source, target = batch_data
src_imgs, src_lbls = Variable(source[0]), Variable(source[1])
tgt_imgs = Variable(target[0])
if torch.cuda.is_available():
src_imgs, src_lbls, tgt_imgs = src_imgs.cuda(), src_lbls.cuda(), tgt_imgs.cuda()
# update generator and classifiers by source samples
optimizer_g.zero_grad()
optimizer_f.zero_grad()
loss = 0
d_loss = 0
outputs = model_g(src_imgs)
outputs1 = model_f1(outputs)
outputs2 = model_f2(outputs)
c_loss = CrossEntropyLoss2d_Seg(outputs1, src_lbls, class_num=args.n_class)
c_loss += CrossEntropyLoss2d_Seg(outputs2, src_lbls, class_num=args.n_class)
c_loss.backward(retain_graph=True)
####################
lambd = 1.0
model_f1.set_lambda(lambd)
model_f2.set_lambda(lambd)
outputs = model_g(tgt_imgs)
outputs1 = model_f1(outputs, reverse=True)
outputs2 = model_f2(outputs, reverse=True)
loss = - criterion_d(outputs1, outputs2)
loss.backward()
optimizer_f.step()
optimizer_g.step()
d_loss = -loss.data[0]
d_loss_per_epoch += d_loss
c_loss = c_loss.data[0]
c_loss_per_epoch += c_loss
if ind % 100 == 0:
logger.info("iter [%d/%d] DLoss: %.6f CLoss: %.4f LR: %.7f, best mIoU: %.5f" % (ind, len(train_loader), d_loss, c_loss, cur_lr, best_mIoU))
log_value('c_loss', c_loss_per_epoch, epoch)
log_value('d_loss', d_loss_per_epoch, epoch)
log_value('lr', cur_lr, epoch)
log_value('best_miou', best_mIoU, epoch)
logger.info("Epoch [%d/%d] DLoss: %.4f CLoss: %.4f LR: %.7f" % (
epoch, args.epochs, d_loss_per_epoch, c_loss_per_epoch, cur_lr))
cur_mIoU = get_validation_miou(model_g, model_f1, model_f2)
is_best = True if cur_mIoU > best_mIoU else False
checkpoint_fn = os.path.join(pth_dir, "current.pth.tar")
args.start_epoch = epoch + 1
save_dic = {
'epoch': epoch + 1,
'args': args,
'best_miou': best_mIoU,
'g_state_dict': model_g.state_dict(),
'f1_state_dict': model_f1.state_dict(),
'optimizer_g': optimizer_g.state_dict(),
'optimizer_f': optimizer_f.state_dict(),
}
if not args.uses_one_classifier:
save_dic['f2_state_dict'] = model_f2.state_dict()
torch.save(save_dic, checkpoint_fn)
if is_best:
best_mIoU = cur_mIoU
shutil.copyfile(checkpoint_fn, os.path.join(pth_dir, "model_best.pth.tar"))
| [
"[email protected]"
] | |
fc9a5598659638528728a6c4cf1799567d39fe30 | cd8a143c5f01fcf6130b129a7a578d0225476b2d | /worker/deps/gyp/test/mac/gyptest-app-error.py | df0781d45562a8225855c79fbe1381317035a652 | [
"BSD-3-Clause",
"ISC"
] | permissive | corvaxx/mediasoup | 47242bd5b0468b1f7e6de8077b11adf562aa244f | 304bce884755243f78ba3eeec5442888ecdc5340 | refs/heads/v3 | 2023-02-05T03:19:59.451099 | 2020-09-25T11:27:36 | 2020-09-25T11:27:36 | 303,987,529 | 0 | 3 | ISC | 2020-11-06T16:22:36 | 2020-10-14T11:00:43 | null | UTF-8 | Python | false | false | 1,321 | py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
from __future__ import print_function
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
print("This test is currently disabled: https://crbug.com/483696.")
sys.exit(0)
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format in ['ninja', 'xcode-ninja'] \
and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
| [
"[email protected]"
] | |
ed7afc9075c01158b21a3ec97fa1073c5c131a16 | 98eb51d47082627353cfea31f022ada7ccc6729e | /exer_2_aug24.py | 2f6e26521ee285d9a797abe59c8e1b671ae4ada3 | [] | no_license | pns845/Dictionary_Practice | 0c4fe3bb0e845d7c8b53d4cc74848a1549dbe262 | 1e98357dd59679587683f5daad730e3690192627 | refs/heads/master | 2022-12-11T07:27:51.574009 | 2020-08-25T17:21:01 | 2020-08-25T17:21:01 | 290,277,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | import sys
db_1 = {1: {'Interface':'Ethernet0', 'IP':'1.1.1.1' , 'Status':'up' },
2: {'Interface':'Ethernet1', 'IP':'2.2.2.2' , 'Status': 'down'},
3: {'Interface': 'Serial0', 'IP': '3.3.3.3', 'Status': 'up'},
4: {'Interface': 'Serial1', 'IP': '4.4.4.4', 'Status': 'up'}}
interface = sys.argv[1]
print(interface)
for key in db_1:
print("values:", db_1[key]['Interface'], interface)
    if db_1[key]['Interface'] == interface:
        print("status value for " + interface + " is " + db_1[key]['Status'])
| [
"[email protected]"
] | |
fc235403436e30bd87e18d8e4efbaf1982e0c9c7 | 3a9b154aa9d5e379683476f80f30630bf44d2102 | /Server_v1/appfront/forms/BusinessReportForm.py | 6318e4cf6d647ad833278a7755537c5966e237a0 | [] | no_license | KevinDon/py_amazon_analysis | 81995e360d2b536e1df6e515aae9457054edae29 | 13b5fbb046ca6516ac3a47e8f7867baf358011f4 | refs/heads/master | 2022-12-13T00:27:27.511783 | 2019-08-14T11:45:53 | 2019-08-14T11:45:53 | 185,160,162 | 0 | 1 | null | 2022-12-10T05:38:15 | 2019-05-06T08:56:40 | TSQL | UTF-8 | Python | false | false | 1,417 | py | # coding:utf-8
import datetime
from django import forms
from django.utils.translation import ugettext as _
from appfront.model import BusinessReportModel
class BusinessReportForm(forms.ModelForm):
class Meta:
model = BusinessReportModel
fields = '__all__'
def __init__(self, *args, **kwargs):
super(BusinessReportForm, self).__init__(*args, **kwargs)
# def has_change_permission(self, request):
# """ ๅๆถๅๅฐ็ผ่พ้ไปถๅ่ฝ """
# return False
#
# def save(self, *args, **kwargs):
# # ๅๅปบ็จๆทๆถ๏ผไธบ็จๆท่ชๅจ็ๆไธชไบบๅฏไธID
# # if not self.pk:
# # # ๅญๅจๅฐฑๆดๆฐ๏ผไธๅญๅจๅฐฑๅๅปบ
# # m = hashlib.md5()
# # m.update(self.username.encode(encoding="utf-8"))
# # self.uid = m.hexdigest()
# # logger.info(self.updated_at)
# logger.info(self)
# # module.updated_at = datetime.datetime.now()
# super(ProductQrcodeModel, self).save(self, *args, **kwargs)
# # super(ProductQrcodeModel, self).save(*args, **kwargs)
# def save_m2m(self, *args, **kwargs):
# print('save_m2m')
def clean_sku(self):
sku = self.cleaned_data.get('sku')
return sku.upper()
def clean_location(self):
location = self.cleaned_data.get('location')
return location.upper() if location is not None else ''
| [
"[email protected]"
] | |
8b603e86231d625c9d444794c3acdf9b4ce4ed43 | 4236066bcbd37400172d53382301bf8ac8ee8c40 | /cookbook/seismic_srtomo_sparse.py | 82a3eec522f50719b1c59f854ebcdb1f30f9ce51 | [
"BSD-3-Clause"
] | permissive | imclab/fatiando | eaf0e01746f198bbe65e140492a0f62abd299802 | edad8d020094d8fcda1e61402fcd46b686d6fd56 | refs/heads/master | 2020-12-26T03:23:37.984484 | 2013-11-27T23:00:22 | 2013-11-27T23:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | """
Seismic: 2D straight-ray tomography of large data sets and models using
sparse matrices
Uses synthetic data and a model generated from an image file.
Since the image is big, use sparse matrices and a steepest descent solver
(it doesn't require Hessians).
WARNING: may take a long time to calculate.
"""
import urllib
from os import path
import numpy
from fatiando import mesher, utils, seismic, vis, inversion
area = (0, 100000, 0, 100000)
shape = (100, 100)
model = mesher.SquareMesh(area, shape)
# Fetch the image from the online docs
urllib.urlretrieve(
'http://fatiando.readthedocs.org/en/latest/_static/logo.png', 'logo.png')
model.img2prop('logo.png', 4000, 10000, 'vp')
# Make some travel time data and add noise
src_loc = utils.random_points(area, 200)
rec_loc = utils.circular_points(area, 80, random=True)
srcs, recs = utils.connect_points(src_loc, rec_loc)
ttimes = seismic.ttime2d.straight(model, 'vp', srcs, recs, par=True)
ttimes, error = utils.contaminate(ttimes, 0.01, percent=True,
return_stddev=True)
# Make the mesh
mesh = mesher.SquareMesh(area, shape)
# Since the matrices are big, use the Steepest Descent solver to avoid dealing
# with Hessian matrices. It needs a starting guess, so start with 1000
inversion.gradient.use_sparse()
solver = inversion.gradient.steepest(1000*numpy.ones(mesh.size))
# and run the inversion
estimate, residuals = seismic.srtomo.run(ttimes, srcs, recs, mesh, sparse=True,
solver=solver, smooth=0.01)
# Convert the slowness estimate to velocities and add it the mesh
mesh.addprop('vp', seismic.srtomo.slowness2vel(estimate))
# Calculate and print the standard deviation of the residuals
# it should be close to the data error if the inversion was able to fit the data
print "Assumed error: %f" % (error)
print "Standard deviation of residuals: %f" % (numpy.std(residuals))
vis.mpl.figure(figsize=(14, 5))
vis.mpl.subplot(1, 2, 1)
vis.mpl.axis('scaled')
vis.mpl.title('Vp synthetic model of the Earth')
vis.mpl.squaremesh(model, prop='vp', vmin=4000, vmax=10000,
cmap=vis.mpl.cm.seismic)
cb = vis.mpl.colorbar()
cb.set_label('Velocity')
vis.mpl.points(src_loc, '*y', label="Sources")
vis.mpl.points(rec_loc, '^r', label="Receivers")
vis.mpl.legend(loc='lower left', shadow=True, numpoints=1, prop={'size':10})
vis.mpl.subplot(1, 2, 2)
vis.mpl.axis('scaled')
vis.mpl.title('Tomography result')
vis.mpl.squaremesh(mesh, prop='vp', vmin=4000, vmax=10000,
cmap=vis.mpl.cm.seismic)
cb = vis.mpl.colorbar()
cb.set_label('Velocity')
vis.mpl.figure()
vis.mpl.grid()
vis.mpl.title('Residuals (data with %.4f s error)' % (error))
vis.mpl.hist(residuals, color='gray', bins=15)
vis.mpl.xlabel("seconds")
vis.mpl.show()
| [
"[email protected]"
] | |
3567ad3bfb0a1aebe02d9c3198ecbb0427d721fd | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/asset/client/AssetClient.py | 2dc9c5d76d03a400482614cd1ff7944b21f55abf | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,037 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudclient import JDCloudClient
from jdcloud_sdk.core.config import Config
class AssetClient(JDCloudClient):
def __init__(self, credential, config=None, logger=None):
if config is None:
config = Config('asset.jdcloud-api.com')
super(AssetClient, self).__init__(credential, config, 'asset', '0.0.3', logger)
| [
"[email protected]"
] | |
dd9e049b2543768f53963145a7ce82b160deb901 | 9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100 | /configs/20171216_example_same_room.py | 775f154c4a3d840719099431f5d30d10f38d1201 | [] | no_license | SiyuanLee/caps | 0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3 | 476802e18ca1c7c88f1e29ed66a90c350aa50c1f | refs/heads/master | 2021-06-20T22:48:16.230354 | 2021-02-22T13:21:57 | 2021-02-22T13:21:57 | 188,695,489 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,937 | py | """
This is the example config file
same_room
no parameter share
take a look at transfer_config (differences are there)
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 5, 6, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1],
[1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1],
[1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1],
[1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
5: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only ##########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule, NoOpWrapperMK
# load the random sampled obs
import pickle
pkl_file = __cur_dir + 'eval_obs_array_random_same.pkl'
with open(pkl_file, 'rb') as f:
eval_obs_array = pickle.loads(f.read())
def seed_func():
return np.random.randint(0, 1000)
num_timesteps = 2e7
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 2, 1e-4 * lr_multiplier),
(num_iter * 3 / 4, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
# piecewise learning rate
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 2, 0.7),
(num_iter * 3 / 4, 0.1),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
######### transfer only #########
source_dirs = [
# an old map policy
# '/home/lsy/logs/target6c_12_05_17_21:26:25/dqn',
# '/home/lsy/PycharmProjects/ple-monstrerkong/examples/dqn_new/logs/target5_12_05_17_19:49:45',
# '/home/lsy/target8c_12_10_17_15:25:06/dqn',
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/same_room_12_12_17_20:54:53/dqn',
#'/home/lsy/same_room_12_12_17_20:54:53/dqn',
]
transfer_config = {
'source_dirs': source_dirs,
'online_q_omega': False, # default false off policy with experience replay
'q_omega_uniform_sample': False, # default false
'four_to_two': False, # default false frame_history_len must be 4!
'source_noop': False, # default false (false means source policies HAS noop action)
'no_share_para': True # default false set to true to stop sharing parameter between q network and q_omega/term
}
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 50000,
'learning_freq': learning_freq,
'frame_history_len': 2,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
'additional_wrapper': NoOpWrapperMK,
'eval_obs_array': eval_obs_array, # TODO: construct some eval_obs_array
'room_q_interval': 1e4, # q_vals will be evaluated every room_q_interval steps
'epoch_size': 5e4, # you decide any way
'config_name': str.split(__file_path, '/')[-1].replace('.py', ''), # the config file name
'transfer_config': transfer_config,
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': True,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
} | [
"[email protected]"
] | |
bd79ec0757f648e401c63f9fa0942494e20dc519 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03611/s342764728.py | 5355e439cbc0315cffc195524b903f5e9f4c1700 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from collections import Counter
n=int(input())
a=list(map(int,input().split()))
l=[]
for x in a:
l.append(x-1)
l.append(x)
l.append(x+1)
print(Counter(l).most_common(1)[0][1]) | [
"[email protected]"
] | |
ae35aec52d86a32bd0b75f1acf50678710a9f641 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2791/60695/239354.py | 6b532a321bf527606eb34026bc1d2b58906786af | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | n = int(input())
a = input().split(" ")
count = 0
for i in range(0, n):
if a[i] == "1":
count += 1
print(count)
for i in range(0, n):
if i == n-1:
print(a[i])
elif a[i + 1] == "1":
print(a[i] + " ", end="") | [
"[email protected]"
] | |
f5751fcb1dace35e1e62379427c047c026646cf0 | 49a167d942f19fc084da2da68fc3881d44cacdd7 | /kubernetes_asyncio/test/test_v1beta2_replica_set_list.py | 271452437d4493d410a2e9bcc3923ad1bcec5d20 | [
"Apache-2.0"
] | permissive | olitheolix/kubernetes_asyncio | fdb61323dc7fc1bade5e26e907de0fe6e0e42396 | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | refs/heads/master | 2020-03-19T12:52:27.025399 | 2018-06-24T23:34:03 | 2018-06-24T23:34:03 | 136,546,270 | 1 | 0 | Apache-2.0 | 2018-06-24T23:52:47 | 2018-06-08T00:39:52 | Python | UTF-8 | Python | false | false | 1,038 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1beta2_replica_set_list import V1beta2ReplicaSetList # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1beta2ReplicaSetList(unittest.TestCase):
"""V1beta2ReplicaSetList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta2ReplicaSetList(self):
"""Test V1beta2ReplicaSetList"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1beta2_replica_set_list.V1beta2ReplicaSetList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1c213bc633cad5adec5b1c8e8eb5afab1fe602e4 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_shg_integrals_test1.py | 097a0b17deff2c1c43c55c874d8ca09dd08c8970 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._basis1 import _basis1
class _shg_integrals_test1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Abc = None
self.Nab_min = None
self.Nrep = None
self.Check_accuracy = None
self.Accuracy_level = None
self.Calculate_derivatives = None
self.Test_overlap = None
self.Test_coulomb = None
self.Test_verf = None
self.Test_verfc = None
self.Test_vgauss = None
self.Test_gauss = None
self.Test_ra2m = None
self.M = None
self.Test_overlap_aba = None
self.Test_overlap_abb = None
self.BASIS_list = []
self._name = "SHG_INTEGRALS_TEST"
self._keywords = {'Abc': 'ABC', 'Nab_min': 'NAB_MIN', 'Nrep': 'NREP', 'Check_accuracy': 'CHECK_ACCURACY', 'Accuracy_level': 'ACCURACY_LEVEL', 'Calculate_derivatives': 'CALCULATE_DERIVATIVES', 'Test_overlap': 'TEST_OVERLAP', 'Test_coulomb': 'TEST_COULOMB', 'Test_verf': 'TEST_VERF', 'Test_verfc': 'TEST_VERFC', 'Test_vgauss': 'TEST_VGAUSS', 'Test_gauss': 'TEST_GAUSS', 'Test_ra2m': 'TEST_RA2M', 'M': 'M', 'Test_overlap_aba': 'TEST_OVERLAP_ABA', 'Test_overlap_abb': 'TEST_OVERLAP_ABB'}
self._repeated_subsections = {'BASIS': '_basis1'}
self._attributes = ['Section_parameters', 'BASIS_list']
def BASIS_add(self, section_parameters=None):
new_section = _basis1()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = section_parameters
self.BASIS_list.append(new_section)
return new_section
| [
"[email protected]"
] | |
078578d693f12594a7667580b822807b12ef8089 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_18822.py | a1d55691cd1f2a14f0f0a7ef08990fd9c8c8e58f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | # celery recurring task not executing
python manage.py celery worker -B
| [
"[email protected]"
] | |
d7e3eee3c33f77ccf4b9aac394c4a45f5bbbac0d | 7e7e8fb08e00f235306b97908ae083d670c00062 | /Froms/1.formpro/formpro/wsgi.py | fd61d7862293b341cd7fe21018301878529850b3 | [] | no_license | Aadeshkale/Django-devlopment | b60fd8c846b7187ac7d464839055353e877479e3 | 445e6b65825fe03be34a13b30817adbb160bb608 | refs/heads/master | 2021-07-18T13:53:14.216350 | 2020-05-08T07:19:22 | 2020-05-08T07:19:22 | 146,633,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for formpro project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'formpro.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
f18486755d13b3765b34036f097c316cfe0d6f99 | 5ad74a3b997b25d3c3aac3bd27d30876ec5d9304 | /python/brenmy/geometry/bmMesh.py | cb695ed57c087ed557c4d719d62baf2c44e91c51 | [] | no_license | lwk205/brenmy | c587ee56c22cd4332878c980af14362453add8ee | 16e4c3e978699e398fdc76611cda39ce39da0e33 | refs/heads/master | 2023-01-24T03:09:27.093726 | 2020-11-24T09:56:11 | 2020-11-24T09:56:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | '''
Created on 23 Jun 2018
@author: Bren
OpenMaya API mesh utilities
'''
from maya.api import OpenMaya
def get_points(mesh):
# get dag
sl = OpenMaya.MSelectionList()
sl.add(mesh)
dag = sl.getDagPath(0)
# get points
m_mesh = OpenMaya.MFnMesh(dag)
    points = m_mesh.getPoints()  # getPoints() returns vertex positions; getVertices() returns polygon connectivity
return points
def set_points(mesh, points):
# get dag
sl = OpenMaya.MSelectionList()
sl.add(mesh)
dag = sl.getDagPath(0)
# get points
m_mesh = OpenMaya.MFnMesh(dag)
    m_mesh.setPoints(points)  # MFnMesh exposes setPoints() for writing vertex positions
return True | [
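# Usage sketch (added; assumes a Maya session with a mesh named 'pSphere1' in the scene):
#   pts = get_points('pSphere1')                               # vertex positions
#   pts = [OpenMaya.MPoint(p.x, p.y + 1.0, p.z) for p in pts]  # lift every vertex one unit
#   set_points('pSphere1', pts)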
"[email protected]"
] | |
b78cfbb6561597fad5d2bbf609bc75d888603e52 | 1bd5f83d7faf77ad92d141ba07d25259dd2c4550 | /LeetCode/PascalsTriangle2.py | bc8ddfd2de4983224b1bd3dcabeb24d02db6c083 | [] | no_license | puneet29/algos | a0f5d638909c12e86faa5544d0ae7d9381b8f1fc | 54545a7502f7359968fbd27ec5bf111b82df324d | refs/heads/master | 2021-07-22T22:45:31.745942 | 2021-07-19T18:13:40 | 2021-07-19T18:13:40 | 189,939,790 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if(rowIndex < 1):
return [1] * (rowIndex + 1)
prev = self.getRow(rowIndex-1)
curr = [1 for i in range(rowIndex+1)]
for i in range(1, rowIndex):
curr[i] = prev[i-1] + prev[i]
return curr
| [
"[email protected]"
] | |
a0e5c7c8b90240f173efb2e5b446c5fb9cefe55f | 060fbf2a69a90ad92de5fc877521d5ea6b298007 | /test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/models/dictionary_wrapper.py | 6a304de7269c0577cfc4283e6d45211281f132dd | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | iscai-msft/autorest.python | db47a8f00253148fbc327fe0ae1b0f7921b397c6 | a9f38dd762fbc046ce6197bfabea2f56045d2957 | refs/heads/master | 2021-08-02T13:06:34.768117 | 2018-11-21T00:29:31 | 2018-11-21T00:29:31 | 161,554,205 | 0 | 0 | MIT | 2018-12-12T22:42:14 | 2018-12-12T22:42:14 | null | UTF-8 | Python | false | false | 913 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DictionaryWrapper(Model):
"""DictionaryWrapper.
:param default_program:
:type default_program: dict[str, str]
"""
_attribute_map = {
'default_program': {'key': 'defaultProgram', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(DictionaryWrapper, self).__init__(**kwargs)
self.default_program = kwargs.get('default_program', None)
| [
"[email protected]"
] | |
3f6e4a8e9423724fa17c503ff03fce2fe280d2bd | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras2/keras83_embedding1.py | 4867bc0bbc211ed5091a7697040ae28f561545ad | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,648 | py | from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
docs = ['really fun', 'truly great', 'truly well made film',
        'definitely recommend it', 'want to watch once more', 'meh',
        'disappointing', 'rather boring', 'awkward acting',
        'dull', 'really tedious', 'truly entertaining', 'such adorable characters']
# positive 1, negative 0
labels = np.array([1,1,1,1,1,0,0,0,0,0,0,1,1])
token = Tokenizer()
token.fit_on_texts(docs)
print(token.word_index)
'''
{'truly': 1, 'really': 2, 'fun': 3, 'great': 4, 'well': 5, 'made': 6, 'film': 7, 'definitely': 8, 'recommend': 9, 'it': 10, 'want': 11, 'to': 12, 'watch': 13, 'once': 14, 'more': 15, 'meh': 16, 'disappointing': 17, 'rather': 18, 'boring': 19, 'awkward': 20, 'acting': 21, 'dull': 22, 'tedious': 23, 'entertaining': 24, 'such': 25, 'adorable': 26, 'characters': 27}
'''
x = token.texts_to_sequences(docs)
print(x)
# [[2, 3], [1, 4], [1, 5, 6, 7], [8, 9, 10], [11, 12, 13, 14, 15], [16], [17], [18, 19], [20, 21], [22], [2, 23], [1, 24], [25, 26, 27]]
# The sequences have different lengths, so they cannot be fed to a model as-is. To make the lengths uniform, pad to the longest sequence and fill the empty slots with 0.
from tensorflow.keras.preprocessing.sequence import pad_sequences # works on 2-D and 3-D input
pad_x = pad_sequences(x, padding='pre') # zeros added at the front
# pad_x = pad_sequences(x, padding='post') # zeros added at the back
print(pad_x)
# pre
'''
[[ 0 0 0 2 3]
[ 0 0 0 1 4]
[ 0 1 5 6 7]
[ 0 0 8 9 10]
[11 12 13 14 15]
[ 0 0 0 0 16]
[ 0 0 0 0 17]
[ 0 0 0 18 19]
[ 0 0 0 20 21]
[ 0 0 0 0 22]
[ 0 0 0 2 23]
[ 0 0 0 1 24]
[ 0 0 25 26 27]]
'''
# post
'''
[[ 0 0 2 3]
[ 0 0 1 4]
[ 1 5 6 7]
[ 0 8 9 10]
[12 13 14 15]
[ 0 0 0 16]
[ 0 0 0 17]
[ 0 0 18 19]
[ 0 0 20 21]
[ 0 0 0 22]
[ 0 0 2 23]
[ 0 0 1 24]
[ 0 25 26 27]]
'''
print(pad_x.shape) #(13, 5)
pad_x = pad_sequences(x,maxlen=5, truncating='pre',padding='pre')
# maxlen => cut (or pad) every sequence to the length I want
# truncating='pre' : drop tokens from the front
# truncating='post' : drop tokens from the back
print(pad_x)
print(np.unique(pad_x))
# [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
# 24 25 26 27]
print(len(np.unique(pad_x))) #28
# values 0 through 27; with maxlen=4 the leading 11 would have been truncated (maxlen=5 keeps everything)
'''
If one-hot encoding is used as-is, the vectors get far too long.
With a corpus of 10,000 word tokens, vectorizing means building 10,000 word vectors,
each made up of 9,999 zeros and a single 1.
Word embeddings were introduced to avoid that waste of space (the quick size check below
contrasts the two for this toy vocabulary).
'''
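# Quick size check (added illustration, not part of the original script): with this toy
# vocabulary a one-hot vector needs one slot per token index, while the Embedding layer
# built below represents each token with just 11 floats.
onehot_width = len(token.word_index) + 1   # 27 words + index 0 used for padding -> 28
embedding_width = 11                       # output_dim chosen for the Embedding layer below
print('one-hot width:', onehot_width, '/ embedding vector width:', embedding_width)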
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Dense, LSTM, Flatten, Conv1D
model = Sequential()
# model.add(Embedding(input_dim=28, output_dim=11, input_length=5))
model.add(Embedding(28,11))
# input_dim=28 : vocabulary size; must be >= the actual number of distinct tokens (27 words + the padding index 0)
# output_dim=11 : size of the word vectors handed to the next layer; can be chosen freely
# input_length=5 : the last value of pad_x.shape
# the layer has to cover every value in np.unique(pad_x), so passing input_dim=27 here raises an error
# Embedding passes a 3-D tensor (batch, length, output_dim) on to the next layer
model.add(LSTM(32))
# LSTM takes that 3-D tensor and passes a 2-D one on
model.add(Dense(1, activation='sigmoid'))
# model.add(Flatten())
# model.add(Dense(1))
model.summary()
'''
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 5, 11) 308
_________________________________________________________________
flatten (Flatten) (None, 55) 0
_________________________________________________________________
dense (Dense) (None, 1) 56
=================================================================
Total params: 364
Trainable params: 364
Non-trainable params: 0
_________________________________________________________________
'''
# model.add(Embedding(28,11))
'''
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, None, 11) 308
=================================================================
Total params: 308
Trainable params: 308
Non-trainable params: 0
_________________________________________________________________
'''
# why 308 params? input_dim * output_dim (28 * 11 = 308)
# input_length has no effect on the parameter count
# model.add(LSTM(32))
'''
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 5, 11) 308
_________________________________________________________________
lstm (LSTM) (None, 32) 5632
_________________________________________________________________
dense (Dense) (None, 1) 33
=================================================================
Total params: 5,973
Trainable params: 5,973
Non-trainable params: 0
_________________________________________________________________
'''
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(pad_x, labels, epochs=100)
acc = model.evaluate(pad_x, labels)[1]
print(acc)
# 1.0 | [
"[email protected]"
] |