Dataset schema (29 columns per row; ranges give the min–max observed values; ⌀ marks nullable fields):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3–616) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0–112) |
| license_type | string (2 classes) |
| repo_name | string (length 5–115) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (777 classes) |
| visit_date | timestamp[us] (2015-08-06 10:31:46 – 2023-09-06 10:44:38) |
| revision_date | timestamp[us] (1970-01-01 02:38:32 – 2037-05-03 13:00:00) |
| committer_date | timestamp[us] (1970-01-01 02:38:32 – 2023-09-06 01:08:06) |
| github_id | int64 (4.92k – 681M, ⌀) |
| star_events_count | int64 (0 – 209k) |
| fork_events_count | int64 (0 – 110k) |
| gha_license_id | string (22 classes) |
| gha_event_created_at | timestamp[us] (2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀) |
| gha_created_at | timestamp[us] (2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀) |
| gha_language | string (149 classes) |
| src_encoding | string (26 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (3 – 10.2M) |
| extension | string (188 classes) |
| content | string (length 3 – 10.2M) |
| authors | sequence (length 1) |
| author_id | string (length 1–132) |

Each record below lists these fields in order, `|`-separated, with the file content inlined after the `py |` extension field and a trailing `| [authors] | author_id |` closing the row.
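A minimal sketch (not part of the dump) of how rows with this schema might be streamed and filtered on the metadata columns; the dataset identifier and split below are placeholders, not taken from this page:

from datasets import load_dataset  # assumes the Hugging Face `datasets` library

rows = load_dataset("<dataset-id>", split="train", streaming=True)  # "<dataset-id>" is hypothetical
for row in rows:
    # keep small, permissively licensed, non-generated Python files
    if (row["license_type"] == "permissive"
            and not row["is_generated"]
            and row["length_bytes"] < 5000):
        print(row["repo_name"], row["path"], row["length_bytes"])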
5c7616aab880b8627ecf408e28c5d0fd92c361c3 | c54f5a7cf6de3ed02d2e02cf867470ea48bd9258 | /pyobjc/PyOpenGL-2.0.2.01/src/shadow/GL.3DFX.tbuffer.0001.py | e9fc25dbf782313f2fe2acf197b631a13a69c4d1 | [] | no_license | orestis/pyobjc | 01ad0e731fbbe0413c2f5ac2f3e91016749146c6 | c30bf50ba29cb562d530e71a9d6c3d8ad75aa230 | refs/heads/master | 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py |
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _tbuffer
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
__version__ = _tbuffer.__version__
__date__ = _tbuffer.__date__
__api_version__ = _tbuffer.__api_version__
__author__ = _tbuffer.__author__
__doc__ = _tbuffer.__doc__
glTbufferMask3DFX = _tbuffer.glTbufferMask3DFX
glInitTbuffer3DFX = _tbuffer.glInitTbuffer3DFX
__info = _tbuffer.__info
| ["ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"] | ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25 |
7cd1a27a27d0669662e639931e57b15c76d48382 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_roy999_main.py | d064a2f9d473ab5a4c2b76814eef2cbb9857e32e | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 388 | py |
def process(input_file, out):
t = int(input_file.readline())
for i in range(1, t + 1):
line = input_file.readline().strip()
result = solve(line)
out.write("Case #%i: %s\n" % (i, result))
def solve(s):
li = [s[0]]
for c in s[1:]:
if c < li[0]:
li.append(c)
else:
li.insert(0, c)
    return ''.join(li)
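# Quick check (added note; examples worked out by hand, not in the original
# file): the greedy prepends c when c >= the current front character and
# appends otherwise, e.g. solve("CAB") == "CAB" and solve("JAM") == "MJA".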
"[[email protected]]"
] | |
801203ff33777b339565f62d514f14301dc94b25 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/qumulo/azext_qumulo/aaz/latest/qumulo/__cmd_group.py | fabbbf233a1f1fca33e7e3fe0128b75b847faa29 | ["LicenseRef-scancode-generic-cla", "MIT"] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | false | false | 594 | py |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
"qumulo",
)
class __CMDGroup(AAZCommandGroup):
"""Manage qumulo
"""
pass
__all__ = ["__CMDGroup"]
| ["[email protected]"] | |
12cdaf829a0bb0cf9396af90e035ecbb241bf6a3 | cf19e3a857d488ca449e515f641c686c7409fa87 | /C01-Python-Basics/13-C01P02/Solution01/solution.py | e603c40480dfa07cb86d35678dcafea84cf3dcb0 | ["MIT"] | permissive | CreeperBeatz/Python-101-Forever | c2133962c0bd50e09f58df3908c8c52234363b7b | 5b3e8706bec84104712d96419210a1e266b4d518 | refs/heads/master | 2023-06-07T07:53:23.796406 | 2021-07-05T14:19:33 | 2021-07-05T14:19:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py |
# Video - https://youtu.be/PmW8NfctNpk
def sum_of_digits(n):
n = abs(n)
digits = []
char_digits = list(str(n))
for char_digit in char_digits:
digits.append(int(char_digit))
return sum(digits)
tests = [
(1325132435356, 43),
(123, 6),
(6, 6),
(-10, 1)
]
for n, expected in tests:
result = sum_of_digits(n)
print(result == expected)
| ["[email protected]"] | |
49574c1f871402a4d3ddd50ed609a9a7baf3f6d9 | 76b4790cc405d8287fccfa2dd691f4415fc88a11 | /format/version/blurayversion.py | 97064f571b887faf7f9db879eea0cf2bb3d46f01 | [] | no_license | Nicba1010/blumount-python | 8131f6685469e73b05068c3e41c9b709ccc21a5a | 648c44b74617186172c767c66c98299e7688c056 | refs/heads/master | 2022-12-16T15:14:12.294720 | 2020-09-13T11:55:01 | 2020-09-13T11:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py |
from aenum import Enum
class BluRayVersion(Enum):
VERSION_1 = "0100"
VERSION_2 = "0200"
VERSION_3 = "0300"
| ["[email protected]"] | |
0c5769fd45ee518599738a13853fe563e251f03f | 529e713a78e82de2ae5d44cfb8ef209e0894d72a | /typer-cli-python/source_code_step_3/rptodo/cli.py | 89531fcd5fb31d59d49a457dba1661b7d555049 | ["MIT"] | permissive | realpython/materials | cd2f548276be2c82f134ca03eadb1cd279e0f26e | d2d62756d3854f54a12a767f2bf9470486c0ceef | refs/heads/master | 2023-09-05T22:12:29.806738 | 2023-08-31T20:56:28 | 2023-08-31T20:56:28 | 132,374,697 | 4,678 | 6,482 | MIT | 2023-09-12T22:22:06 | 2018-05-06T20:46:18 | HTML | false | false | 1,423 | py |
"""This module provides the RP To-Do CLI."""
from pathlib import Path
from typing import Optional
import typer
from rptodo import ERRORS, __app_name__, __version__, config, database
app = typer.Typer()
@app.command()
def init(
db_path: str = typer.Option(
str(database.DEFAULT_DB_FILE_PATH),
"--db-path",
"-db",
prompt="to-do database location?",
),
) -> None:
"""Initialize the to-do database."""
app_init_error = config.init_app(db_path)
if app_init_error:
typer.secho(
f'Creating config file failed with "{ERRORS[app_init_error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
db_init_error = database.init_database(Path(db_path))
if db_init_error:
typer.secho(
f'Creating database failed with "{ERRORS[db_init_error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
else:
typer.secho(f"The to-do database is {db_path}", fg=typer.colors.GREEN)
def _version_callback(value: bool) -> None:
if value:
typer.echo(f"{__app_name__} v{__version__}")
raise typer.Exit()
@app.callback()
def main(
version: Optional[bool] = typer.Option(
None,
"--version",
"-v",
help="Show the application's version and exit.",
callback=_version_callback,
is_eager=True,
)
) -> None:
return
| ["[email protected]"] | |
5f91e1a1b7cb3ff8938287fb6fb8a0b3e3c9cd66 | b4e4399f6d18ee83760604fc67c90d3f5eac52dd | /10 Days of Statistics/Day4.BinomialDistributionI.py | 165bbc7384022eb86ad2468c05a50783bcab3f4c | [] | no_license | angelvv/HackerRankSolution | 88415c3ace68ddc10c76ae8df43ab5193aa921d4 | 8b2c323507f9a1826b4156aeab94815f41b6fc84 | refs/heads/master | 2021-07-17T20:51:50.758364 | 2020-05-26T17:25:05 | 2020-05-26T17:25:05 | 167,896,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py |
# Enter your code here. Read input from STDIN. Print output to STDOUT
boy,girl = list(map(float, input().split())) # or =[float(x) for x in input().split()]
pBoy = boy/(boy+girl)
# hard-coded calculation
p0Boy = (1-pBoy)**6
p1Boy = 6 * pBoy * (1-pBoy)**5
p2Boy = 6*5/2 * (pBoy**2) * ((1-pBoy)**4)
pAtLeast3Boy = 1 - p0Boy - p1Boy - p2Boy
"""#Alternatively
def factorial(n):
return 1 if n==0 else n * factorial(n-1)
def nChoosek(n, k):
return factorial(n) / (factorial(k) * factorial(n - k))
def binomial(n, k, p):
return nChoosek(n, k) * (p**k) * ((1 - p)**(n - k))
pAtLeast3Boy = 1 - binomial(6, 0, pBoy) - binomial(6, 1, pBoy) - binomial(6, 2, pBoy)
"""
print(round(pAtLeast3Boy,3))
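# Sanity check (assumed sample, not part of the original file): for the
# input "1.09 1", pBoy = 1.09/2.09 ~= 0.5215 and the script prints 0.696.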
| ["[email protected]"] | |
70b366de4381df49308e12f0fad8a1925a18c1a7 | b6bb53780f3d186ccbc6a900a977a766b6ac4ffb | /doc/src/slides/src/solver.py | 3783db7097e638d13cef6d46ee58fd929429c1bc | ["BSD-3-Clause"] | permissive | wolf9s/doconce | 1fa91766cad77dd16debade99e48954cfc7b6dee | 0c7fecb267502a74cdeb7d90100cd2bdc0701cc1 | refs/heads/master | 2021-01-17T15:44:51.703555 | 2015-09-28T07:59:41 | 2015-09-28T07:59:41 | 43,336,393 | 1 | 0 | null | 2015-09-29T01:02:48 | 2015-09-29T01:02:48 | null | UTF-8 | Python | false | false | 4,512 | py |
from numpy import *
from matplotlib.pyplot import *
import sys
def solver(I, a, T, dt, theta):
"""Solve u'=-a*u, u(0)=I, for t in (0,T]; step: dt."""
dt = float(dt) # avoid integer division
N = int(round(T/dt)) # no of time intervals
T = N*dt # adjust T to fit time step dt
u = zeros(N+1) # array of u[n] values
t = linspace(0, T, N+1) # time mesh
u[0] = I # assign initial condition
for n in range(0, N): # n=0,1,...,N-1
u[n+1] = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)*u[n]
return u, t
def exact_solution(t, I, a):
return I*exp(-a*t)
def explore(I, a, T, dt, theta=0.5, makeplot=True):
"""
Run a case with the solver, compute error measure,
and plot the numerical and exact solutions (if makeplot=True).
"""
u, t = solver(I, a, T, dt, theta) # Numerical solution
u_e = exact_solution(t, I, a)
e = u_e - u
E = sqrt(dt*sum(e**2))
if makeplot:
figure() # create new plot
t_e = linspace(0, T, 1001) # very fine mesh for u_e
u_e = exact_solution(t_e, I, a)
plot(t, u, 'r--o') # red dashes w/circles
plot(t_e, u_e, 'b-') # blue line for u_e
legend(['numerical', 'exact'])
xlabel('t')
ylabel('u')
title('Method: theta-rule, theta=%g, dt=%g' % (theta, dt))
theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
savefig('%s_%g.png' % (theta2name[theta], dt))
show()
return E
def define_command_line_options():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--I', '--initial_condition', type=float,
default=1.0, help='initial condition, u(0)',
metavar='I')
parser.add_argument('--a', type=float,
default=1.0, help='coefficient in ODE',
metavar='a')
parser.add_argument('--T', '--stop_time', type=float,
default=1.0, help='end time of simulation',
metavar='T')
parser.add_argument('--makeplot', action='store_true',
help='display plot or not')
parser.add_argument('--dt', '--time_step_values', type=float,
default=[1.0], help='time step values',
metavar='dt', nargs='+', dest='dt_values')
return parser
def read_command_line(use_argparse=True):
if use_argparse:
parser = define_command_line_options()
args = parser.parse_args()
print 'I={}, a={}, makeplot={}, dt_values={}'.format(
args.I, args.a, args.makeplot, args.dt_values)
return args.I, args.a, args.T, args.makeplot, args.dt_values
else:
if len(sys.argv) < 6:
            print 'Usage: %s I a T on/off dt1 dt2 dt3 ...' % \
sys.argv[0]; sys.exit(1)
I = float(sys.argv[1])
a = float(sys.argv[2])
T = float(sys.argv[3])
makeplot = sys.argv[4] in ('on', 'True')
dt_values = [float(arg) for arg in sys.argv[5:]]
return I, a, T, makeplot, dt_values
def main():
I, a, T, makeplot, dt_values = read_command_line()
r = {}
for theta in 0, 0.5, 1:
E_values = []
for dt in dt_values:
E = explore(I, a, T, dt, theta, makeplot=False)
E_values.append(E)
# Compute convergence rates
m = len(dt_values)
r[theta] = [log(E_values[i-1]/E_values[i])/
log(dt_values[i-1]/dt_values[i])
for i in range(1, m, 1)]
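        # (added note) assumes E ~ C*dt**r, so each pairwise rate is
        # r = ln(E_{i-1}/E_i) / ln(dt_{i-1}/dt_i) for consecutive runs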
for theta in r:
print '\nPairwise convergence rates for theta=%g:' % theta
print ' '.join(['%.2f' % r_ for r_ in r[theta]])
return r
def verify_convergence_rate():
r = main()
tol = 0.1
expected_rates = {0: 1, 1: 1, 0.5: 2}
for theta in r:
r_final = r[theta][-1]
diff = abs(expected_rates[theta] - r_final)
if diff > tol:
return False
return True # all tests passed
if __name__ == '__main__':
if 'verify_rates' in sys.argv:
sys.argv.remove('verify_rates')
if not '--dt' in sys.argv:
print 'Must assign several dt values through the --dt option'
sys.exit(1) # abort
if verify_convergence_rate():
pass
else:
print 'Bug in the implementation!'
else:
# Perform simulations
main()
| ["[email protected]"] | |
624ad33267ce9d85d2f3e6466b07a72980e4d01d | c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6 | /Vision Artificial/Emparejamiento.py | 272fabf738a5fc92fc5a35e7e1cfddf092898c0c | [] | no_license | mecomontes/Python | a0b4a0b69ae33ad3623e908731710563392d1615 | daba4247cca90c43a979e3e3f292cd7b8951b3d0 | refs/heads/master | 2023-05-30T05:24:41.999196 | 2020-03-23T02:30:09 | 2020-03-23T02:30:09 | 249,317,310 | 1 | 0 | null | 2023-05-22T22:42:36 | 2020-03-23T02:29:38 | Python | UTF-8 | Python | false | false | 4,283 | py |
""" Template matching
Theory
Template matching is a method for searching for and finding the location of a template image inside a larger
image. OpenCV ships the function cv2.matchTemplate() for this purpose. This function simply slides the template
image over the input image (as in 2D convolution) and, at each point, compares the template with the
corresponding patch of the input image. Several comparison methods are implemented in OpenCV. The function
returns a grayscale image in which each pixel indicates how closely the neighborhood of that pixel matches the
template.
If the input image has size (WxH) and the template image has size (wxh), the output image will have size
(W-w+1, H-h+1). Once you have the result, you can use the cv2.minMaxLoc() function to find where the
maximum/minimum value lies. The maximum/minimum value corresponds to the top-left corner of the rectangle of
width w and height h. That rectangle is the region of the input image that best matches the template.
Note: if you are using cv2.TM_SQDIFF as the comparison method, the minimum value gives the best match.
Template matching in OpenCV
Below, the performance of the different matching methods of cv2.matchTemplate() is compared by looking for a
man's face among coffee beans.
The code that does this is shown below:"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('cafe.jpeg',0)
img2 = img.copy()
template = cv2.imread('template.png',0)
w, h = template.shape[::-1]
# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
for meth in methods:
img = img2.copy()
method = eval(meth)
    # Apply template matching
res = cv2.matchTemplate(img,template,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 10)
plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Matching result'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Detected point'), plt.xticks([]), plt.yticks([])
plt.suptitle(meth)
plt.show()
"""En este caso se observa que los seis métodos dan resultados similares. Sin embargo, esto puede variar dependiendo de la imagen
y la plantilla en particular. Nótese que en los cuatro primeros gráficos a la izquierda el punto de máxima coincidencia es blanco
(correspondiente con un máximo) mientras que, con los últimos dos métodos el punto de máxima coincidencia es negro (correspondiente
con un mínimo)
Emparejamiento de plantillas con múltiples objetos
En la sección anterior, buscamos en la imagen la cara de un hombre, que aparece solo una vez en la imagen. Supongamos que está
buscando un objeto que tiene múltiples ocurrencias, cv2.minMaxLoc() no le dará todas las ubicaciones. En ese caso, fijaremos un
valor umbral por encima (o por debajo,dependiendo del método que usemos) del cual se asumirá que el objeto en la plantilla coincide
con el objeto en la imagen. A continuación un ejemplo, en el que se muestra una captura de pantalla del famoso juego Mario.
Utilizaremos el método explicado para encontrar todas las monedas."""
img_rgb = cv2.imread('mario.jpeg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('moneda.jpeg',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
umbral = 0.8
loc = np.where( res >= umbral)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imwrite('res.png',img_rgb)
| ["[email protected]"] | |
13576721467bab9704a406a6cae793fe3350b13e | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /Exercise_for_Job/华为牛客网/华为.py | e16a1e45968827e8b2b41c24cc644f53c2a44a4a | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 512 | py |
data = input().split()
str_data = []
def constr(s):
    # right-pad with '0' to a fixed width of 8
    # (fixed: the original ignored its parameter, read the loop variable
    # `i` from the enclosing scope, and shadowed the built-in `str`)
    return s + '0' * (8 - len(s))
for i in data[1:]:
if len(i)<=8:
str_data.append(constr(i))
else:
while(len(i)>8):
str_data.append(i[0:8])
i = i[8:]
str_data.append(constr(i))
sort_data = sorted(str_data)  # compare whole chunks; the original key looked only at the first character
str_out = ''
for ele in sort_data:
str_out = str_out+ele+' '
print(str_out)
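# Worked sample (assumed, not part of the original file): for the input
# "2 abc 123456789" the padded 8-char chunks are abc00000, 12345678 and
# 90000000, and the printed line is "12345678 90000000 abc00000 ".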
| ["[email protected]"] | |
ee24487f3caeb07665544aa96a98f6b62d4090df | f2a623b5fe2bc6a8bcaa330d15a60b84c6dddc18 | /models/cnn_mine_gcn.py | b0e40d65f876e73398e4d9e726fa8b4886f81429 | [] | no_license | thilinicooray/VSRL | 6822c58a29b005dab39f2d62444234c2efa1eb9c | 15077ace266e75d1e704910ae1e69fbfdfc902aa | refs/heads/master | 2020-03-22T03:40:52.137380 | 2018-08-02T06:16:35 | 2018-08-02T06:16:35 | 139,445,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,292 | py |
import torch
import torch.nn as nn
import torchvision as tv
import torch.nn.functional as F
from . import utils
import math
import torch.nn.init as init
import random
from .pygcn import gcn
from .action_graph import action_graph
from .faster_rcnn.utils.config import cfg
class resnet_modified_small(nn.Module):
def __init__(self):
super(resnet_modified_small, self).__init__()
self.resnet = tv.models.resnet34(pretrained=True)
#finetune last conv later set
for p in self.resnet.layer4.parameters():
p.requires_grad = True
#probably want linear, relu, dropout
self.linear = nn.Linear(7*7*512, 1024)
self.dropout2d = nn.Dropout2d(.5)
self.dropout = nn.Dropout(.5)
self.relu = nn.LeakyReLU()
init.xavier_normal_(self.linear.weight)
#self.conv1 = nn.Conv2d(512, 256, 3, stride=1, padding=1)
#self.conv2 = nn.Conv2d(265, 256, 3, stride=2, padding=1)
#self.conv3 = nn.Conv2d(256, 256, 3, stride=2, padding=1)
#utils.init_weight(self.conv1)
#utils.init_weight(self.conv2)
#utils.init_weight(self.conv3)
self.linear1 = nn.Linear(14*14, 1024)
self.dropout2d1 = nn.Dropout2d(.5)
self.dropout1 = nn.Dropout(.5)
self.relu1 = nn.LeakyReLU()
init.xavier_normal_(self.linear1.weight)
def base_size(self): return 512
def segment_count(self): return 128
def rep_size(self): return 1024
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
#x = self.dropout2d(x)
x_full = self.dropout(self.relu(self.linear(x.view(-1, 7*7*self.base_size()))))
x_full_segment = self.dropout1(self.relu1(self.linear1(x.view(-1, 14*14))))
x_full_segment = x_full_segment.view(-1,self.segment_count(),self.rep_size())
return torch.cat((torch.unsqueeze(x_full,1), x_full_segment), 1)
class resnet_modified_small1(nn.Module):
def __init__(self):
super(resnet_modified_small1, self).__init__()
self.resnet = tv.models.resnet34(pretrained=True)
#probably want linear, relu, dropout
self.linear = nn.Linear(7*7*512, 1024)
self.dropout2d = nn.Dropout2d(.5)
self.dropout = nn.Dropout(.5)
self.relu = nn.LeakyReLU()
utils.init_weight(self.linear)
def base_size(self): return 512
def rep_size(self): return 1024
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
x = self.dropout2d(x)
return self.dropout(self.relu(self.linear(x.view(-1, 7*7*self.base_size()))))
class resnet_modified_large(nn.Module):
def __init__(self):
super(resnet_modified_large, self).__init__()
self.resnet = tv.models.resnet101(pretrained=True)
#probably want linear, relu, dropout
self.linear = nn.Linear(7*7*2048, 1024)
self.dropout2d = nn.Dropout2d(.5)
self.dropout = nn.Dropout(.5)
self.relu = nn.LeakyReLU()
utils.init_weight(self.linear)
def base_size(self): return 2048
def rep_size(self): return 1024
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
x = self.dropout2d(x)
#print x.size()
return self.dropout(self.relu(self.linear(x.view(-1, 7*7*self.base_size()))))
class parallel_table(nn.Module):
def __init__(self, embedding_size, num_verbs, num_roles):
super(parallel_table,self).__init__()
self.verb_lookup_table = nn.Embedding(num_verbs, embedding_size)
#org code has size num_role + 1 x embedding
#how to use embeddings here? what is the gain?
self.role_lookup_table = nn.Embedding(num_roles+1, embedding_size, padding_idx=num_roles)
self.verb_lookup_table.weight.clone().fill_(0)
self.role_lookup_table.weight.clone().fill_(0)
def forward(self,x):
#todo: what is the proper way to make batchx1024 -> batchx6x1024
image_embed = x[0]
verb_embed = self.verb_lookup_table(x[1])
role_embed = self.role_lookup_table(x[2])
role_embed_reshaped = role_embed.transpose(0,1)
max_role_count = x[2].size()[1]
image_embed_expand = image_embed.expand(max_role_count, image_embed.size(0), image_embed.size(1))
verb_embed_expand = verb_embed.expand(max_role_count, verb_embed.size(0), verb_embed.size(1))
'''final_role_init = torch.empty(role_embed.size(), requires_grad=False)
for i in range(max_role_count):
final_role_init[:,i, :] = image_embed * verb_embed * role_embed[:,i, :]
out3 = self.role_lookup_table(x[2])
out_size = out3.size()[1]
out1 = torch.unsqueeze(x[0].repeat(1,out_size),1)
out1 = out1.view(out3.size())
out2 = torch.unsqueeze(self.verb_lookup_table(x[1]).repeat(1,out_size),1)
out2 = out2.view(out3.size())
y = [out1, out2,out3 ]
#print('parallel size',final_role_init.size())
#print('parallel ',final_role_init)'''
final_role_init = image_embed_expand * verb_embed_expand * role_embed_reshaped
return final_role_init.transpose(0,1)
class baseline(nn.Module):
def __init__(self, encoder, gpu_mode,cnn_type='resnet_34'):
super(baseline, self).__init__()
self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.train_transform = tv.transforms.Compose([
tv.transforms.Resize(224),
tv.transforms.RandomCrop(224),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
self.normalize,
])
self.dev_transform = tv.transforms.Compose([
tv.transforms.Resize(224),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
self.normalize,
])
self.encoder = encoder
self.gpu_mode = gpu_mode
self.max_role_count = self.encoder.get_max_role_count()
self.num_verbs = self.encoder.get_num_verbs()
self.num_roles = self.encoder.get_num_roles()
self.vocab_size = self.encoder.get_num_labels() #todo:how to decide this? original has 2000 only
self.embedding_size = 1024 #user argument
self.num_graph_steps = 3
#get the vision module
'''if cnn_type == 'vgg16' :
self.cnn = vgg_modified()
elif cnn_type == 'rn_conv':
self.cnn = ConvInputModel()'''
if cnn_type == 'resnet_34':
self.cnn = resnet_modified_small()
elif cnn_type == "resnet_101" :
self.cnn = resnet_modified_large()
else:
print('unknown base network')
exit()
self.img_size = self.cnn.base_size()
self.graph = action_graph(self.cnn.segment_count(), self.num_graph_steps, self.gpu_mode)
self.verb_module = nn.Sequential(
#nn.ReLU(),
nn.Linear(self.embedding_size, self.embedding_size),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(self.embedding_size, self.num_verbs)
)
self.verb_module.apply(utils.init_weight)
'''self.parallel = parallel_table(self.embedding_size, self.num_verbs, self.num_roles)
self.role_graph_init_module = nn.Sequential(
self.parallel,
nn.ReLU()
)'''
self.verb_lookup_table = nn.Embedding(self.num_verbs, self.embedding_size)
#utils.init_weight(self.verb_lookup_table)
self.role_lookup_table = nn.Embedding(self.num_roles + 1, self.embedding_size, padding_idx=self.num_roles)
#utils.init_weight(self.role_lookup_table, pad_idx=self.num_roles)
self.verb_lookup_table.weight.clone().fill_(0)
self.role_lookup_table.weight.clone().fill_(0)
self.relu = nn.ReLU()
#nhid and dropout, user arg
#in GCN, they don't define #of nodes in init. they pass an adj matrix in forward.
self.role_graph = gcn.GCN(
nfeat=self.embedding_size,
nhid=1024,
nclass=1024,
dropout=0.5
)
'''self.lstm = nn.LSTM(self.embedding_size, self.embedding_size, num_layers=2, bidirectional=True)
utils.init_lstm(self.lstm)'''
self.role_module = nn.ModuleList([
nn.Sequential(nn.Linear(self.embedding_size, self.embedding_size), nn.ReLU(), nn.Dropout(.5),
nn.Linear(self.embedding_size, len(self.encoder.role2_label[role_cat])))
for role_cat in self.encoder.role_cat])
'''self.role_module = nn.Sequential(
nn.ReLU(),
nn.Linear(self.embedding_size, self.vocab_size),
#nn.ReLU()
)'''
#self.hidden = self.init_hidden()
self.role_module.apply(utils.init_weight)
def train_preprocess(self): return self.train_transform
def dev_preprocess(self): return self.dev_transform
def forward(self, img, verbs, roles, hidden=None):
#print('input size', im_data.size())
batch_size = img.size(0)
img_embedding_batch = self.cnn(img)
verb_init = img_embedding_batch[:,0]
#print('verb_init', verb_init.size(), torch.unsqueeze(verb_init, 1).size())
vert_init = img_embedding_batch
verb_init_expand = verb_init.expand(img_embedding_batch.size(1)-1, verb_init.size(0), verb_init.size(1))
verb_init_expand = verb_init_expand.clone().transpose(0,1)
edge_init = img_embedding_batch[:,1:] + verb_init_expand
vert_states = self.graph((vert_init,edge_init))
roles = roles.type(torch.LongTensor)
verbs = verbs.type(torch.LongTensor)
if self.gpu_mode >= 0:
roles = roles.to(torch.device('cuda'))
verbs = verbs.to(torch.device('cuda'))
role_embedding = self.role_lookup_table(roles)
verb_embedding = self.verb_lookup_table(verbs)
#mask = self.encoder.
#print('role embedding', role_embedding[0][3])
vert_no_verb = vert_states[:,1:]
#print('check :', vert_states.size(), vert_no_verb.size(), role_embedding.size())
verb_expand = verb_embedding.expand(self.max_role_count, verb_embedding.size(0),verb_embedding.size(-1))
verb_expand = verb_expand.transpose(1,0)
role_verb = verb_expand * role_embedding
role_mul = torch.matmul(role_verb, vert_no_verb.transpose(-2, -1))#torch.mul(role_embedding, vert_state_expanded)
#print('cat :', role_mul[0,-1])
role_mul = role_mul.masked_fill(role_mul == 0, -1e9)
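        # (added note) exact-zero scores correspond to padded role slots;
        # filling them with -1e9 drives their softmax weight to ~0 before
        # the explicit encoder mask is applied below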
p_attn = F.softmax(role_mul, dim = -1)
mask = self.encoder.apply_mask(roles, p_attn)
p_attn = mask * p_attn
att_weighted_role = torch.matmul(p_attn, vert_no_verb)
verb_predict = self.verb_module(vert_states[:,0])
#role_init_embedding = self.role_graph_init_module([img_embedding_batch, verbs, roles])
#print('role init: ', role_init_embedding.size())
#graph forward
#adjacency matrix for fully connected undirected graph
#set only available roles to 1. every verb doesn't have 6 roles.
adj_matrx = self.encoder.get_adj_matrix(verbs)
if self.gpu_mode >= 0:
adj_matrx = torch.autograd.Variable(adj_matrx.cuda())
else:
adj_matrx = torch.autograd.Variable(adj_matrx)
role_predict = self.role_graph(self.relu(att_weighted_role), adj_matrx)
for i,module in enumerate(self.role_module):
if i == 0:
role_label_predict = module(role_predict[:,i])
else:
role_label_predict = torch.cat((role_label_predict.clone(), module(role_predict[:,i])), 1)
#print('out from forward :', role_label_predict.size())
return verb_predict, role_label_predict
def calculate_loss(self, verb_pred, gt_verbs, role_label_pred, gt_labels):
verb_criterion = nn.CrossEntropyLoss()
target = gt_verbs
verb_loss = verb_criterion(verb_pred, target)
batch_size = verb_pred.size()[0]
loss = 0
start_idx = self.encoder.role_start_idx
end_idx = self.encoder.role_end_idx
for i in range(batch_size):
for index in range(gt_labels.size()[1]):
frame_loss = 0
for j in range(0, self.encoder.get_max_role_count()):
if j == 0:
frame_loss += utils.cross_entropy_loss(role_label_pred[i][start_idx[j]:end_idx[j]], gt_labels[i,index,j], len(self.encoder.role2_label['agent']))
elif j == 1:
frame_loss += utils.cross_entropy_loss(role_label_pred[i][start_idx[j]:end_idx[j]], gt_labels[i,index,j], len(self.encoder.role2_label['place']))
elif j == 2:
frame_loss += utils.cross_entropy_loss(role_label_pred[i][start_idx[j]:end_idx[j]], gt_labels[i,index,j], len(self.encoder.role2_label['tool']))
elif j == 3:
frame_loss += utils.cross_entropy_loss(role_label_pred[i][start_idx[j]:end_idx[j]], gt_labels[i,index,j], len(self.encoder.role2_label['item']))
else:
frame_loss += utils.cross_entropy_loss(role_label_pred[i][start_idx[j]:end_idx[j]], gt_labels[i,index,j], len(self.encoder.role2_label['other']))
loss += frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])
final_loss = verb_loss + loss/batch_size
#print('loss :', final_loss)
return final_loss
| ["[email protected]"] | |
be9640eae3af444d57216e80c1f052482168c13c | 70765f8ede8f2f652ecd092cc5569d66026863d3 | /gate.py | 5a090e62bc4ec5c415a4801d83ec8dccc79f8870 | [] | no_license | sfd198977/RefinementTest | 237e8fc46902d3b6380913c17380279ba0ba9e1f | b3ca4d855467857fd269075ad2c40e55f7b459aa | refs/heads/master | 2020-03-27T08:59:41.536047 | 2017-11-01T11:29:20 | 2017-11-01T11:29:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,669 | py |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author: Donny You([email protected])
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.functional as F
import numpy as np
import torch.optim as optim
import math
class Downsampler(nn.Module):
def __init__(self):
super(Downsampler, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
def forward(self, x):
out, index = self.pool1(x)
return out, index
class NonBottleNeck(nn.Module):
def __init__(self, inplanes, outplanes, dilated_rate):
super(NonBottleNeck, self).__init__()
self.conv1_v = nn.Conv2d(inplanes, outplanes, kernel_size=(3, 1),
padding=(dilated_rate, 0), dilation=dilated_rate)
self.conv1_h = nn.Conv2d(outplanes, outplanes, kernel_size=(1, 3),
padding=(0, dilated_rate), dilation=dilated_rate)
self.bn1 = nn.BatchNorm2d(outplanes)
self.relu1_v = nn.ReLU(inplace=True)
self.relu1_h = nn.ReLU(inplace=True)
self.conv2_v = nn.Conv2d(outplanes, outplanes, kernel_size=(3, 1),
padding=(dilated_rate, 0), dilation=dilated_rate)
self.conv2_h = nn.Conv2d(outplanes, outplanes, kernel_size=(1, 3),
padding=(0, dilated_rate), dilation=dilated_rate)
self.bn2 = nn.BatchNorm2d(outplanes)
self.relu2_v = nn.ReLU(inplace=True)
self.relu3 = nn.ReLU(inplace=True)
def forward(self, x):
x_in = self.conv1_v(x)
x_in = self.relu1_v(x_in)
x_in = self.conv1_h(x_in)
x_in = self.relu1_h(x_in)
x_in = self.bn1(x_in)
x_in = self.conv2_v(x_in)
x_in = self.relu2_v(x_in)
x_in = self.conv2_h(x_in)
x_in = self.bn2(x_in)
out = x_in + x
out = self.relu3(out)
return out
class nxBottleNeck(nn.Module):
def __init__(self, planes, times):
super(nxBottleNeck, self).__init__()
self.times = times
self.bottleneck1 = NonBottleNeck(planes, planes, 1)
self.bottleneck2 = NonBottleNeck(planes, planes, 1)
self.bottleneck3 = NonBottleNeck(planes, planes, 1)
self.bottleneck4 = NonBottleNeck(planes, planes, 1)
self.bottleneck5 = NonBottleNeck(planes, planes, 1)
def forward(self, x):
for i in range(self.times):
x = self.bottleneck1(x)
return x
class CasBottleNeck(nn.Module):
def __init__(self, planes):
super(CasBottleNeck, self).__init__()
self.bottleneck1 = NonBottleNeck(planes, planes, 2)
self.bottleneck2 = NonBottleNeck(planes, planes, 4)
self.bottleneck3 = NonBottleNeck(planes, planes, 8)
self.bottleneck4 = NonBottleNeck(planes, planes, 16)
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = self.bottleneck4(x)
return x
class DUC(nn.Module):
def __init__(self, inplanes, planes, upscale_factor=2):
super(DUC, self).__init__()
self.relu = nn.ReLU()
self.conv = nn.Conv2d(inplanes, planes*4, kernel_size=3,
padding=1)
self.bn = nn.BatchNorm2d(planes*4)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.pixel_shuffle(x)
return x
class Deconv(nn.Module):
def __init__(self, inplanes, planes):
super(Deconv, self).__init__()
self.deconv = nn.ConvTranspose2d(inplanes, planes, 3, stride=2, padding=1, output_padding=1)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
def forward(self, x):
x = self.deconv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Fusion(nn.Module):
def __init__(self, inplanes, planes):
super(Fusion, self).__init__()
        # integer division so channel counts stay ints under Python 3
        self.deconv = nn.ConvTranspose2d(inplanes, planes//2, 3, stride=2, padding=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(planes//2)
        self.relu1 = nn.ReLU()
        self.duc = DUC(inplanes, planes//2)
self.conv = nn.Conv2d(planes, planes, 3, padding=1)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU()
def forward(self, x):
out1 = self.deconv(x)
out1 = self.bn1(out1)
out1 = self.relu1(out1)
out2 = self.duc(x)
out = torch.cat((out1, out2), 1)
out = self.conv(out)
out = self.bn2(out)
out = self.relu2(out)
return out
class UP(nn.Module):
def __init__(self, inplanes, planes):
super(UP, self).__init__()
self.unpool = nn.MaxUnpool2d(2, stride=2)
self.conv = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, indices):
outputs = self.unpool(x, indices = indices)
outputs = self.conv(outputs)
outputs = self.bn(outputs)
outputs = self.relu(outputs)
return outputs
class GaitUnit(nn.Module):
def __init__(self, h_planes, l_planes):
super(GaitUnit, self).__init__()
self.conv_h_1 = nn.Conv2d(h_planes, h_planes+l_planes, kernel_size=3, padding=1)
self.bn_h_1 = nn.BatchNorm2d(h_planes+l_planes)
self.relu_h_1 = nn.ReLU()
self.conv_l_1 = nn.Conv2d(l_planes, h_planes+l_planes, kernel_size=3, padding=1)
self.bn_l_1 = nn.BatchNorm2d(h_planes+l_planes)
self.relu_l_1 = nn.ReLU()
self.deconv_l_1 = Deconv(h_planes+l_planes, h_planes+l_planes)
def forward(self, h_x, l_x):
h_x = self.conv_h_1(h_x)
h_x = self.bn_h_1(h_x)
h_x = self.relu_h_1(h_x)
l_x = self.conv_l_1(l_x)
l_x = self.bn_l_1(l_x)
l_x = self.relu_l_1(l_x)
l_x = self.deconv_l_1(l_x)
x = h_x * l_x
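        # (added note) the elementwise product acts as a gate: the upsampled
        # low-resolution context modulates the high-resolution feature map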
return x
class RefineUnit(nn.Module):
def __init__(self, m_planes, r_planes, outplanes):
super(RefineUnit, self).__init__()
self.conv1 = nn.Conv2d(m_planes, r_planes, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(r_planes)
self.relu1 = nn.ReLU()
self.deconv1 = nn.ConvTranspose2d(r_planes*2, outplanes, 3, stride=2, padding=1, output_padding=1)
def forward(self, m_x, r_x):
m_x = self.conv1(m_x)
m_x = self.bn1(m_x)
m_x = self.relu1(m_x)
x = torch.cat((m_x, r_x), 1)
out = self.deconv1(x)
return out
class FCN(nn.Module):
def __init__(self, num_classes):
super(FCN, self).__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu1 = nn.ReLU()
self.downsampler1 = Downsampler()
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU()
self.downsampler2 = Downsampler()
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU()
self.bottleneck_5 = nxBottleNeck(128, 5)
self.downsampler3 = Downsampler()
self.casbottleneck1 = CasBottleNeck(128)
self.casbottleneck2 = CasBottleNeck(128)
self.gaitunit1 = GaitUnit(64, 128)
self.gaitunit2 = GaitUnit(128, 128)
self.refineunit1 = RefineUnit(192, 6, 6)
self.refineunit2 = RefineUnit(256, 6, 6)
self.deconv1 = nn.ConvTranspose2d(128, self.num_classes, 3, stride=2, padding=1, output_padding=1)
# self.deconv1 = Deconv(128, 64)
self.bottleneck_21 = nxBottleNeck(64, 2)
self.bottleneck_22 = nxBottleNeck(32, 2)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x, indices1 = self.downsampler1(x)
x = self.conv2(x)
x = self.bn2(x)
x_1 = self.relu2(x)
x, indices2 = self.downsampler2(x_1)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x_2 = self.bottleneck_5(x)
x, indices3 = self.downsampler3(x_2)
x_3 = self.casbottleneck1(x)
x = self.casbottleneck2(x_3)
out1 = self.deconv1(x)
gait1 = self.gaitunit2(x_2, x_3)
out2 = self.refineunit2(gait1, out1)
gait2 = self.gaitunit1(x_1, x_2)
out3 = self.refineunit1(gait2, out2)
return out1, out2, out3
if __name__ == "__main__":
pass
| ["[email protected]"] | |
ebb69f7ee17d733cf4f31b161a6e6a4d3c547dc5 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /app/login/model/manager.py | 5528fcd8736c0716526ba111e2d8e93647621f0f | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py |
# -*- coding:utf-8 -*-
"""
created by sphinx on 18/9/14.
"""
import os
import json
import time
account_cache = {}
class ServerManager(object):
def __init__(self):
self._servers = {}
self._is_static = False
if os.path.exists('server_list.json'):
sl = json.load(open('server_list.json'))
self._is_static = True
for _ in sl:
self._servers[_['name']] = _
print 'static server list:', self._servers
def sync_server(self, name, ip, port, status, no):
if not self._is_static:
server = dict(name=name, ip=ip, port=port, status=status, no=no)
for k, v in self._servers.items():
if v.get('name') == name:
del self._servers[k]
self._servers[time.time()] = server
return True
self._servers[time.time()] = server
def get_server(self):
if not self._is_static:
for t in self._servers.keys():
if time.time() - t > 180:
del self._servers[t]
return self._servers.values()
server_manager = ServerManager()
| ["[email protected]"] | |
63cbc8726893fed727dd884f838445cd7a2fd8e0 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /s3api_read_1/bucket-cor_get.py | 906435f85b7feabef6ef4ae7f3baf051c9a51d1e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py |
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import read_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-cors.html
if __name__ == '__main__':
"""
delete-bucket-cors : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/delete-bucket-cors.html
put-bucket-cors : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-cors.html
"""
parameter_display_string = """
# bucket : The bucket name for which to get the cors configuration.
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
    read_one_parameter("s3api", "get-bucket-cors", "bucket", add_option_dict)
| ["[email protected]"] | |
b34c147b369988436ae350599d980ba016babbb4 | dd65b9bc9475a6cc58817fd45c078e5a6abae241 | /Tensorflow/car/web-tf2/gcf-packs/tensorflow2.0/source/tensorflow/_api/v2/compat/v1/test/__init__.py | bb58509c61e8cafbbf2c339d984a1ae2bc2e3996 | [] | no_license | jumbokh/gcp_class | 5b68192ab4ad091362d89ad667c64443b3b095bb | 0a8e2663bfb5b01ce20146da178fa0c9bd7c6625 | refs/heads/master | 2021-10-22T09:22:04.634899 | 2021-10-21T12:46:10 | 2021-10-21T12:46:10 | 228,617,096 | 8 | 7 | null | 2021-08-25T15:55:30 | 2019-12-17T12:58:17 | Python | UTF-8 | Python | false | false | 1,438 | py |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Testing.
See the [Testing](https://tensorflow.org/api_guides/python/test) guide.
Note: `tf.test.mock` is an alias to the python `mock` or `unittest.mock`
depending on the python version.
"""
from __future__ import print_function as _print_function
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import assert_equal_graph_def_v1 as assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.platform.benchmark import TensorFlowBenchmark as Benchmark
from tensorflow.python.platform.benchmark import benchmark_config
from tensorflow.python.platform.googletest import StubOutForTesting
from tensorflow.python.platform.googletest import mock
from tensorflow.python.platform.test import get_temp_dir
from tensorflow.python.platform.test import is_built_with_cuda
from tensorflow.python.platform.test import main
from tensorflow.python.platform.test import test_src_dir_path
del _print_function
| ["[email protected]"] | |
bf244d1b65a6a4561dbd18468710c9bbcc05e83c | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/655.print-binary-tree/655.print-binary-tree_112661405.py | c0358586c049dfc21db112211b47cf96f501e3f7 | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def ht(self, root):
if root is None:
return 0
return 1 + max(self.ht(root.left), self.ht(root.right))
def helper(self, root, level, start, end):
if root is None:
return
        mid = (end + start) // 2  # integer midpoint; '/' would yield a float index on Python 3
self.res[level][mid] = str(root.val)
self.helper(root.left, level+1, start, mid-1)
self.helper(root.right, level+1, mid+1, end)
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
h = self.ht(root)
w = 1
for i in range(1, h):
w = (w * 2 + 1)
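        # (added note) a full binary tree of height h is 2**h - 1 columns wide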
self.res = [['' for _ in range(w)] for _ in range(h)]
self.helper(root, 0, 0, w-1)
return self.res
| ["[email protected]"] | |
3e08e5e55afb4c7621c582c8857f90a7fe2b2b29 | 0d24036dcf8736c0392a1ee1c2f3b45633221d8a | /etc/src/genpy-ipv4-ospf-oper/cisco_ios_xr_ipv4_ospf_oper/ospf/processes/process/default_vrf/areas/area/interface_briefs/interface_brief/ospf_sh_if_brief_pb2.py | 028ee948eb83579f5d6488f2d82242c9e7e73fc6 | [] | no_license | mspiez/telemetry_collector | c4b97c6686748fc20748898a25e9fc756d2d0b63 | 52ed12c06debfe04181f0bfea9854a66ed8bb3df | refs/heads/master | 2020-12-19T23:28:08.358956 | 2020-05-02T19:54:38 | 2020-05-02T19:54:38 | 235,883,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 15,817 | py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cisco_ios_xr_ipv4_ospf_oper/ospf/processes/process/default_vrf/areas/area/interface_briefs/interface_brief/ospf_sh_if_brief.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cisco_ios_xr_ipv4_ospf_oper/ospf/processes/process/default_vrf/areas/area/interface_briefs/interface_brief/ospf_sh_if_brief.proto',
package='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief',
syntax='proto3',
serialized_pb=_b('\n\x81\x01\x63isco_ios_xr_ipv4_ospf_oper/ospf/processes/process/default_vrf/areas/area/interface_briefs/interface_brief/ospf_sh_if_brief.proto\x12jcisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief\"V\n\x15ospf_sh_if_brief_KEYS\x12\x14\n\x0cprocess_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61rea_id\x18\x02 \x01(\r\x12\x16\n\x0einterface_name\x18\x03 \x01(\t\"\xfc\x03\n\x10ospf_sh_if_brief\x12\x16\n\x0einterface_name\x18\x32 \x01(\t\x12\x16\n\x0einterface_area\x18\x33 \x01(\t\x12\x19\n\x11interface_address\x18\x34 \x01(\t\x12\x16\n\x0einterface_mask\x18\x35 \x01(\r\x12\x1b\n\x13interface_link_cost\x18\x36 \x01(\r\x12\x1c\n\x14ospf_interface_state\x18\x37 \x01(\t\x12\'\n\x1finterface_fast_detect_hold_down\x18\x38 \x01(\x08\x12 \n\x18interface_neighbor_count\x18\x39 \x01(\r\x12$\n\x1cinterface_adj_neighbor_count\x18: \x01(\r\x12\x18\n\x10interfaceis_madj\x18; \x01(\x08\x12\x1c\n\x14interface_madj_count\x18< \x01(\r\x12\xa0\x01\n\x13interface_madj_list\x18= \x03(\x0b\x32\x82\x01.cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj\"\xc9\x01\n\x16ospf_sh_interface_madj\x12\x16\n\x0einterface_area\x18\x01 \x01(\t\x12\x14\n\x0cmadj_area_id\x18\x02 \x01(\r\x12 \n\x18interface_neighbor_count\x18\x03 \x01(\r\x12$\n\x1cinterface_adj_neighbor_count\x18\x04 \x01(\r\x12\x1b\n\x13interface_link_cost\x18\x05 \x01(\r\x12\x1c\n\x14ospf_interface_state\x18\x06 \x01(\tb\x06proto3')
)
_OSPF_SH_IF_BRIEF_KEYS = _descriptor.Descriptor(
name='ospf_sh_if_brief_KEYS',
full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_KEYS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='process_name', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_KEYS.process_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='area_id', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_KEYS.area_id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_name', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_KEYS.interface_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=242,
serialized_end=328,
)
_OSPF_SH_IF_BRIEF = _descriptor.Descriptor(
name='ospf_sh_if_brief',
full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interface_name', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_name', index=0,
number=50, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_area', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_area', index=1,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_address', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_address', index=2,
number=52, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_mask', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_mask', index=3,
number=53, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_link_cost', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_link_cost', index=4,
number=54, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ospf_interface_state', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.ospf_interface_state', index=5,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_fast_detect_hold_down', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_fast_detect_hold_down', index=6,
number=56, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_neighbor_count', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_neighbor_count', index=7,
number=57, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_adj_neighbor_count', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_adj_neighbor_count', index=8,
number=58, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interfaceis_madj', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interfaceis_madj', index=9,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_madj_count', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_madj_count', index=10,
number=60, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_madj_list', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief.interface_madj_list', index=11,
number=61, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=839,
)
_OSPF_SH_INTERFACE_MADJ = _descriptor.Descriptor(
name='ospf_sh_interface_madj',
full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interface_area', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.interface_area', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='madj_area_id', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.madj_area_id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_neighbor_count', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.interface_neighbor_count', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_adj_neighbor_count', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.interface_adj_neighbor_count', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interface_link_cost', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.interface_link_cost', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ospf_interface_state', full_name='cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj.ospf_interface_state', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=842,
serialized_end=1043,
)
_OSPF_SH_IF_BRIEF.fields_by_name['interface_madj_list'].message_type = _OSPF_SH_INTERFACE_MADJ
DESCRIPTOR.message_types_by_name['ospf_sh_if_brief_KEYS'] = _OSPF_SH_IF_BRIEF_KEYS
DESCRIPTOR.message_types_by_name['ospf_sh_if_brief'] = _OSPF_SH_IF_BRIEF
DESCRIPTOR.message_types_by_name['ospf_sh_interface_madj'] = _OSPF_SH_INTERFACE_MADJ
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
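# Editor's note: the registrations below materialise the concrete message
# classes from the descriptors at import time via protobuf's reflection
# machinery.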
ospf_sh_if_brief_KEYS = _reflection.GeneratedProtocolMessageType('ospf_sh_if_brief_KEYS', (_message.Message,), dict(
DESCRIPTOR = _OSPF_SH_IF_BRIEF_KEYS,
__module__ = 'cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_KEYS)
))
_sym_db.RegisterMessage(ospf_sh_if_brief_KEYS)
ospf_sh_if_brief = _reflection.GeneratedProtocolMessageType('ospf_sh_if_brief', (_message.Message,), dict(
DESCRIPTOR = _OSPF_SH_IF_BRIEF,
__module__ = 'cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief)
))
_sym_db.RegisterMessage(ospf_sh_if_brief)
ospf_sh_interface_madj = _reflection.GeneratedProtocolMessageType('ospf_sh_interface_madj', (_message.Message,), dict(
DESCRIPTOR = _OSPF_SH_INTERFACE_MADJ,
__module__ = 'cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_if_brief_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.default_vrf.areas.area.interface_briefs.interface_brief.ospf_sh_interface_madj)
))
_sym_db.RegisterMessage(ospf_sh_interface_madj)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
901fc421da3ad2c486a8789dd60bf1740f10dabf | 8da76aabcf9cfea3478f56037edbb5fa1513140b | /tallsmall/production/tallsmall/account/.svn/text-base/models.py.svn-base | 0b5425bbac404be079a32c07f10089f307f8dc28 | [] | no_license | mikanyman/.virtualenvs-legacy | 039479f31f2ca9f9a3d3544d8837429ddd0a7492 | 5486128b5b3b7ddb9ec81d43e3bb601a23b4025a | refs/heads/master | 2020-12-31T07:10:07.018881 | 2017-02-01T02:16:55 | 2017-02-01T02:16:55 | 80,566,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | """
#http://www.turnkeylinux.org/blog/django-profile
#http://docs.django-userena.org/en/latest/installation.html
from django.contrib.auth.models import User
from userena.models import UserenaLanguageBaseProfile
from django.db import models
#class UserProfile(models.Model):
class UserProfile(UserenaLanguageBaseProfile):
user = models.ForeignKey(User, unique=True)
url = models.URLField("Website", blank=True)
company = models.CharField(max_length=50, blank=True)
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
"""
# from django-userena demo_project
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from userena.models import UserenaLanguageBaseProfile
import datetime
class Profile(UserenaLanguageBaseProfile):
""" Default profile """
GENDER_CHOICES = (
(1, _('Male')),
(2, _('Female')),
)
user = models.OneToOneField(User,
unique=True,
verbose_name=_('user'),
related_name='profile')
gender = models.PositiveSmallIntegerField(_('gender'),
choices=GENDER_CHOICES,
blank=True,
null=True)
website = models.URLField(_('website'), blank=True, verify_exists=True)
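    # NOTE: URLField's verify_exists flag was deprecated in Django 1.4 and
    # removed in 1.5; it is left in place because this tree targets a legacy stack.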
location = models.CharField(_('location'), max_length=255, blank=True)
birth_date = models.DateField(_('birth date'), blank=True, null=True)
about_me = models.TextField(_('about me'), blank=True)
@property
def age(self):
if not self.birth_date: return False
else:
today = datetime.date.today()
# Raised when birth date is February 29 and the current year is not a
# leap year.
try:
birthday = self.birth_date.replace(year=today.year)
except ValueError:
day = today.day - 1 if today.day != 1 else today.day + 2
birthday = self.birth_date.replace(year=today.year, day=day)
if birthday > today: return today.year - self.birth_date.year - 1
else: return today.year - self.birth_date.year
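    # Editor's note on the ValueError branch above: birth_date.replace()
    # fails for Feb 29 in non-leap years, so a nearby substitute day
    # (derived from today's day-of-month) is used before comparing with today.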
| [
"[email protected]"
] | ||
e08a02cbe98519c266ea2415228cd8b1314f6563 | 7cd2c3868d83be96f2699eeed4f6f4ae9dbf3a35 | /programmers/DFSorBFS/타겟넘버.py | 3e62a03c143dbc24cf8dad5323f0f1826310106e | [] | no_license | Kimyechan/codingTestPractice | 4189e97f8543b9afc87374539acb5d1cecf40ce6 | c9d3878eb0d47fab22151fc0d39eef1dfd2210b5 | refs/heads/master | 2023-06-09T14:34:55.984710 | 2021-06-27T18:30:45 | 2021-06-27T18:30:45 | 282,819,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | import sys
import copy
sys.setrecursionlimit(100000)
operatorList = []
# def dfs(operators, count):
# if len(operators) == count:
# operatorList.append(copy.deepcopy(operators))
# return
#
# for x in [1, -1]:
# operators.append(x)
# dfs(operators, count)
# operators.pop()
#
#
# def solution(numbers, target):
# answer = 0
# count = len(numbers)
#
# operators = []
#
# dfs(operators, count)
# for operation in operatorList:
# result = 0
# for i in range(len(operation)):
# result += operation[i] * numbers[i]
# if result == target:
# answer += 1
#
# return answer
answer = 0
def dfs(numbers, target, sum, index):
global answer
if index == len(numbers):
if sum == target:
answer += 1
return
dfs(numbers, target, sum + numbers[index], index + 1)
dfs(numbers, target, sum - numbers[index], index + 1)
def solution(numbers, target):
dfs(numbers, target, 0, 0)
return answer
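# Editor's sketch (not part of the original submission): the same count via
# explicit sign enumeration. Note that the DFS above accumulates into the
# module-global `answer`, so solution() is effectively single-shot.
from itertools import product

def solution_bruteforce(numbers, target):
    return sum(1 for signs in product([1, -1], repeat=len(numbers))
               if sum(s * x for s, x in zip(signs, numbers)) == target)

print(solution_bruteforce([1, 1, 1, 1, 1], 3))  # 5, matching the DFS result below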
print(solution([1, 1, 1, 1, 1], 3)) | [
"[email protected]"
] | |
b583fa1b34037eec42586638fbb6f6bdc628533d | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/discrete/tests/results/results_discrete.py | 53d2caf897ce0a673bf33158ff925a0e447ee2f1 | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 51,708 | py | """
Test Results for discrete models from Stata
"""
import os
import numpy as np
#### Discrete Model Tests ####
# Note that there is a slight refactor of the classes, so that one dataset
# might be used for more than one model
cur_dir = os.path.abspath(os.path.dirname(__file__))
class Anes(object):
def __init__(self):
"""r
Results are from Stata 11 (checked vs R nnet package).
"""
self.nobs = 944
def mnlogit_basezero(self):
params = [-.01153598, .29771435, -.024945, .08249144, .00519655,
-.37340167, -.08875065, .39166864, -.02289784, .18104276,
.04787398, -2.2509132, -.1059667, .57345051, -.01485121,
-.00715242, .05757516, -3.6655835, -.0915567, 1.2787718,
-.00868135, .19982796, .08449838, -7.6138431, -.0932846,
1.3469616, -.01790407, .21693885, .08095841, -7.0604782,
-.14088069, 2.0700801, -.00943265, .3219257, .10889408,
-12.105751]
self.params = np.reshape(params, (6,-1), order='F')
bse = [.0342823657, .093626795, .0065248584, .0735865799,
.0176336937, .6298376313, .0391615553, .1082386919,
.0079144618, .0852893563, .0222809297, .7631899491,
.0570382292, .1585481337, .0113313133, .1262913234,
.0336142088, 1.156541492, .0437902764, .1288965854,
.0084187486, .0941250559, .0261963632, .9575809602,
.0393516553, .1171860107, .0076110152, .0850070091,
.0229760791, .8443638283, .042138047, .1434089089,
.0081338625, .0910979921, .025300888, 1.059954821]
self.bse = np.reshape(bse, (6,-1), order='F')
self.yhat = np.loadtxt(os.path.join(cur_dir,'yhat_mnlogit.csv'))
self.phat = np.loadtxt(os.path.join(cur_dir,'phat_mnlogit.csv'))
self.cov_params = None
self.llf = -1461.922747312
self.llnull = -1750.34670999
self.llr = 576.8479253554
self.llr_pvalue = 1.8223179e-102
self.prsquared = .1647810465387
self.df_model = 30
self.df_resid = 944 - 36
self.J = 7
self.K = 6
self.aic = 2995.84549462
self.bic = 3170.45003661
z = [-.3364988051, 3.179798597, -3.823070772, 1.121012042,
.2946945327, -.5928538661, -2.266269864, 3.618564069,
-2.893164162, 2.122688754, 2.148652536, -2.949348555,
-1.857818873, 3.616885888, -1.310634214, -.0566342868,
1.712822091, -3.169435381, -2.090799808, 9.920912816,
-1.031191864, 2.123004903, 3.225576554, -7.951122047,
-2.370538224, 11.49421878, -2.352389066, 2.552011323,
3.523595639, -8.361890935, -3.34331327, 14.43480847,
-1.159676452, 3.533839715, 4.303962885, -11.42100649]
self.z = np.reshape(z, (6,-1), order='F')
pvalues = [0.7364947525, 0.0014737744, 0.0001317999, 0.2622827367,
0.7682272401, 0.5532789548, 0.0234348654, 0.0002962422,
0.0038138191, 0.0337799420, 0.0316619538, 0.0031844460,
0.0631947400, 0.0002981687, 0.1899813744, 0.9548365214,
0.0867452747, 0.0015273542, 0.0365460134, 3.37654e-23,
0.3024508550, 0.0337534410, 0.0012571921, 1.84830e-15,
0.0177622072, 1.41051e-30, 0.0186532528, 0.0107103038,
0.0004257334, 6.17209e-17, 0.0008278439, 3.12513e-47,
0.2461805610, 0.0004095694, 0.0000167770, 3.28408e-30]
self.pvalues = np.reshape(pvalues, (6,-1), order='F')
conf_int = [[[-0.0787282, 0.0556562], [0.1142092, 0.4812195],
[-0.0377335, -0.0121565], [-0.0617356, 0.2267185], [-0.0293649,
0.0397580], [-1.6078610, 0.8610574]], [[-0.1655059, -0.0119954],
[0.1795247, 0.6038126], [-0.0384099, -0.0073858], [0.0138787,
0.3482068], [0.0042042, 0.0915438], [-3.7467380, -0.7550884]],
[[-0.2177596, 0.0058262], [0.2627019, 0.8841991], [-0.0370602,
0.0073578], [-0.2546789, 0.2403740], [-0.0083075, 0.1234578],
[-5.9323630,-1.3988040]],[[-0.1773841, -0.0057293], [1.0261390,
1.5314040], [-0.0251818, 0.0078191], [0.0153462, 0.3843097],
[0.0331544, 0.1358423], [-9.4906670, -5.7370190]], [[-0.1704124,
-0.0161568], [1.1172810, 1.5766420], [-0.0328214, -0.0029868],
[0.0503282, 0.3835495], [0.0359261, 0.1259907], [-8.7154010,
-5.4055560]], [[-0.2234697, -0.0582916], [1.7890040, 2.3511560],
[-0.0253747, 0.0065094], [0.1433769, 0.5004745], [0.0593053,
0.1584829], [-14.1832200, -10.0282800]]]
self.conf_int = np.asarray(conf_int)
# margins, dydx(*) predict(outcome(#))
self.margeff_dydx_overall = np.array([
[0.00868085993550, -0.09779854015456, 0.00272556969847,
-0.01992376579372, -0.00603133322764],
[0.00699386733148, -0.05022430802614, -0.00211003909752,
-0.00536980000265, -0.00554366741814],
[-0.00391040848820, -0.02824717135857, -0.00100551299310,
0.00664337806861, 0.00097987356999],
[-0.00182580888015, -0.00573744730031, -0.00004249256428,
-0.00546669558488, 0.00054101121854],
[-0.00098558129923, 0.01985550937033, 0.00047972250012,
0.00172605778905, 0.00211291403209],
[-0.00153469551647, 0.03755346502013, -0.00068531143399,
0.00472471794347, 0.00254733486106],
[-0.00741820702809, 0.12459834487569, 0.00063806819375,
0.01766610701188, 0.00539385283759]
]).T
self.margeff_dydx_overall_se = np.array([
[.0038581061, .0080471125, .0007068488, .0082318967, .0020261706],
[.003904378, .0073600286, .000756431, .0084381578, .0020482238],
[.003137126, .0056813182, .0006601377, .0068932588, .0018481806],
[.0019427783, .0031904763, .0003865411, .004361789, .0011523221],
[.0029863227, .0054076092, .0005886612, .0064426365, .0018886818],
[.0035806552, .0069497362, .000722511, .0078287717, .0022352393],
[.0033641608, .008376629, .0006774697, .0073505286, .0021660086]
]).T
self.margeff_dydx_mean = np.array([
[0.01149887431225, -0.13784207091973, 0.00273313385873,
-0.02542974260540, -0.00855346837482],
[0.01114846831102, -0.09864273512889, -0.00222435063712,
-0.01214617126321, -0.00903581444579],
[-0.00381702868421, -0.05132297961269, -0.00116763216994,
0.00624203027060, 0.00021912081810],
[-0.00233455327258, -0.00928554037343, -0.00000206561214,
-0.00775415690571, 0.00060004460394],
[-0.00352579921274, 0.06412187169362, 0.00073938948643,
0.00747778063206, 0.00459965010365],
[-0.00574308219449, 0.11126535089794, -0.00057337915464,
0.01467424346725, 0.00641760846097],
[-0.00722687818452, 0.12170608820238, 0.00049490419675,
0.01693601418978, 0.00575285798725]]).T
self.margeff_dydx_mean_se = np.array([
[.0043729758, .0110343353, .0008149907, .0092551389, .0023752071],
[.004875051, .0124746358, .0009613152, .0105665812, .0026524426],
[.0040718954, .0103613938, .0008554615, .0089931297, .0024374625],
[.0026430804, .0070845916, .0005364369, .0057654258, .0015988838],
[.0037798151, .0103849291, .0007393481, .0082021938, .0023489261],
[.0045654631, .0130329403, .0009128134, .0100053262, .0028048602],
[.0027682389, .0113292677, .0005325113, .0061289353, .0017330763]
]).T
self.margeff_dydx_dummy_overall = np.array([
[0.00549149574321, -0.05348235321783, 0.00298963549049,
-0.01479461677951, -0.00332167981255, -0.26502967041815],
[0.00345677928276, -0.00950322030929, -0.00189456107189,
0.00033893662061, -0.00314690167350, -0.21040878091828],
[-0.00645089013284, 0.00401746940204, -0.00083948249351,
0.01114202556889, 0.00277069841472, -0.15967397659686],
[-0.00215436802341, -0.00366545199370, -0.00000002297812,
-0.00457368049644, 0.00065303026027, -0.00094772782001],
[0.00058038428936, -0.00369080100124, 0.00035948233235,
-0.00018863693013, 0.00079351293461, 0.12640653743480],
[0.00217597030999, -0.01279456622853, -0.00091882392767,
0.00001651192759, -0.00037998290789, 0.27175070356670],
[-0.00309932483642, 0.07911868907484, 0.00030378521102,
0.00805941631677, 0.00263129901425, 0.23790291475181]]).T
self.margeff_dydx_dummy_overall_se = np.array([
[.0037314453, .0094102332, .000688838, .0079744554, .0019365971,
.0243914836],
[.0038215262, .0095938828, .0007410885, .008259353, .0019984087,
.0317628806],
[.0031045718, .00785814, .0006504353, .0067892866, .0018060332,
0.0262803561],
[.0019756086, .0051031194, .0003862449, .0043621673, .0011796953,
.0219999601],
[.0029714074, .0081732018, .0005715192, .0064742872, .0019130195,
.0331694192],
[.0034443743, .0097296187, .0006774867, .0075996454, .0021993881,
.038600835],
[.0032003518, .0098741227, .0006335772, .0070902078, .0021003227,
.0255727127]]).T
self.margeff_eydx_dummy_overall = np.array([
[.03939188, -.65758371, .01750922, -.12131806, -.03613241,
-3.2132513],
[.02752366, -.383165, -.00830021, -.03652935, -.03286046,
-1.8741853],
[-.05006681, -.2719659, -.00626481, .06525323, .01012554,
-2.0058029],
[-.05239558, -.22549142, .00025015, -.13104416, .01114517,
-.27052009],
[-.00296374, .25627809, .00140513, .03358712, .02296041,
1.3302701],
[.00328283, .2800168, -.0083912, .04332782, .01575863,
1.8441023],
[-.03257068, .98346111, -.00122118, .10847807, .0406456,
2.9119099]]).T
self.margeff_eydx_dummy_overall_se = np.array([
[.0272085605, .0777760394, .0052427952, .0584011446, .0148618012,
.5796921383],
[.0262290023, .0724479385, .005174736, .0567743614, .0144447083,
.3015738731],
[.0321415498, .0895589422, .0067480662, .0701460193, .0190451865,
.3904138447],
[.0511305319, .1420904068, .0102342163, .1129912244, .0308618233,
.3693799595],
[.0340186217, .0991711703, .0065812158, .0737441012, .0212966336,
.2346982385],
[.0289250212, .0840662279, .0056743561, .0631772185, .0177278895,
.2089516714],
[.0318251305, .1085637405, .0062400589, .0699123044, .0201045606,
.3727166284]]).T
# taken from gretl
self.resid = np.loadtxt(os.path.join(cur_dir,'mnlogit_resid.csv'),
delimiter=",")
class DiscreteL1(object):
def __init__(self):
"""
Special results for L1 models
Uses the Spector data and a script to generate the baseline results
"""
pass
def logit(self):
"""
Results generated with:
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = 3 * np.array([0, 1, 1, 1])
res2 = sm.Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
"""
nan = np.nan
self.params = [-4.10271595, 0., 0.15493781, 0.]
self.conf_int = [[-9.15205122, 0.94661932], [nan, nan],
[-0.06539482, 0.37527044], [ nan, nan]]
self.bse = [ 2.5762388 , nan, 0.11241668, nan]
self.nnz_params = 2
self.aic = 42.091439368583671
self.bic = 45.022911174183122
self.cov_params = [[ 6.63700638, nan, -0.28636261, nan],
[nan, nan, nan, nan], [-0.28636261, nan, 0.01263751, nan],
[nan, nan, nan, nan]]
def sweep(self):
"""
Results generated with
params = np.zeros((3, 4))
alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])
model = sm.Logit(data.endog, data.exog)
for i in range(3):
alpha = alphas[i, :]
res2 = model.fit_regularized(method="l1", alpha=alpha, disp=0, acc=1e-10,
maxiter=1000, trim_mode='off')
params[i, :] = res2.params
print params
"""
self.params = [[-10.37593611, 2.27080968, 0.06670638, 2.05723691],
[ -5.32670811, 1.18216019, 0.01402395, 1.45178712],
[ -3.92630318, 0.90126958, -0. , 1.09498178]]
def probit(self):
"""
Results generated with
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10])
res2 = sm.Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
"""
nan = np.nan
self.params = [-5.40476992, 1.25018458, 0.04744558, 0. ]
self.conf_int = [[-9.44077951, -1.36876033],
[ 0.03716721, 2.46320194],
[-0.09727571, 0.19216687],
[ np.nan, np.nan]]
self.bse = [ 2.05922641, 0.61889778, 0.07383875, np.nan]
self.nnz_params = 3
self.aic = 38.399773877542927
self.bic = 42.796981585942106
self.cov_params = [[ 4.24041339, -0.83432592, -0.06827915, nan],
[-0.83432592, 0.38303447, -0.01700249, nan],
[-0.06827915, -0.01700249, 0.00545216, nan],
[ nan, nan, nan, nan]]
def mnlogit(self):
"""
Results generated with
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10)
"""
self.params = [[ 0.00100163, -0.05864195, -0.06147822, -0.04769671, -0.05222987,
-0.09522432],
[ 0. , 0.03186139, 0.12048999, 0.83211915, 0.92330292,
1.5680646 ],
[-0.0218185 , -0.01988066, -0.00808564, -0.00487463, -0.01400173,
-0.00562079],
[ 0. , 0.03306875, 0. , 0.02362861, 0.05486435,
0.14656966],
[ 0. , 0.04448213, 0.03252651, 0.07661761, 0.07265266,
0.0967758 ],
[ 0.90993803, -0.50081247, -2.08285102, -5.26132955, -4.86783179,
-9.31537963]]
self.conf_int = [[[ -0.0646223 , 0.06662556],
[ np.nan, np.nan],
[ -0.03405931, -0.00957768],
[ np.nan, np.nan],
[ np.nan, np.nan],
[ 0.26697895, 1.55289711]],
[[ -0.1337913 , 0.01650741],
[ -0.14477255, 0.20849532],
[ -0.03500303, -0.00475829],
[ -0.11406121, 0.18019871],
[ 0.00479741, 0.08416684],
[ -1.84626136, 0.84463642]],
[[ -0.17237962, 0.04942317],
[ -0.15146029, 0.39244026],
[ -0.02947379, 0.01330252],
[ np.nan, np.nan],
[ -0.02501483, 0.09006785],
[ -3.90379391, -0.26190812]],
[[ -0.12938296, 0.03398954],
[ 0.62612955, 1.03810876],
[ -0.02046322, 0.01071395],
[ -0.13738534, 0.18464256],
[ 0.03017236, 0.12306286],
[ -6.91227465, -3.61038444]],
[[ -0.12469773, 0.02023799],
[ 0.742564 , 1.10404183],
[ -0.02791975, -0.00008371],
[ -0.08491561, 0.19464431],
[ 0.0332926 , 0.11201273],
[ -6.29331126, -3.44235233]],
[[ -0.17165567, -0.01879296],
[ 1.33994079, 1.79618841],
[ -0.02027503, 0.00903345],
[ -0.00267819, 0.29581751],
[ 0.05343135, 0.14012026],
[-11.10419107, -7.52656819]]]
self.bse = [[ 0.03348221, 0.03834221, 0.05658338, 0.04167742, 0.03697408,
0.03899631],
[ np.nan, 0.09012101, 0.13875269, 0.10509867, 0.09221543,
0.11639184],
[ 0.00624543, 0.00771564, 0.01091253, 0.00795351, 0.00710116,
0.00747679],
[ np.nan, 0.07506769, np.nan, 0.08215148, 0.07131762,
0.07614826],
[ np.nan, 0.02024768, 0.02935837, 0.02369699, 0.02008204,
0.02211492],
[ 0.32804638, 0.68646613, 0.92906957, 0.84233441, 0.72729881,
0.91267567]]
self.nnz_params = 32
self.aic = 3019.4391360294126
self.bic = 3174.6431733460686
class Spector(object):
"""
Results are from Stata 11
"""
def __init__(self):
self.nobs = 32
def logit(self):
self.params = [2.82611297201, .0951576702557, 2.37868772835,
-13.0213483201]
self.cov_params = [[1.59502033639, -.036920566629, .427615725153,
-4.57347950298], [-.036920566629, .0200375937069,
.0149126464275, -.346255757562], [.427615725153 ,
.0149126464275, 1.13329715236, -2.35916128427],
[-4.57347950298, -.346255757562, -2.35916128427,
24.3179625937]]
self.bse = [1.26294114526, .141554207662, 1.06456430165, 4.93132462871]
self.resid_pearson = [-.1652382, -.2515266, -.4800059, -.1630655,
.8687437, -.1900454, -.165002, -.2331563,
-.3535812, .6647838, -.1583799, -.4843181,
-.689527, 2.043449, -.7516119, -.1764176,
-.2380445, -.2003426, -1.199277, .7164842,
-.255713, .3242821, -.5646816, -2.400189,
.4392082, 1.038473, .75747, -.6659256,
.4336657, .2404583, -1.060033, 2.829577]
self.resid_dev = [-.2321102, -.3502712, -.6439626, -.2290982,
1.060478, -.2663844, -.2317827, -.3253788, -.4853875,
.8555557, -.2225972, -.6491808, -.8819993, 1.813269,
-.9463985, -.247583, -.3320177, -.2805444, -1.335131,
.9103027, -.3559217, .4471892, -.744005, -1.955074,
.5939538, 1.209638, .952332, -.8567857, .5870719, .335292,
-1.227311, 2.096639]
# from gretl
self.resid_generalized = [-0.026578, -0.059501, -0.187260,
-0.025902, 0.430107, -0.034858, -0.026504, -0.051559,
-0.111127, 0.306489, -0.024470, -0.189997, -0.322240,
0.806789, -0.360990, -0.030184, -0.053626, -0.038588,
-0.589872, 0.339214, -0.061376, 0.095153, -0.241772,
-0.852091, 0.161709, 0.518867, 0.364579, -0.307219,
0.158296, 0.054660, -0.529117, 0.888969]
self.phat = np.array([ .02657799236476,
.05950126051903,
.18725991249084,
.02590163610876,
.56989300251007,
.03485824912786,
.02650404907763,
.05155897513032,
.11112663894892,
.69351142644882,
.02447037212551,
.18999740481377,
.32223951816559,
.1932111531496,
.36098992824554,
.03018374741077,
.05362640321255,
.03858831897378,
.58987241983414,
.66078591346741,
.06137581542134,
.90484726428986,
.24177247285843,
.85209089517593,
.8382905125618,
.48113295435905,
.63542068004608,
.30721867084503,
.84170418977737,
.94534027576447,
.52911710739136,
.1110308393836])
self.yhat = np.array([-3.6007342338562,
-2.7604126930237,
-1.4679137468338,
-3.6272060871124,
.28141465783119,
-3.3209850788116,
-3.6035962104797,
-2.9120934009552,
-2.0792844295502,
.81658720970154,
-3.6855175495148,
-1.4500269889832,
-.74349880218506,
-1.429278254509,
-.57107019424438,
-3.4698030948639,
-2.8705959320068,
-3.2154531478882,
.36343798041344,
.66679841279984,
-2.7273993492126,
2.2522828578949,
-1.1429864168167,
1.7510952949524,
1.6455633640289,
-.07550399750471,
.55554306507111,
-.81315463781357,
1.6709630489349,
2.8504176139832,
.11660042405128,
-2.0802545547485])
self.llf = -12.8896334653335
self.llnull = -20.5917296966173
self.df_model = 3
self.df_resid = 32 - 4 #TODO: is this right? not reported in stata
self.llr = 15.4041924625676
self.prsquared = .374038332124624
self.llr_pvalue = .00150187761112892
self.aic = 33.779266930667
self.bic = 39.642210541866
self.z = [2.237723415, 0.6722348408, 2.234423721, -2.640537645]
self.conf_int = [[.3507938,5.301432],[-.1822835,.3725988],[.29218,
4.465195],[-22.68657,-3.35613]]
self.pvalues = [.0252390974, .5014342039, .0254552063, .0082774596]
# taken from margins command
self.margeff_nodummy_dydx = [.36258084688424,.01220841099085,
.30517768382304]
self.margeff_nodummy_dydx_se = [.1094412, .0177942, .0923796]
self.margeff_nodummy_dydxmean = [.53385885781692,.01797548988961,
.44933926079386]
self.margeff_nodummy_dydxmean_se = [.237038, .0262369, .1967626]
self.margeff_nodummy_dydxmedian = [.25009492465091,.00842091261329,
.2105003352955]
self.margeff_nodummy_dydxmedian_se = [.1546708, .0134314, .0928183]
self.margeff_nodummy_dydxzero = [6.252993785e-06,2.105437138e-07,
5.263030788e-06]
self.margeff_nodummy_dydxzero_se = [.0000288, 9.24e-07, .000025]
self.margeff_nodummy_dyex = [1.1774000792198,.27896245178384,
.16960002159996]
self.margeff_nodummy_dyex_se = [.3616481, .4090679, .0635583]
self.margeff_nodummy_dyexmean = [1.6641381583512,.39433730945339,
.19658592659731]
self.margeff_nodummy_dyexmean_se = [.7388917, .5755722, .0860836]
#NOTE: PSI at median should be a NaN or 'omitted'
self.margeff_nodummy_dyexmedian = [.76654095836557,.18947053379898,0]
self.margeff_nodummy_dyexmedian_se = [ .4740659, .302207, 0]
#NOTE: all should be NaN
self.margeff_nodummy_dyexzero = [0,0,0]
self.margeff_nodummy_dyexzero_se = [0,0,0]
self.margeff_nodummy_eydx = [1.8546366266779,.06244722072812,
1.5610138123033]
self.margeff_nodummy_eydx_se = [.847903, .0930901, .7146715]
self.margeff_nodummy_eydxmean = [2.1116143062702,.0710998816585,
1.7773072368626]
self.margeff_nodummy_eydxmean_se = [ 1.076109, .1081501, .9120842]
self.margeff_nodummy_eydxmedian = [2.5488082240624,.0858205793373,
2.1452853812126]
self.margeff_nodummy_eydxmedian_se = [1.255377, .1283771, 1.106872]
self.margeff_nodummy_eydxzero = [2.8261067189993,.0951574597115,
2.3786824653103]
self.margeff_nodummy_eydxzero_se = [1.262961, .1415544, 1.064574]
self.margeff_nodummy_eyex = [5.4747106798973,1.3173389907576,
.44600395466634]
self.margeff_nodummy_eyex_se = [2.44682, 1.943525, .1567618]
self.margeff_nodummy_eyexmean = [6.5822977203268,1.5597536538833,
.77757191612739]
self.margeff_nodummy_eyexmean_se = [3.354433, 2.372543, .3990368]
self.margeff_nodummy_eyexmedian = [7.8120973525952,1.9309630350892,0]
self.margeff_nodummy_eyexmedian_se = [3.847731951, 2.888485089, 0]
self.margeff_nodummy_eyexzero = [0,0,0]
self.margeff_nodummy_eyexzero_se = [0,0,0]
# for below GPA = 2.0, psi = 1
self.margeff_nodummy_atexog1 = [.1456333017086,.00490359933927,
.12257689308426]
self.margeff_nodummy_atexog1_se = [.145633, .0111226, .1777101]
# for below GPA at mean, tuce = 21, psi = 0
self.margeff_nodummy_atexog2 = [.25105129214546,.00845311433473,
.2113052923675]
self.margeff_nodummy_atexog2_se = [.1735778, .012017, .0971515]
# must get this from older margeff or i.psi then margins
self.margeff_dummy_dydx = [.36258084688424,.01220841099085,
.35751515254729]
self.margeff_dummy_dydx_se = [.1094412, .0177942, .1420034]
self.margeff_dummy_dydxmean = [.53385885781692,.01797548988961,
.4564984096959]
self.margeff_dummy_dydxmean_se = [.237038, .0262369, .1810537]
#self.margeff_dummy_dydxmedian
# from margeff
self.margeff_dummy_count_dydx_median = [0.250110487483923,
0.008426867847905, 0.441897738279663]
self.margeff_dummy_count_dydx_median_se = [.1546736661, .0134551951,
.1792363708]
# estimate with i.psi for the below then use margins
self.margeff_dummy_eydx = [1.8546366266779,.06244722072812,
1.5549034398832]
self.margeff_dummy_eydx_se = [.847903, .0930901, .7283702]
# ie
# margins, eydx(*) at((mean) _all)
self.margeff_dummy_eydxmean = [2.1116143062702,.0710998816585,
1.6631775707188]
self.margeff_dummy_eydxmean_se = [1.076109, .1081501, .801205]
# Factor variables not allowed in below
# test raises
#self.margeff_dummy_dydxzero
#self.margeff_dummy_eydxmedian
#self.margeff_dummy_eydxzero
#self.margeff_dummy_dyex
#self.margeff_dummy_dyexmean
#self.margeff_dummy_dyexmedian
#self.margeff_dummy_dyexzero
#self.margeff_dummy_eyex
#self.margeff_count_dummy_dydx_median
#self.margeff_count_dummy_dydx_median_se
#NOTE: need old version of margeff for nodisc but at option is broken
# stata command is margeff, count nodisc
# this can be replicated with the new results by margeff
# and then using margins for the last value
self.margeff_count_dydx = [.3625767598018, .0122068569914, .3051777]
self.margeff_count_dydx_se = [.1094379569, .0177869773, .0923796]
# middle value taken from margeff rest from margins
self.margeff_count_dydxmean = [.5338588, 0.01797186545386,
.4493393 ]
self.margeff_count_dydxmean_se = [.237038, .0262211, .1967626]
# with new version of margeff this is just a call to
# margeff
# mat list e(margeff_b), nonames format(%17.16g)
self.margeff_count_dummy_dydxoverall = [.362576759801767,
.012206856991439, .357515163621704]
# AFAICT, an easy way to get se is
# mata
# V = st_matrix("e(margeff_V)")
# se = diagonal(cholesky(diag(V)))
# last SE taken from margins with i.psi, don't know how they
# don't know why margeff is different, but trust official results
self.margeff_count_dummy_dydxoverall_se = [.1094379569, .0177869773,
.1420034]
#.1574340751 ]
# from new margeff
self.margeff_count_dummy_dydxmean = [0.533849340033768,
0.017971865453858, 0.456498405282412]
self.margeff_count_dummy_dydxmean_se = [.2370202503, .0262210796,
.1810536852 ]
# for below GPA = 2.0, psi = 1
self.margeff_dummy_atexog1 = [.1456333017086,.00490359933927,
.0494715429937]
self.margeff_dummy_atexog1_se = [.145633, .0111226, .0731368]
# for below GPA at mean, tuce = 21, psi = 0
self.margeff_dummy_atexog2 = [.25105129214546,.00845311433473,
.44265645632553]
self.margeff_dummy_atexog2_se = [.1735778, .012017, .1811925]
#The test for the prediction table was taken from Gretl
#Gretl Output matched the Stata output here for params and SE
self.pred_table = np.array([[18, 3], [3, 8]])
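        # Editor's note: pred_table rows are observed outcomes and columns
        # are predicted outcomes, so [[18, 3], [3, 8]] means 18 + 8 = 26 of
        # the 32 observations were classified correctly.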
def probit(self):
self.params = [1.62581025407, .051728948442, 1.42633236818,
-7.45232041607]
self.cov_params = [[.481472955383, -.01891350017, .105439226234,
-1.1696681354], [-.01891350017, .00703757594, .002471864882,
-.101172838897], [.105439226234, .002471864882, .354070126802,
-.594791776765], [-1.1696681354, -.101172838897, -.594791776765,
6.46416639958]]
self.bse = [.693882522754, .083890261293, .595037920474, 2.54247249731]
self.llf = -12.8188033249334
self.llnull = -20.5917296966173
self.df_model = 3
self.df_resid = 32 - 4
self.llr = 15.5458527433678
self.prsquared = .377478069409622
self.llr_pvalue = .00140489496775855
self.aic = 33.637606649867
self.bic = 39.500550261066
self.z = [ 2.343062695, .6166263836, 2.397044489, -2.931131182]
self.conf_int = [[.2658255,2.985795],[-.1126929,.2161508],[.2600795,
2.592585],[-12.43547,-2.469166]]
self.pvalues = [.0191261688, .537481188, .0165279168, .0033773013]
self.phat = [.0181707, .0530805, .1899263, .0185707, .5545748,
.0272331, .0185033, .0445714, .1088081, .6631207,
.0161024, .1935566, .3233282, .1951826, .3563406,
.0219654, .0456943, .0308513, .5934023, .6571863,
.0619288, .9045388, .2731908, .8474501, .8341947,
.488726, .6424073, .3286732, .8400168, .9522446,
.5399595, .123544]
self.yhat = np.array([-2.0930860042572,
-1.615691781044,
-.87816804647446,
-2.0842070579529,
.13722851872444,
-1.9231110811234,
-2.0856919288635,
-1.6999372243881,
-1.2328916788101,
.42099541425705,
-2.1418602466583,
-.86486464738846,
-.45841211080551,
-.85895526409149,
-.36825761198997,
-2.0147502422333,
-1.6881184577942,
-1.8684275150299,
.23630557954311,
.40479621291161,
-1.538782119751,
1.3078554868698,
-.60319095849991,
1.025558590889,
.97087496519089,
-.02826354466379,
.36490100622177,
-.44357979297638,
.99452745914459,
1.6670187711716,
.10033150017262,
-1.1574513912201])
self.resid_dev = [-.191509, -.3302762, -.6490455, -.1936247, 1.085867,
-.2349926, -.1932698, -.3019776, -.4799906, .9064196,
-.1801855, -.6559291, -.8838201, 1.807661, -.9387071,
-.2107617, -.3058469, -.2503485, -1.341589, .9162835,
-.3575735, .447951, -.7988633, -1.939208, .6021435,
1.196623, .9407793, -.8927477, .59048, .3128364,
-1.246147, 2.045071]
# Stata doesn't have it, but I think it's just oversight
self.resid_pearson = None
# generalized residuals from gretl
self.resid_generalized = [-0.045452, -0.114220, -0.334908,
-0.046321, 0.712624, -0.064538,
-0.046175, -0.098447, -0.209349,
0.550593, -0.040906, -0.340339,
-0.530763, 1.413373, -0.579170,
-0.053593, -0.100556, -0.071855,
-0.954156, 0.559294, -0.130167,
0.187523, -0.457597, -1.545643,
0.298511, 0.815964, 0.581013,
-0.538579, 0.289631, 0.104405,
-0.862836, 1.652638]
self.pred_table = np.array([[18, 3], [3, 8]])
class RandHIE(object):
"""
Results obtained from Stata 11
"""
def __init__(self):
self.nobs = 20190
def poisson(self):
self.params = [-.052535114675, -.247086797633, .035290201794,
-.03457750643, .271713973711, .033941474461, -.012635035534,
.054056326828, .206115121809, .700352877227]
self.cov_params = None
self.bse = [.00288398915279, .01061725196728, .00182833684966,
.00161284852954, .01223913844387, .00056476496963,
.00925061122826, .01530987068312, .02627928267502,
.01116266712362]
predict = np.loadtxt(os.path.join(cur_dir, 'yhat_poisson.csv'),
delimiter=",")
self.phat = predict[:,0]
self.yhat = predict[:,1]
self.llf = -62419.588535018
self.llnull = -66647.181687959
self.df_model = 9
self.df_resid = self.nobs - self.df_model - 1
self.llr = 8455.186305881856
self.prsquared = .0634324369893758
self.llr_pvalue = 0
self.aic = 124859.17707
self.bic = 124938.306497
self.z = [-18.21612769, -23.27219872, 19.30180524, -21.43878101,
22.20041672, 60.09840604, -1.36585953, 3.53081538, 7.84325525,
62.74063980]
self.conf_int = [[ -.0581876, -.0468826],[-0.2678962, -0.2262774],
[0.0317067, 0.0388737],[-0.0377386, -0.0314164],
[0.2477257, 0.2957022], [0.0328346, 0.0350484],[-0.0307659,
0.0054958], [0.0240495, 0.0840631],[0.1546087, 0.2576216],
[0.6784745, 0.7222313]]
self.pvalues = [3.84415e-74, 8.4800e-120, 5.18652e-83, 5.8116e-102,
3.4028e-109, 0, .1719830562, .0004142808, 4.39014e-15, 0]
# from stata
# use margins and put i. in front of dummies
self.margeff_dummy_overall = [-0.15027280560599, -0.66568074771099,
0.10094500919706, -0.09890639687842,
0.77721770295360, 0.09708707452600,
-0.03608195237609, 0.15804581481115,
0.65104087597053]
self.margeff_dummy_overall_se = [.008273103, .0269856266,
.0052466639, .0046317555, .0351582169, .0016652181,
.0263736472, .0457480115, .0913901155]
# just use margins
self.margeff_nodummy_overall = [-0.15027280560599, -0.70677348928158,
0.10094500919705, -0.09890639687842,
0.77721770295359, 0.09708707452600,
-0.03614158359367, 0.15462412033340,
0.58957704430148]
self.margeff_nodummy_overall_se = [.008273103, .0305119343,
.0052466639, .0046317555,
.0351582168, .0016652181,
.0264611158, .0437974779,
.0752099666]
# taken from gretl
self.resid = np.loadtxt(os.path.join(cur_dir,'poisson_resid.csv'),
delimiter=",")
def negativebinomial_nb2_bfgs(self):
# R 2.15.1 MASS 7.3-22 glm.nb()
self.params = [-0.0579469537244314,
-0.267787718814838, 0.0412060770911646, -0.0381376804392121,
0.268915772213171, 0.0381637446219235, -0.0441338846217674,
0.0172521803400544, 0.177960787443151,0.663556087183864,
# lnalpha from stata
1.292953339909746
]
# alpha and stderr from stata
self.lnalpha_std_err = .0143932
self.lnalpha = 0.256929012449
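        # Consistency check (editor's note): exp(0.256929...) ~= 1.292953,
        # i.e. lnalpha is the log of the alpha value appended to params above.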
self.bse = [0.00607085853920512, 0.0226125368090765,
0.00405542008282773, 0.00344455937127785, 0.0298855063286547,
0.00142421904710063, 0.0199374393307107, 0.0358416931939136,
0.0741013728607101, 0.0250354082637892,
# from stata
.0186098
]
self.z = [-9.54510030998327, -11.8424447940467,
10.1607419822296, -11.071860382846, 8.99820030672628,
26.7962605187844, -2.21361850384595, 0.481343898758222,
2.40158556546135, 26.5047040652267]
self.pvalues = [1.35975947860026e-21,
2.35486776488278e-32, 2.96808970292151e-24,
1.71796558863781e-28, 2.2944789508802e-19,
3.57231639404726e-158, 0.0268550333379416, 0.630272102021494,
0.0163241908407114, 8.55476622951356e-155]
self.fittedvalues = [0.892904166867786, 0.892904166867786, 0.892904166867786,
0.892904166867786, 0.892904166867786, 0.937038051489553,
0.937038051489553, 0.937038051489553, 0.937038051489553,
0.937038051489553]
#self.aic = 86789.3241530713 # This is what R reports
self.aic = 86789.32415307125484 # from Stata
self.df_resid = 20180
self.df_model = 9
# R conf_int: 1.96 * bse, not profile likelihood via R's confint()
self.conf_int = [
# from Stata
[-.0698826, -.0460113],
[-.3122654, -.2233101],
[ .0330781, .049334],
[-.0448006, -.0314748],
[ .2102246, .3276069],
[ .0352959, .0410316],
[-.0834356, -.0048321],
[-.0535908, .0880951],
[ .0324115, .3235101],
[ .6150055, .7121067],
# from Stata
[ 1.256989, 1.329947]
]
self.bic = 86876.36652289562335 # stata
self.llnull = -44199.27443563430279 # stata
self.llr = 1631.224718197351 # stata
self.llf = -43383.66207653563 # stata
self.df_model = 9.0
self.llr_pvalue = 0.0
def negativebinomial_nb1_bfgs(self):
# Unpublished implementation intended for R's COUNT package. Sent by
# J.Hilbe (of Cambridge UP NBin book) and Andrew Robinson to Vincent
# Arel-Bundock on 2012-12-06.
#self.params = [-0.065309744899923, -0.296016207412261,
# 0.0411724098925173, -0.0320460533573259, 0.19083354227553,
# 0.0318566232844115, -0.0331972813313092, -0.0484691550721231,
# 0.111971860837541, 0.757560143822609,
# 3.73086958562569]
# from Stata
self.params = [-.065317260803762961, -.296023807893893376,
.041187021258044826, -.032028789543547605,
.19065933246421754, .031871625115758778,
-.033250849053302826, -.04850769174426571,
.111813637465757343, .757277086555503409,
3.731151380800305]
# lnalpha and lnalpha_std_err are from stata
self.lnalpha = 1.316716867203
self.lnalpha_std_err = .0168876692
self.bse = [0.00536019929563678,
0.0196998350459769, 0.00335779098766272, 0.00301145915122889,
0.0237984097096245, 0.00107360844112751, 0.0167174614755359,
0.0298037989274781, 0.0546838603596457,0.0214703279904911,
0.0630011409376052]
self.z = [-12.1842008660173, -15.0263292419148,
12.2617548393554, -10.6413707601675, 8.0187518663633,
29.6724784046551, -1.98578482623631, -1.62627439508848,
2.04762173155154, 35.2840508145997,
# From R, this is alpha/bse(alpha)
59.2190796881069
# taken from Stata even though they don't report it
# lnalpha/bse(lnalpha)
#77.968995
]
self.conf_int = [
[-0.075815736,-0.0548037543],
[-0.334627884,-0.2574045307],
[ 0.034591140, 0.0477536802],
[-0.037948513,-0.0261435934],
[ 0.144188659, 0.2374784253],
[ 0.029752351, 0.0339608958],
[-0.065963506,-0.0004310568],
[-0.106884601, 0.0099462908],
[ 0.004791495, 0.2191522271],
[ 3.607387349, 3.8543518219],
[ 0.715478301, 0.7996419867]]
# from Stata
self.llf = -43278.75612911823
self.llnull = -44199.2744356343
self.llr = 1841.036613032149
self.aic = 86579.51225823645655
self.bic = 86666.55462806082505
self.llr_pvalue = 0.0
self.df_model = 9.0
self.df_resid = 20180.0
# Smoke tests TODO: check against other stats package
self.pvalues = [3.65557865e-034, 5.24431864e-051,
1.42921171e-034, 2.09797259e-026, 1.15949461e-015,
1.56785415e-193, 4.71746349e-002, 1.04731854e-001,
4.07534831e-002, 1.95504975e-272, 0.00000000e+000]
self.conf_int = [[-.0758236, -.054811],
[-.3346363, -.2574113],
[ .0346053, .0477687],
[-.0379314, -.0261261],
[ .1440119, .2373067],
[ .0297667, .0339766],
[-.0660178, -.0004839],
[-.1069241, .0099087],
[ .0046266, .2190007],
[ .7151889, .7993652],
# from stata for alpha no lnalpha
[ 3.609675, 3.856716]]
#[ 1.28360034e+00, 1.34979803e+00]]
self.fittedvalues = [ 0.8487497 , 0.8487497 , 0.8487497 , 0.8487497,
0.8487497 , 0.88201746, 0.88201746, 0.88201746, 0.88201746,
0.88201746]
def negativebinomial_geometric_bfgs(self):
# Smoke tests TODO: Cross check with other stats package
self.params = [-0.05768894, -0.26646696, 0.04088528, -0.03795503,
0.26885821, 0.03802523, -0.04308456, 0.01931675, 0.18051684,
0.66469896]
self.bse = [ 0.00553867, 0.02061988, 0.00375937, 0.0030924 ,
0.02701658, 0.00132201, 0.01821646, 0.03271784, 0.06666231,
0.02250053]
self.pvalues = [ 2.10310916e-025, 3.34666368e-038, 1.50697768e-027,
1.25468406e-034, 2.48155744e-023, 6.18745348e-182,
1.80230194e-002, 5.54919603e-001, 6.77044178e-003,
8.44913440e-192]
self.z = [-10.41567024, -12.92281571, 10.8755779 , -12.27364916,
9.95160202, 28.76323587, -2.36514487, 0.59040434,
2.70792943, 29.54148082]
self.aic = 87101.159433012392 # old value 87101.160011780419
self.bic = 87180.288860125467 # old value 87180.289438893495
self.df_model = 9.0
self.df_resid = 20180.0
self.llf = -43540.58000589021
self.llnull = -44586.650971362695 # old value -44199.27443567125
self.llr = 2092.1425097129977 # old value 1317.3888595620811
self.llr_pvalue = 0 # old value 5.4288002863296022e-278
self.fittedvalues = [ 0.89348994, 0.89348994, 0.89348994,
0.89348994, 0.89348994, 0.9365745 , 0.9365745 , 0.9365745 ,
0.9365745 , 0.9365745 ]
self.conf_int = [[-0.06854453, -0.04683335],
[-0.30688118, -0.22605273],
[ 0.03351706, 0.04825351],
[-0.04401602, -0.03189404],
[ 0.21590669, 0.32180972],
[ 0.03543415, 0.04061632],
[-0.07878816, -0.00738096],
[-0.04480903, 0.08344253],
[ 0.04986111, 0.31117258],
[ 0.62059873, 0.70879919]]
def generalizedpoisson_gp2(self):
# Stata gnpoisson function
self.llf = -43326.42720093228
self.params = [-0.0604495342, -0.277717228, 0.0438136144,
-0.0395811744, 0.273044906, 0.0399108677, -0.0552626543,
-0.001227569488, 0.151980519, 0.651125316, 0.448085318
]
self.lnalpha_std_err = 0.0125607
self.lnalpha = -0.8027716
self.bse = [0.00634704, 0.02381906, 0.00443871, 0.00355094,
0.0334247, 0.00166303, 0.02102142, 0.0390845,
0.087821,0.02626823, 0.00562825
]
self.df_model = 9
self.aic = 86674.854401865
self.conf_int = [
[-0.07288951, -0.04800956],
[-0.32440173, -0.23103272],
[ 0.03511389, 0.05251333],
[-0.04654088, -0.03262147],
[ 0.20753371, 0.33855610],
[ 0.03665139, 0.04317034],
[-0.09646387, -0.01406144],
[-0.07783191, 0.07537652],
[-0.02014548, 0.32410651],
[ 0.59964053, 0.70261011],
[ 0.43718883, 0.45925338]
]
self.bic = 86761.896771689
self.wald_pvalue = 4.8795019354e-254
self.wald_statistic = 1206.46339591254
def zero_inflated_poisson_logit(self):
self.params = [.1033783, -1.045983, -.0821979, .0085692,
-.0267957, 1.482363]
self.llf = -57005.72199826186
self.bse = [0.0079912, 0.02235510, .0107145, 0.0018697,
0.0014121, 0.0085915]
self.conf_int = [[ 0.0877159, 0.1190408],
[-1.089798, -1.002167],
[-0.1031979, -0.061198],
[ 0.0049045, 0.0122338],
[-0.0295635, -0.024028],
[ 1.465524, 1.499202]]
self.aic = 114023.444
self.bic = 114070.9
def zero_inflated_poisson_probit(self):
self.params = [.0622534, -.6429324, -.0821788, .0085673,
-.0267952, 1.482369]
self.llf = -57006.05
self.bse = [.0048228, .0132516, .0107142, .0018697,
.0014121, .0085913]
self.conf_int = [[ 0.0528009, .0717058],
[-0.6689051, -.6169597],
[-0.1031783, -.0611793],
[ 0.0049027, .0122319],
[-0.0295629, -.0240275],
[ 1.46553, 1.499208]]
self.aic = 114024.1
self.bic = 114071.6
def zero_inflated_poisson_offset(self):
self.params = [.1052014, -1.082434, -.0922822, .0115868,
-.0283842, 1.347514]
self.llf = -58207.67
self.bse = [.0081836, .0230043, .0107788, .0018687,
.0014162, .0086309]
self.conf_int = [[ .0891619, .1212409],
[-1.127522, -1.037347],
[-.1134082, -.0711561],
[ .0079242, .0152494],
[-.0311599, -.0256085],
[ 1.330598, 1.36443]]
self.aic = 116427.3
self.bic = 116474.8
def zero_inflated_generalized_poisson(self):
self.params = [3.57337, -17.95797, -0.21380, 0.03847,
-0.05348, 1.15666, 1.36468]
self.llf = -43630.6
self.bse = [1.66109, 7.62052, 0.02066, 0.00339,
0.00289, 0.01680, 0.01606]
self.aic = 87275
def zero_inflated_negative_binomial(self):
self.params = [1.883859, -10.280888, -0.204769,
1.137985, 1.344457]
self.llf = -44077.91
self.bse = [0.3653, 1.6694, 0.02178, 0.01163, 0.0217496]
self.aic = 88165.81 | [
"[email protected]"
] | |
d4931041ad734c7569f7175eb78f7c7d99cff6c0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/322/103061/submittedfiles/principal.py | 80de3f765d209fcbcedb9c1411a4cfa34c140606 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
i = 0
while i < 10:
    i += 1
    print(i)
| [
"[email protected]"
] | |
0f054b6e23fbaf3c471d841c202f72d1f2244345 | 62c523b000e43b41bcb2bc96259f2e0136e8548f | /src/data/data.py | 703cef216a5a30549d909773faa9f1b5d06e41ca | [] | no_license | OlofHarrysson/pytorch-foundation | d7e0bd6d4bdf52bcf65d0c6e370e8e78ee0219cd | 6632e5260302669c458dde28be47c03fed052e53 | refs/heads/master | 2021-10-07T22:46:05.674105 | 2021-01-27T19:20:22 | 2021-01-27T19:20:22 | 212,659,773 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | from torchvision import datasets
from torch.utils.data import DataLoader
from collections import namedtuple
from anyfig import get_config
from ..transforms import get_train_transforms, get_val_transforms
from ..utils.meta_utils import get_project_root
def setup_dataloaders():
dataloaders = namedtuple('Dataloaders', ['train', 'val'])
return dataloaders(train=setup_trainloader(), val=setup_valloader())
def setup_trainloader():
transforms = get_train_transforms()
dataset_dir = get_project_root() / 'datasets'
dataset = MyCifar10(dataset_dir, transforms, train=True)
return DataLoader(dataset,
batch_size=get_config().batch_size,
num_workers=get_config().num_workers,
shuffle=True)
def setup_valloader():
transforms = get_val_transforms()
dataset_dir = get_project_root() / 'datasets'
dataset = MyCifar10(dataset_dir, transforms, train=False)
return DataLoader(dataset,
batch_size=get_config().batch_size,
num_workers=get_config().num_workers)
class MyCifar10(datasets.CIFAR10):
def __init__(self, path, transforms, train=True):
super().__init__(path, train, download=True)
self.transforms = transforms
def __getitem__(self, index):
im, label = super().__getitem__(index)
return self.transforms(im), label
| [
"[email protected]"
] | |
51b475ef145277d90a9da2e2b9fd07047a6f501b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/classification/LResNet100E-IR/prof.py | 7c2e0f01a90ddcd74354f146f728d0b9b6cc1964 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,277 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import argparse
from tqdm import tqdm
import torch
from torch import nn
from torch import optim
import apex
from apex import amp
from model import Backbone, Arcface
from utils import separate_bn_paras
def get_data(args):
x = torch.rand((args.batch, 3, 112, 112), dtype=torch.float32)
y = torch.randint(2, (args.batch,), dtype=torch.long)
return x, y
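# Editor's note: the 112x112 RGB shape above matches the Backbone input size,
# and any class id < 85742 is a valid label, so sampling from {0, 1} is
# enough for profiling purposes.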
class NewModel(nn.Module):
def __init__(self):
super(NewModel, self).__init__()
self.backbone = Backbone(num_layers=100, drop_ratio=0.6, mode='ir_se')
self.head = Arcface(embedding_size=512, classnum=85742)
def forward(self, images, labels):
embeddings = self.backbone(images)
thetas = self.head(embeddings, labels)
return thetas
def prepare_args():
parser = argparse.ArgumentParser(description='get prof')
parser.add_argument("-device", help="device", default='cuda:0', type=str)
parser.add_argument("-amp", help="use amp", default=True, type=str)
parser.add_argument("-batch", help="batch size", default=256, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
# 640.982ms
args = prepare_args()
device = torch.device(args.device)
if 'npu' in args.device:
torch.npu.set_device(device)
else:
torch.cuda.set_device(device)
# model
model = NewModel()
model = model.to(device)
print('model head create over ')
# optimizer
paras_only_bn, paras_wo_bn = separate_bn_paras(model.backbone)
if 'npu' in args.device and args.amp:
optimizer = apex.optimizers.NpuFusedSGD([
{'params': paras_wo_bn + [model.head.kernel], 'weight_decay': 5e-4},
{'params': paras_only_bn}
], lr=0.001, momentum=0.9)
else:
optimizer = optim.SGD([
{'params': paras_wo_bn + [model.head.kernel], 'weight_decay': 5e-4},
{'params': paras_only_bn}
], lr=0.001, momentum=0.9)
print('optimizer create over')
# loss function
loss_func = nn.CrossEntropyLoss().to(device)
# amp setting
if 'npu' in args.device and args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=128.0, combine_grad=True)
elif 'cuda' in args.device and args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=128.0)
print('start warm up train')
# warm up train
for _ in tqdm(range(5)):
imgs, labels = get_data(args)
imgs = imgs.to(device)
labels = labels.to(device)
thetas = model(imgs, labels)
loss = loss_func(thetas, labels)
optimizer.zero_grad()
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
print('start get prof')
# get prof
if "npu" in args.device:
k_v = {'use_npu': True}
else:
k_v = {'use_cuda': True}
with torch.autograd.profiler.profile(**k_v) as prof:
imgs, labels = get_data(args)
imgs = imgs.to(device)
labels = labels.to(device)
thetas = model(imgs, labels)
loss = loss_func(thetas, labels)
optimizer.zero_grad()
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
# print(prof.key_averages().table(sort_by="self_cpu_time_total"))
    prof.export_chrome_trace("output.prof")  # "output.prof" is the output trace file path
| [
"[email protected]"
] | |
f676137f1b3df9dd44c03457f8de846a6d8ac76e | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/4187084/snippet.py | 47fc8edb16f6917914298e0cefd3eb24d7a13d52 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,729 | py | # Name: EXIFmover.py
# Author: Brian Klug (@nerdtalker / [email protected])
# Purpose:
# Move Files into directory based on EXIF data make and model
# Designed to un-clusterfuck the Dropbox camera upload directory which is a mess of every
# JPEG and PNG ever if you use it like I do on a bunch of phones, and thus totally unwieldy
# and full of images sorted by date or else nothing sometimes, dropbox seems nondeterminstic
# Moves files into /[Image Make]+[Image Model]/ eg /Camera Uploads/LGE Nexus 4/
# Creates directory if it doesn't exist, moves into that directory if it exists
# Files without EXIF get moved into /nomake nomodel (EG screenshots / nonsense) except exifmover/exif.py
# This is experimental and one-way in a destructive sense, I take no responsibility
# if this absolutely destroys your directory structure for some reason
# I STRONGLY recommend making a copy of Camera Uploads, then running this on the copy, first
# Requires EXIF-PY to be installed and importable
# EXIF-PY can be obtained from https://github.com/ianare/exif-py
# Previous implementation used EXIF.py standalone, updated to work with installable version
# Run simply (eg from ipython "run exifmover.py" inside "Camera Upload")
# Tested on OS 10.8.2 and Python 2.7.3 EPD
# Tested on Windows XP and Python 2.7.3 EPD
# Tested on Ubuntu 11.10
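# Example of the resulting layout (per the description above): a JPEG whose
# EXIF reads Make "LGE", Model "Nexus 4" moves to "./LGE Nexus 4/"; files
# with no EXIF move to "./nomake nomodel/".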
try:
import exifread
except:
print "exifread was not found in the same directory as exifmover.py"
import os
import time
start_time=time.time()
path = os.getcwd()
dirList=os.listdir(path)
excludedfiles = ["EXIF.py","EXIFmover.py","exifmover.py","thumbs.db",".DS_Store","EXIF.pyc"]
for fname in dirList:
if os.path.isfile(fname):
if fname not in excludedfiles:
print "File name is " + fname
            f = open(fname, 'rb')  # EXIF parsing needs binary mode
            tags = {}  # reset so a failed read cannot reuse the previous file's tags
            try:
                tags = exifread.process_file(f)
            except:
                print "Couldn't read tag on " + fname
try:
make = tags['Image Make'].printable
except: make = 'nomake'
try:
model = tags['Image Model'].printable
except: model = 'nomodel'
src = path + "/" + fname
#print "source is " + src
dst = path + "/" + make + " " + model + "/"
#print "destination is " + dst
if os.path.isdir(dst) == False:
os.mkdir(dst)
#print "made" + dst
destination = dst+fname
f.close()
try:
os.rename(src,destination)
except:
print "Oh noes. That didn't work for some reason"
print 'Done. Execution took {:0.3f} seconds'.format((time.time() - start_time))
| [
"[email protected]"
] | |
6cc8562d0421b4e61baf344ba161216a74b334cb | e802ed36cbfb55b87654b8aa7932ae2fc2ae7d43 | /u05ps02q03.py | 46947989451ecf5e42a777c1666b562aa1a090d4 | [] | no_license | maryammouse/cs101 | 29f493ab421117fb9bc038da4de7c5bdc29ca7ac | 6c15855c7cdc24972a0ff370d417c5de8278ce9c | refs/heads/master | 2016-09-01T22:34:16.016963 | 2014-05-26T21:54:49 | 2014-05-26T21:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # Write a procedure, rotate which takes as its input a string of lower case
# letters, a-z, and spaces, and an integer n, and returns the string constructed
# by shifting each of the letters n steps, and leaving the spaces unchanged.
# Note that 'a' follows 'z'. You can use an additional procedure if you
# choose to as long as rotate returns the correct string.
# Note that n can be positive, negative or zero.
# I love being able to use code I've defined already! Love not
# starting from scratch :)
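# Reference sketch (editor's addition, not the graded solution): the same
# wraparound as one modular-arithmetic step, which also handles |n| > 26.
def shift_mod(letter, n):
    return chr((ord(letter) - ord('a') + n) % 26 + ord('a'))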
def shift_n_letters(letter, n):
if ord(letter) < 97: # just had to make
return ' ' # this one change
if ord(letter) + n > 122:
n = ord(letter) + n - 122
return chr(96 + n)
elif ord(letter) + n < 97:
n = 97 - (ord(letter) + n)
return chr(123 - n)
return chr(ord(letter) + n)
def rotate(word, n):
rotated = ''
for letter in word:
rotated += shift_n_letters(letter, n)
return rotated
print 'coralee' + 'sings'
print rotate ('sarah', 13)
#>>> 'fnenu'
print rotate('fnenu',13)
#>>> 'sarah'
print rotate('dave',5)
#>>>'ifaj'
print rotate('ifaj',-5)
#>>>'dave'
print rotate(("zw pfli tfuv nfibj tfiivtkcp pfl jyflcu "
"sv rscv kf ivru kyzj"),-17)
#>>> ???
| [
"[email protected]"
] | |
fbee9da62d0a7ff55bff70fe12034d97b7805070 | 55ceefc747e19cdf853e329dba06723a44a42623 | /_CodeTopics/LeetCode_contest/weekly/weekly2021/248-[smallweek]/248_1.py | 0810c9fe1f89962d294e01c337639653707396fc | [] | no_license | BIAOXYZ/variousCodes | 6c04f3e257dbf87cbe73c98c72aaa384fc033690 | ee59b82125f100970c842d5e1245287c484d6649 | refs/heads/master | 2023-09-04T10:01:31.998311 | 2023-08-26T19:44:39 | 2023-08-26T19:44:39 | 152,967,312 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | class Solution(object):
def buildArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
res = [-1] * n
for i in range(n):
res[i] = nums[nums[i]]
return res
"""
https://leetcode-cn.com/submissions/detail/192094213/
134 / 134 个通过测试用例
状态:通过
执行用时: 32 ms
内存消耗: 13.1 MB
"""
| [
"[email protected]"
] | |
71b966c7a8456e8f3cde13f79f2955b28d5a2c91 | d9b5fc6e35e56e182fe1bfe9bafd2562a5d9cf33 | /bluefly/areadetector_sim.py | 9e8a4cac9d449257fc4dc3b75dcff4ded04a82b3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | untzag/bluefly | 6406ef19b573c05a52bcd6cc53c27c5db1ca5cdf | 5f461998a3f629a5f07e8733ab937a0302fa92f6 | refs/heads/master | 2022-12-16T13:52:30.621420 | 2020-09-17T11:12:06 | 2020-09-17T11:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,981 | py | import asyncio
import h5py
import numpy as np
from bluesky.run_engine import get_bluesky_event_loop
from bluefly.areadetector import DetectorDriver, HDFWriter
from bluefly.motor import MotorDevice
from bluefly.simprovider import SimProvider
def make_gaussian_blob(width: int, height: int) -> np.ndarray:
"""Make a Gaussian Blob with float values in range 0..1"""
x, y = np.meshgrid(np.linspace(-1, 1, width), np.linspace(-1, 1, height))
d = np.sqrt(x * x + y * y)
blob = np.exp(-(d ** 2))
return blob
def interesting_pattern(x: float, y: float) -> float:
"""This function is interesting in x and y in range -10..10, returning
a float value in range 0..1
"""
z = 0.5 + (np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)) / 2
return z
DATA_PATH = "/entry/data/data"
UID_PATH = "/entry/uid"
SUM_PATH = "/entry/sum"
def sim_detector_logic(
p: SimProvider,
driver: DetectorDriver,
hdf: HDFWriter,
x: MotorDevice,
y: MotorDevice,
width: int = 320,
height: int = 240,
):
stopping = asyncio.Event(loop=get_bluesky_event_loop())
# The detector image we will modify for each image (0..255 range)
blob = make_gaussian_blob(width, height) * 255
hdf_file = None
p.set_value(driver.array_size_x, width)
p.set_value(driver.array_size_y, height)
@p.on_call(hdf.start)
async def do_hdf_start():
file_path = p.get_value(hdf.file_template) % (
p.get_value(hdf.file_path),
p.get_value(hdf.file_name),
)
nonlocal hdf_file
hdf_file = h5py.File(file_path, "w", libver="latest")
# Data written in a big stack, growing in that dimension
hdf_file.create_dataset(
DATA_PATH,
dtype=np.uint8,
shape=(1, height, width),
maxshape=(None, height, width),
)
for path, dtype in {UID_PATH: np.int32, SUM_PATH: np.float64}.items():
# Areadetector attribute datasets have the same dimesionality as the data
hdf_file.create_dataset(
path, dtype=dtype, shape=(1, 1, 1), maxshape=(None, 1, 1), fillvalue=-1
)
hdf_file.swmr_mode = True
@p.on_call(driver.start)
async def do_driver_start():
stopping.clear()
# areaDetector drivers start from array_counter + 1
offset = p.get_value(driver.array_counter) + 1
exposure = p.get_value(driver.acquire_time)
period = p.get_value(driver.acquire_period)
for i in range(p.get_value(driver.num_images)):
try:
# See if we got told to stop
await asyncio.wait_for(stopping.wait(), period)
except asyncio.TimeoutError:
# Carry on
pass
else:
# Stop now
break
uid = i + offset
# Resize the datasets so they fit
for path in (DATA_PATH, SUM_PATH, UID_PATH):
ds = hdf_file[path]
expand_to = tuple(max(*z) for z in zip((uid + 1, 1, 1), ds.shape))
ds.resize(expand_to)
intensity = interesting_pattern(
p.get_value(x.motor.readback), p.get_value(y.motor.readback)
)
detector_data = (blob * intensity * exposure / period).astype(np.uint8)
hdf_file[DATA_PATH][uid] = detector_data
hdf_file[UID_PATH][uid] = uid
hdf_file[SUM_PATH][uid] = np.sum(detector_data)
p.set_value(hdf.array_counter, p.get_value(hdf.array_counter) + 1)
@p.on_call(hdf.flush_now)
async def do_hdf_flush():
# Note that UID comes last so anyone monitoring knows the data is there
for path in (DATA_PATH, SUM_PATH, UID_PATH):
hdf_file[path].flush()
@p.on_call(hdf.stop)
async def do_hdf_close():
hdf_file.close()
@p.on_call(driver.stop)
async def do_driver_stop():
stopping.set()
| [
"[email protected]"
] | |
f32cc6d452d796bbeeafae91e91b5870cf55b46f | 4f57124af46dd2a73166239df9c53af561d5f5d6 | /venv/bin/neutron | 1189e745b04479bcb9c4c7059039624328a25156 | [] | no_license | briankoco/cc-scripts | 5db6e8c498d8ff103cde6c7e4914620cc5bb2c52 | 51e78f88e96c51cc5d4c8fe6debae45ab1953724 | refs/heads/master | 2018-09-17T15:07:12.800875 | 2018-06-05T20:30:35 | 2018-06-05T20:30:35 | 120,139,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/Users/briankoco/cc-scripts/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from neutronclient.shell import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
35be7f5c28be793d70e91298fc6b79f1a31dec25 | 64cf985225d14e1954ed91e5a261a465a44b0cc5 | /mirror/mirror/settings.py | 225f4f100303392553e2236166673a8b2600eb68 | [] | no_license | pinewoods/back-to-game | dee3c76e85186c86c6feaa0bd56635c1e460e6f0 | 824734bbd2e235886e3cb8e30d587949896a0d7e | refs/heads/master | 2021-01-21T12:26:50.102778 | 2015-05-13T14:13:37 | 2015-05-13T14:13:37 | 34,489,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | """
Django settings for mirror project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6!0h@jqz0cuc80p*tahm!q4j7kc=^zl8*)j!n*yh^@!w!(j==y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'sync_control',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mirror.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mirror.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',),
}
| [
"[email protected]"
] | |
92d63691e4e9d10e1eea6a25f16402a6962731df | c61a28aba19f7cdf9a5127e8a782bf115c265e70 | /apps/recruitpro/recruitpro/recruitpro/doctype/territory/test_territory.py | 8009c125ad483c92f2292298cee52bee7615b914 | [
"MIT"
] | permissive | sharmilaviji/RecruitPRO-NEW | fa72c8fc00f469a41798b1047c11dcc470fbc495 | dcfaedebe56b45acd6ddcab7e24c939b853a2c8c | refs/heads/master | 2021-05-26T12:14:12.611154 | 2020-04-27T04:40:50 | 2020-04-27T04:40:50 | 254,125,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, teampro and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestTerritory(unittest.TestCase):
pass
| [
"[email protected]"
] | |
29436f5e76326fb5b9a84bacfe0b06347836e309 | 8050ac9f2cb8cdad2cadea6c9038b96378cdff7e | /examples/tutorials/analysis-2d/ring_background.py | df2dd0fbab49bf43341c61af73687b95fa783d60 | [
"BSD-3-Clause"
] | permissive | luca-giunti/gammapy | 0d3af457d33e2c1f79d3bd054082decb6809f00f | 40fe44102105f6570cb4fd44deac2cccc98187c2 | refs/heads/master | 2023-02-21T15:57:57.973227 | 2022-09-22T20:39:33 | 2022-09-22T20:39:33 | 169,222,628 | 0 | 0 | null | 2019-02-05T10:24:00 | 2019-02-05T10:24:00 | null | UTF-8 | Python | false | false | 9,081 | py | """
Ring background map
===================
Create an excess (gamma-ray events) and a significance map extracting a ring background.
Context
-------
One of the challenges of IACT analysis is accounting for the large
residual hadronic emission. An excess map, assumed to be a map of only
gamma-ray events, requires a good estimate of the background. However,
in the absence of a solid template background model it is not possible to
obtain a reliable background model a priori. It was often found necessary
in classical Cherenkov astronomy to perform a local renormalization of
the existing templates, usually with a ring kernel. This assumes that
most of the events are background and requires to have an exclusion mask
to remove regions with bright signal from the estimation. To read more
about this method, see
`here. <https://arxiv.org/abs/astro-ph/0610959>`__
Objective
---------
Create an excess (gamma-ray events) map of MSH 15-52 as well as a
significance map to determine how solid the signal is.
Proposed approach
-----------------
The analysis workflow is roughly:
- Compute the sky maps keeping each observation separately using the `Analysis` class
- Estimate the background using the `RingBackgroundMaker`
- Compute the correlated excess and significance maps using the `ExcessMapEstimator`
The normalised background thus obtained can be used for general
modelling and fitting.
"""
######################################################################
# Setup
# -----
#
# As usual, we’ll start with some general imports…
#
# %matplotlib inline
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import numpy as np
from regions import CircleSkyRegion
from scipy.stats import norm
from gammapy.analysis import Analysis, AnalysisConfig
from gammapy.makers import RingBackgroundMaker
from gammapy.estimators import ExcessMapEstimator
from gammapy.datasets import MapDatasetOnOff
import logging
log = logging.getLogger(__name__)
######################################################################
# Check setup
# -----------
from gammapy.utils.check import check_tutorials_setup
check_tutorials_setup()
######################################################################
# Creating the config file
# ------------------------
#
# Now, we create a config file for out analysis. You may load this from
# disc if you have a pre-defined config file.
#
# In this example, we will use a few HESS runs on the pulsar wind nebula,
# MSH 15-52
#
# source_pos = SkyCoord.from_name("MSH 15-52")
source_pos = SkyCoord(228.32, -59.08, unit="deg")
config = AnalysisConfig()
# Select observations - 2.5 degrees from the source position
config.observations.datastore = "$GAMMAPY_DATA/hess-dl3-dr1/"
config.observations.obs_cone = {
"frame": "icrs",
"lon": source_pos.ra,
"lat": source_pos.dec,
"radius": 2.5 * u.deg,
}
config.datasets.type = "3d"
config.datasets.geom.wcs.skydir = {
"lon": source_pos.ra,
"lat": source_pos.dec,
"frame": "icrs",
} # The WCS geometry - centered on MSH 15-52
config.datasets.geom.wcs.width = {"width": "3 deg", "height": "3 deg"}
config.datasets.geom.wcs.binsize = "0.02 deg"
# Cutout size (for the run-wise event selection)
config.datasets.geom.selection.offset_max = 3.5 * u.deg
# We now fix the energy axis for the counts map - (the reconstructed energy binning)
config.datasets.geom.axes.energy.min = "0.5 TeV"
config.datasets.geom.axes.energy.max = "5 TeV"
config.datasets.geom.axes.energy.nbins = 10
# We need to extract the ring for each observation separately, hence, no stacking at this stage
config.datasets.stack = False
print(config)
######################################################################
# Getting the reduced dataset
# ---------------------------
#
# We now use the config file to do the initial data reduction which will
# then be used for a ring extraction
#
# %%time
# create the config
analysis = Analysis(config)
# for this specific case, we do not need fine bins in true energy
analysis.config.datasets.geom.axes.energy_true = (
analysis.config.datasets.geom.axes.energy
)
# First, get the required observations
analysis.get_observations()
print(analysis.config)
# %%time
# Data extraction
analysis.get_datasets()
######################################################################
# Extracting the ring background
# ------------------------------
#
# Since the ring background is extracted from real off events, we need to
# use the wstat statistics in this case. For this, we will use the
# `MapDatasetOnOFF` and the `RingBackgroundMaker` classes.
#
######################################################################
# Create exclusion mask
# ~~~~~~~~~~~~~~~~~~~~~
#
# First, we need to create an exclusion mask on the known sources. In this
# case, we need to mask only `MSH 15-52` but this depends on the sources
# present in our field of view.
#
# get the geom that we use
geom = analysis.datasets[0].counts.geom
energy_axis = analysis.datasets[0].counts.geom.axes["energy"]
geom_image = geom.to_image().to_cube([energy_axis.squash()])
# Make the exclusion mask
regions = CircleSkyRegion(center=source_pos, radius=0.3 * u.deg)
exclusion_mask = ~geom_image.region_mask([regions])
exclusion_mask.sum_over_axes().plot();
######################################################################
# For the present analysis, we use a ring with an inner radius of 0.5 deg
# and width of 0.3 deg.
#
ring_maker = RingBackgroundMaker(
r_in="0.5 deg", width="0.3 deg", exclusion_mask=exclusion_mask
)
######################################################################
# Create a stacked dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now, we extract the background for each dataset and then stack the maps
# together to create a single stacked map for further analysis
#
# %%time
energy_axis_true = analysis.datasets[0].exposure.geom.axes["energy_true"]
stacked_on_off = MapDatasetOnOff.create(
geom=geom_image, energy_axis_true=energy_axis_true, name="stacked"
)
for dataset in analysis.datasets:
# Ring extracting makes sense only for 2D analysis
dataset_on_off = ring_maker.run(dataset.to_image())
stacked_on_off.stack(dataset_on_off)
######################################################################
# This `stacked_on_off` has `on` and `off` counts and acceptance
# maps which we will use in all further analysis. The `acceptance` and
# `acceptance_off` maps are the system acceptance of gamma-ray like
# events in the `on` and `off` regions respectively.
#
print(stacked_on_off)
######################################################################
# Compute correlated significance and correlated excess maps
# ----------------------------------------------------------
#
# We need to convolve our maps with an appropriate smoothing kernel. The
# significance is computed according to the Li & Ma expression for ON and
# OFF Poisson measurements, see
# `here <https://ui.adsabs.harvard.edu/abs/1983ApJ...272..317L/abstract>`__.
# Since astropy convolution kernels only accept integers, we first convert
# our required size in degrees to int depending on our pixel size.
#
# Using a convolution radius of 0.04 degrees
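# (for reference: with the 0.02 deg map binsize configured above, 0.04 deg
# corresponds to int(0.04 / 0.02) = 2 pixels)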
estimator = ExcessMapEstimator(0.04 * u.deg, selection_optional=[])
lima_maps = estimator.run(stacked_on_off)
significance_map = lima_maps["sqrt_ts"]
excess_map = lima_maps["npred_excess"]
# We can plot the excess and significance maps
plt.figure(figsize=(10, 10))
ax1 = plt.subplot(221, projection=significance_map.geom.wcs)
ax2 = plt.subplot(222, projection=excess_map.geom.wcs)
ax1.set_title("Significance map")
significance_map.plot(ax=ax1, add_cbar=True)
ax2.set_title("Excess map")
excess_map.plot(ax=ax2, add_cbar=True)
######################################################################
# It is often important to look at the significance distribution outside
# the exclusion region to check that the background estimation is not
# contaminated by gamma-ray events. This can be the case when exclusion
# regions are not large enough. Typically, we expect the off distribution
# to be a standard normal distribution.
#
# create a 2D mask for the images
significance_map_off = significance_map * exclusion_mask
significance_all = significance_map.data[np.isfinite(significance_map.data)]
significance_off = significance_map_off.data[
np.isfinite(significance_map_off.data)
]
plt.hist(
significance_all,
density=True,
alpha=0.5,
color="red",
label="all bins",
bins=21,
)
plt.hist(
significance_off,
density=True,
alpha=0.5,
color="blue",
label="off bins",
bins=21,
)
# Now, fit the off distribution with a Gaussian
mu, std = norm.fit(significance_off)
x = np.linspace(-8, 8, 50)
p = norm.pdf(x, mu, std)
plt.plot(x, p, lw=2, color="black")
plt.legend()
plt.xlabel("Significance")
plt.yscale("log")
plt.ylim(1e-5, 1)
xmin, xmax = np.min(significance_all), np.max(significance_all)
plt.xlim(xmin, xmax)
print(f"Fit results: mu = {mu:.2f}, std = {std:.2f}")
| [
"[email protected]"
] | |
90840608526f68e58b13cf7c7de7a1412b61919a | 159cd20a8570f4acce8d32ed2d1b052be25e3936 | /exercises/models.py | b6f635bad52bf700883c097900546b6344241473 | [] | no_license | fredericosachweh/amostra2 | 95e717eb0ce7ef03326670326c00765ae8bc6a35 | e2d24a82462a735fc722f0b228be04a4495185c1 | refs/heads/master | 2021-01-17T14:48:32.079439 | 2014-10-09T00:08:29 | 2014-10-09T00:08:29 | 84,095,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,441 | py | from decimal import Decimal
import datetime
import random
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.template.defaultfilters import floatformat
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from clients.models import Client, Contract, Klass
from excludeddates.models import SystemDate
from utils.models import MultiDict, MultiDictManager
QUESTION_TYPES = [
('char', _('One line text')),
('boolean', _('Boolean option')),
('text', _('Multi line text')),
('image', _('Image')),
]
ANSWER_TYPES = [
('boolean', _('Boolean')),
('digit', _('Exact digit')),
('digit_or_blank', _('Exact digit or blank')),
('exact', _('Exact')),
('exact_or_blank', _('Exact or blank')),
('near', _('Approximate')),
('radio', _('Single choice')),
('checkbox', _('Multiple choices')),
]
class Matter(models.Model):
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(_('slug'))
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('matter')
verbose_name_plural = _('matters')
class Subject(models.Model):
matter = models.ForeignKey(Matter, verbose_name=_('matter'))
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(_('slug'))
description = models.TextField(_('description'), blank=True)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('subject')
verbose_name_plural = _('subjects')
class CategoryManager(models.Manager):
def eligible_for_demos(self):
return self.filter(eligible_for_demos=True)
class Category(models.Model):
subject = models.ForeignKey(Subject, verbose_name=_('subject'))
matter = models.ForeignKey(Matter,
verbose_name=_('matter'),
editable=False,
blank=True,
null=True) # autopopulated from the subject
name = models.CharField(_('name'), max_length=255, db_index=True)
slug = models.SlugField(_('slug'))
eligible_for_demos = models.BooleanField(_('eligible for demonstrations?'),
default=False
)
sites = models.ManyToManyField(Site, verbose_name=_('sites'))
objects = CategoryManager()
def __unicode__(self):
return self.name
class Meta:
ordering = ('subject', 'name')
verbose_name = _('category')
verbose_name_plural = _('categories')
def set_matter_from_subject(sender, instance, **kwargs):
instance.matter = instance.subject.matter
models.signals.pre_save.connect(set_matter_from_subject, sender=Category)
class QuestionType(models.Model):
category = models.ForeignKey(Category, verbose_name=_('category'))
type = models.CharField(_('type'), max_length=20, choices=QUESTION_TYPES)
group = models.SlugField(_('group name'), max_length=10)
group_short = models.SlugField(_('group short name'),
max_length=10,
help_text=_('Short group name for data '
'importing.')
)
class Meta:
ordering = ('group',)
verbose_name = _('question type')
verbose_name_plural = _('question types')
class AnswerType(models.Model):
category = models.ForeignKey(Category, verbose_name=_('category'))
type = models.CharField(_('type'), max_length=20, choices=ANSWER_TYPES)
group = models.SlugField(_('group name'), max_length=10)
group_short = models.SlugField(_('group short name'),
max_length=10,
help_text=_('Short group name for data '
'importing.')
)
next_group = models.CharField(_('next group'), max_length=50)
class Meta:
ordering = ('group',)
verbose_name = _('answer type')
verbose_name_plural = _('answer types')
class ExerciseManager(models.Manager):
def public(self):
return self.filter(is_public=True)
def get_random(self, used=[]):
return self.exclude(id__in=used).order_by('?')[0]
class Exercise(models.Model):
category = models.ForeignKey(Category, verbose_name=_('category'))
tags = models.CharField(_('tags'),
max_length=255,
blank=True,
db_index=True,
help_text=_('Comma separated list of tags to be '
'more specific than category')
)
description = models.TextField(_('description'))
created_at = models.DateTimeField(_('created at'), default=timezone.now)
# TODO prevent exercise changing after it was being used
times_used = models.IntegerField(_('times used'), default=0)
is_public = models.BooleanField(_('is public?'), default=True)
objects = ExerciseManager()
# filtering
filter1 = models.DecimalField(_('filter 1'),
max_digits=17,
decimal_places=9,
blank=True,
null=True,
db_index=True
)
filter2 = models.DecimalField(_('filter 2'),
max_digits=17,
decimal_places=9,
blank=True,
null=True,
db_index=True
)
# denornalization
matter = models.ForeignKey(Matter,
verbose_name=_('matter'),
editable=False,
blank=True,
null=True
) # autofilled from the category
subject = models.ForeignKey(Subject,
verbose_name=_('subject'),
editable=False,
blank=True,
null=True
) # autofilled from the category
def __unicode__(self):
return self.description
@models.permalink
def get_absolute_url(self):
return ('admin-chance-create', (), {'pk': self.pk})
@cached_property
def questions(self):
return self.question_set.as_dict()
@cached_property
def answers(self):
return self.answer_set.as_dict()
class Meta:
verbose_name = _('exercise')
verbose_name_plural = _('exercises')
def set_matter_and_subject_from_category(sender, instance, **kwargs):
instance.subject = instance.category.subject
instance.matter = instance.category.matter
models.signals.pre_save.connect(set_matter_and_subject_from_category,
sender=Exercise
)
class Question(models.Model):
exercise = models.ForeignKey(Exercise, verbose_name=_('exercise'))
type = models.CharField(_('type'), max_length=20, choices=QUESTION_TYPES)
position = models.PositiveIntegerField(_('position'), default=0)
group = models.SlugField(_('group'), max_length=20)
char_value = models.CharField(_('one line text'),
max_length=100,
blank=True
)
boolean_value = models.NullBooleanField(_('boolean value'),
blank=True,
null=True
)
text_value = models.TextField(_('multiline text'), blank=True)
image_value = models.ImageField(_('image'),
upload_to='exercises/question/image_value',
blank=True
)
objects = MultiDictManager()
class Meta:
ordering = ('group', '-position',)
verbose_name = _('question')
verbose_name_plural = _('questions')
def get_value(self):
if self.type == 'char':
return self.char_value
if self.type == 'boolean':
return self.boolean_value
elif self.type == 'text':
return self.text_value
elif self.type == 'image':
return self.image_value
else:
raise ValueError('Wrong question type')
def set_value(self, value):
if self.type == 'char':
self.char_value = value
elif self.type == 'boolean':
try:
self.boolean_value = bool(int(value))
except ValueError:
self.boolean_value = True
elif self.type == 'text':
self.text_value = value
elif self.type == 'image':
raise ValueError('Assign an image value directly in his attribute')
else:
raise ValueError('Wrong question type')
value = property(get_value, set_value)
class Answer(models.Model):
exercise = models.ForeignKey(Exercise, verbose_name=_('exercise'))
type = models.CharField(_('type'), max_length=20, choices=ANSWER_TYPES)
position = models.PositiveIntegerField(_('position'), default=0)
tabindex = models.IntegerField(_('tabindex'), default=1)
group = models.SlugField(_('group'), max_length=20)
value = models.DecimalField(_('value'),
max_digits=17,
decimal_places=9,
blank=True,
null=True
)
error_limit = models.DecimalField(_('error limit'),
max_digits=17,
decimal_places=9,
blank=True,
null=True
)
choices_map = models.TextField(_('choices'),
blank=True,
help_text=_('Set a choice per line, '
'start with a plus sign (+) '
'for a correct choice and a '
'minus sign (-) for a '
'incorrect choice.')
)
choices_sample = models.IntegerField(_('choices samples'),
blank=True,
null=True,
help_text=_('How many choices to '
'pick from all choices '
'map?')
)
objects = MultiDictManager()
class Meta:
ordering = ('group', '-position',)
verbose_name = _('answer')
verbose_name_plural = _('answers')
def get_random_choices(self):
"""
Returns as many random choices as choices_sample, certifying that all
correct choices are in this list. We don't want to use database
randoming as it does not scale well.
"""
correct = []
incorrect = []
for choice in self.choice_set.all():
if choice.is_correct:
correct.append(choice)
else:
incorrect.append(choice)
if len(correct) + len(incorrect) == self.choices_sample:
choices = correct + incorrect
else:
limit = self.choices_sample - len(correct)
if len(incorrect) > limit:
incorrect = random.sample(incorrect, limit)
choices = correct + incorrect
random.shuffle(choices)
return choices
def create_answer_choices(sender, instance, **kwargs):
"""
Creates choice instances based on the textual choices filled by the user.
"""
instance.choice_set.all().delete()
lines = instance.choices_map.split('\n')
choices = []
for line in filter(None, lines): # blank lines ignored
sign = line[0:1]
description = line[1:].strip()
if sign == '+':
is_correct = True
elif sign == '-':
is_correct = False
else:
raise ValueError('Start the answer by a plus sign for a correct '
'answer or a minus sign for an incorrect one.')
choices.append(Choice(answer=instance,
description=description,
is_correct=is_correct))
Choice.objects.bulk_create(choices)
models.signals.post_save.connect(create_answer_choices,
sender=Answer,
dispatch_uid='create-choices'
)
class ChoiceManager(models.Manager):
def correct(self):
return self.filter(is_correct=True)
def incorrect(self):
return self.filter(is_correct=False)
class Choice(models.Model):
answer = models.ForeignKey(Answer, verbose_name=_('answer'))
description = models.TextField(_('description'))
is_correct = models.BooleanField(_('is correct?'))
objects = ChoiceManager()
def __unicode__(self):
return self.description
class Meta:
ordering = ('id',)
verbose_name = _('choice')
verbose_name_plural = _('choices')
class Program(models.Model):
matter = models.ForeignKey(Matter, verbose_name=_('matter'))
name = models.CharField(_('name'), max_length=255)
description = models.TextField(_('description'), blank=True)
batteries_count = models.IntegerField(_('batteries count'),
default=0,
editable=False,
help_text=_('Would reflect the '
'number of activity '
'days.')
)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('exercises program')
verbose_name_plural = _('exercises programs')
class Module(models.Model):
program = models.ForeignKey(Program, verbose_name=_('exercises program'))
name = models.CharField(_('name'), max_length=255)
batteries_count = models.IntegerField(_('batteries count'),
default=0,
editable=False,
help_text=_('Would reflect the '
'number of activity '
'days.')
)
position = models.PositiveSmallIntegerField('Position')
syllabus = models.TextField(_('syllabus'), blank=True)
class Meta:
ordering = ('position',)
verbose_name = _('exercises program\'s module')
verbose_name_plural = _('exercises program\'s modules')
def __unicode__(self):
return self.name
class Battery(models.Model):
module = models.ForeignKey(Module,
verbose_name=_('exercises program\'s module')
)
categories = models.ManyToManyField(Category,
blank=True,
null=True, through='CategoryUsage',
help_text=_('Choose categories to '
'use exercises from')
)
name = models.CharField(_('optional name'),
max_length=255,
blank=True, help_text=_('Set an optional name '
'for the battery.')
)
position = models.PositiveSmallIntegerField(_('position'))
# denormalization:
matters_names = models.CharField(_('matters'),
max_length=255,
blank=True,
editable=False
)
subjects_names = models.CharField(_('subjects'),
max_length=255,
blank=True,
editable=False)
categories_names = models.TextField(_('categories'),
blank=True,
editable=False)
class Meta:
ordering = ('module', 'position',)
verbose_name = _('exercises battery')
verbose_name_plural = _('exercises batteries')
def __unicode__(self):
if self.name:
return self.name
else:
if self.id:
return '; '.join([unicode(c) for c in self.categories.all()])
else:
                return unicode(_('battery'))  # the object doesn't exist yet,
                                              # so we don't have m2m relationships
@property
def exercises_count(self):
return sum([x.exercises_count for x in self.categoryusage_set.all()])
def count_batteries(sender, instance, **kwargs):
"""
Cache the number of batteries in a module and his program.
"""
program = instance.module.program
program_batteries = Battery.objects.filter(module__program=program)
program.batteries_count = program_batteries.count()
program.save()
module = instance.module
module.batteries_count = module.battery_set.count()
module.save()
models.signals.post_save.connect(count_batteries, sender=Battery)
class CategoryUsage(models.Model):
"""
A battery/day may have one or more category usages.
"""
battery = models.ForeignKey(Battery, verbose_name=_('battery'))
category = models.ForeignKey(Category, verbose_name=_('category'))
exercises_count = models.IntegerField(_('exercises count'))
random_sorting = models.BooleanField(_('sorting'),
choices=[(False, _('Sequential')),
(True, _('Random'))]
)
    filter1_lower = models.DecimalField(_('filter 1 lower limit'), max_digits=17,
                                        decimal_places=9, blank=True, null=True, db_index=True)
    filter1_upper = models.DecimalField(_('filter 1 upper limit'), max_digits=17,
                                        decimal_places=9, blank=True, null=True, db_index=True)
    filter2_lower = models.DecimalField(_('filter 2 lower limit'), max_digits=17,
                                        decimal_places=9, blank=True, null=True, db_index=True)
    filter2_upper = models.DecimalField(_('filter 2 upper limit'), max_digits=17,
                                        decimal_places=9, blank=True, null=True, db_index=True)
tags = models.CharField(_('tags'), max_length=255, blank=True, db_index=True,
help_text=_('Comma separated list of tags to be more specific than category'))
class Meta:
ordering = ('battery', 'id')
verbose_name = _('battery\'s category usage')
verbose_name_plural = _('battery\'s categories usage')
def __unicode__(self):
return _('{count} exercises from {category}').format(
count=self.exercises_count,
category=self.category
)
def get_tags(self):
return [t.strip() for t in self.tags.split(',')]
def get_clauses(self):
"""
Returns a list of clauses to select exercises.
        The clauses include the upper limits, lower limits and all tags,
        joined through an AND operator.
"""
clauses = []
if self.filter1_lower is not None and self.filter1_upper is not None:
if self.filter1_lower == self.filter1_upper:
clauses.append(models.Q(filter1=self.filter1_lower))
else:
clauses.append(models.Q(filter1__gte=self.filter1_lower))
clauses.append(models.Q(filter1__lte=self.filter1_upper))
if self.filter2_lower is not None and self.filter2_upper is not None:
if self.filter2_lower == self.filter2_upper:
clauses.append(models.Q(filter2=self.filter2_lower))
else:
clauses.append(models.Q(filter2__gte=self.filter2_lower))
clauses.append(models.Q(filter2__lte=self.filter2_upper))
if self.tags:
for tag in self.get_tags():
clauses.append(models.Q(tags__contains=tag))
if clauses:
return reduce(lambda a, b: a & b, clauses)
else:
return None
def cache_matters_and_subjects_names(sender, instance, **kwargs):
battery = instance.battery
categories = battery.categories.all()
battery.categories_names = ', '.join([c.name for c in categories])
matters = Matter.objects.filter(category__in=categories).distinct()
battery.matters_names = ', '.join([m.name for m in matters])
subjects = Subject.objects.filter(category__in=categories).distinct()
battery.subjects_names = ', '.join([s.name for s in subjects])
battery.save()
models.signals.post_save.connect(cache_matters_and_subjects_names, sender=CategoryUsage)
class ProgramUsage(models.Model):
"""
A program usage is the application of a program for the users of a client.
"""
klass = models.OneToOneField(Klass, verbose_name=_('class'), related_name='program_usage')
program = models.ForeignKey(Program, verbose_name=_('exercises program'))
start_date = models.DateField(_('start date'), default=datetime.datetime.today)
end_date = models.DateField(_('end date'), help_text=_('E.g.: the end of school year.'))
# denormalization:
client = models.ForeignKey(Client, verbose_name=_('client'), editable=False)
contract = models.ForeignKey(Contract, verbose_name=_('contract'), editable=False)
class Meta:
verbose_name = _('program usage')
verbose_name_plural = _('program usages')
def __unicode__(self):
return _('%(program)s usage by %(client)s') % {'program': self.program, 'client': self.client}
def get_excluded_dates(self, how_many, starting_at):
"""
Gives a number of excluded dates starting at some date. If the program
has 100 days, we get 100 excluded dates to be ignored during filling.
"""
client = self.klass.contract.client
teacher = self.klass.teacher.teacher_set.get(client=client)
base_querysets = [
SystemDate.objects.all(),
client.clientdate_set.all(),
teacher.teacherdate_set.all(),
]
querysets = [qs.filter(date__gte=starting_at)[:how_many] for qs in base_querysets]
dates = [set(qs.values_list('date', flat=True)) for qs in querysets]
        # symmetric difference is a set method that returns items in A or B but
        # not in both. We are comparing 3 sets:
#
# - if the date appears once, it is excluded
# - if appears twice, it is included by the client or teacher
# - if appears three times, it is excluded by teacher
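        # Worked example (hypothetical dates): system={d1}, client={d1, d2},
        # teacher={d2, d3}: ({d1} ^ {d1, d2}) = {d2}; {d2} ^ {d2, d3} = {d3},
        # so only d3 (excluded once, by the teacher) stays excluded.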
return reduce(lambda a, b: a.symmetric_difference(b), dates)
def distribute_batteries(self, modules_sequence=None):
"""
        Create a battery schedule for each battery in the chosen program
        modules.
"""
one_day = datetime.timedelta(days=1) # cache for reuse
i = 0
battery_date = self.start_date
if isinstance(battery_date, datetime.datetime):
battery_date = battery_date.date()
excluded_dates = []
modules = list(self.program.module_set.all())
if modules_sequence:
modules.sort(key=lambda module: modules_sequence.index(module.pk))
batteries = []
while 1:
# get excluded dates on demand as needed
if not excluded_dates:
excluded_dates = self.get_excluded_dates(
how_many=self.program.batteries_count - i,
starting_at=battery_date
)
if battery_date in excluded_dates:
excluded_dates.remove(battery_date) # waste the date if excluded
else:
                # get the next battery from the current module, or move on to
                # the next module. Note that battery schedules aren't grouped
                # by module: once a class has started, the module boundary no
                # longer matters
if batteries:
battery = batteries.pop(0)
else:
if not modules:
break # we ended the batteries and modules!
else:
module = modules.pop(0)
batteries = list(module.battery_set.all())
battery = batteries.pop(0)
battery.batteryschedule_set.create(program_usage=self,
date=battery_date)
i += 1 # update counter to know how many excluded dates to take
battery_date = battery_date + one_day
# Stops the battery creation when exhaust available dates
if battery_date > self.end_date:
break
def cache_program_usage_client(sender, instance, **kwargs):
instance.contract = instance.klass.contract
instance.client = instance.klass.contract.client
models.signals.pre_save.connect(cache_program_usage_client, sender=ProgramUsage)
class BatteryScheduleManager(models.Manager):
def pending(self, ref=None):
"""
        Tells what the next schedules for a klass are, based on the date.
"""
if not ref:
ref = timezone.now().date()
return self.filter(date__gte=ref)
def pending_for_user(self, user, ref=None):
"""
        Tells which schedules are not done or not started for the given user.
"""
qs = self.pending(ref).filter(program_usage__klass__students=user)
late_klasses = user.took_klasses.late_payment()
qs = qs.exclude(program_usage__klass__in=late_klasses)
qs = qs.extra(
where=['select not count(is_done) from exercises_userbattery where ' \
'battery_schedule_id = exercises_batteryschedule.id and ' \
'user_id = %s and is_done = %s'],
params=[user.pk, True],
)
return qs.distinct()
class BatterySchedule(models.Model):
"""
The batteries must be done according to a predefined schedule. This
schedule is defined here.
"""
program_usage = models.ForeignKey(ProgramUsage, verbose_name=_('program usage'))
battery = models.ForeignKey(Battery, verbose_name=_('battery'))
date = models.DateField(_('date'))
attempts = models.IntegerField(_('attempts count'), default=1,
help_text=_('How many attempts the user will have for each exercise of this battery?'))
objects = BatteryScheduleManager()
class Meta:
ordering = ('date',)
verbose_name = _('battery schedule')
verbose_name_plural = _('batteries schedule')
def __unicode__(self):
return unicode(self.battery)
def is_past(self):
return self.date <= timezone.now().date()
class UserBattery(models.Model):
"""
    A user battery is the application of a battery for some user. The user must
    be part of the client that owns the program (of the battery). The exercises
    will be the flat list of exercises from the battery's categories plus the
    manually selected ones. If the battery has more exercises than needed, they
    will be randomized.
"""
user = models.ForeignKey(User, verbose_name=_('user'))
battery_schedule = models.ForeignKey(BatterySchedule, verbose_name=_('battery schedule'), blank=True, null=True)
exercises = models.ManyToManyField(Exercise, through='UserBatteryExercise', blank=True, verbose_name=_('exercises'))
is_done = models.BooleanField(_('is done?'), default=False)
# denormalization
battery = models.ForeignKey(Battery, verbose_name=_('battery'), editable=False)
correct_answers = models.IntegerField(_('correct answers count'), default=0)
exercises_count = models.IntegerField(_('exercises count'), default=0)
attempts_spent = models.DecimalField(_('attempts spent'), max_digits=6,
decimal_places=5, default=0, help_text=_('Average attempts count.'))
time_spent = models.IntegerField(_('time spent'), default=0,
help_text=_('Sum of the overall time spent.'))
class Meta:
ordering = ('battery__position',)
verbose_name = _('user battery')
verbose_name_plural = _('user batteries')
@property
def attempts(self):
return self.battery_schedule.attempts
@property
def score(self):
return self.correct_answers / Decimal(self.exercises_count) * 10
def copy_battery_from_schedule(sender, instance, **kwargs):
instance.battery = instance.battery_schedule.battery
models.signals.pre_save.connect(copy_battery_from_schedule, sender=UserBattery)
def fill_user_battery(sender, instance, **kwargs):
"""
Copies the list of battery exercises to the user battery when created.
    Limits exercises of each category using the filter1, filter2 and tags
    configuration, randomly sorts them when needed, and validates that there
    are enough exercises (as planned in the battery).
"""
if kwargs['created']:
exercises = []
for usage in instance.battery.categoryusage_set.all():
qs = usage.category.exercise_set.all()
clauses = usage.get_clauses()
if clauses:
qs = qs.filter(clauses)
if usage.random_sorting:
qs = qs.order_by('?')
usage_exercises = qs[:usage.exercises_count]
if len(usage_exercises) == usage.exercises_count:
exercises.extend(usage_exercises)
else:
                # Prevent a user from doing a battery with fewer exercises
                # than planned
                error = 'There are not {expected} exercises for the ' \
'category `{category}` with conditions ' \
'{conditions}. Found {found}.'
raise ValueError(error.format(expected=usage.exercises_count,
found=len(usage_exercises),
category=usage.category,
conditions=clauses))
bulk = []
for n, exercise in enumerate(exercises):
bulk.append(UserBatteryExercise(user_battery=instance,
exercise=exercise,
position=n+1))
UserBatteryExercise.objects.bulk_create(bulk)
        # denormalize the exercises count: do it after the bulk creation;
        # even if we know the number of exercises in the category usages,
        # we can't assert there are enough exercises as planned
instance.exercises_count = len(bulk)
instance.save()
models.signals.post_save.connect(fill_user_battery, sender=UserBattery)
class UserBatteryExerciseManager(models.Manager):
def correct(self):
return self.filter(is_correct=True)
def next(self, ref=None):
"""
        Gets the next exercise on the user battery: the first one with
        attempts still available after the current one or, failing that, the
        first such exercise from the list before it.
"""
qs = self.all().filter(
user_battery__battery_schedule__attempts__gt=models.F('attempts_spent')
)
if not ref:
return qs[0]
try:
forward = qs.filter(position__gt=ref.position)
return forward[0]
except IndexError:
try:
backward = qs.filter(position__lt=ref.position)
return backward[0]
except IndexError:
return None
class UserBatteryExercise(models.Model):
"""
Intermediate table for the exercises many to many on the user battery. Adds
a position field to let set a custom ordering for each user.
"""
user_battery = models.ForeignKey(UserBattery, verbose_name=_('user battery'))
exercise = models.ForeignKey(Exercise, verbose_name=_('exercise'))
position = models.PositiveSmallIntegerField(_('position'))
# fields denormalized from the attempts/chances
is_correct = models.NullBooleanField(_('is correct?'), blank=True, null=True)
attempts_spent = models.IntegerField(_('attempts left'), default=0)
time_spent = models.IntegerField(_('time spent'), default=0)
objects = UserBatteryExerciseManager()
class Meta:
ordering = ('position',)
unique_together = ('user_battery', 'position')
def next(self):
return self.user_battery.userbatteryexercise_set.next(ref=self)
def denormalize_exercise_results(sender, instance, **kwargs):
"""
When a user battery exercise is saved, updates the user battery to cache
score, time spent and attempts.
"""
user_battery = instance.user_battery
qs = user_battery.userbatteryexercise_set.all()
    # finished exercises have is_correct set (True/False); unfinished ones don't
qs = qs.extra(select={'done': "SUM(is_correct IS NOT NULL)"})
aggr = qs.aggregate(
correct=models.Sum('is_correct'),
attempts=models.Sum('attempts_spent'),
time=models.Sum('time_spent'),
)
user_battery.correct_answers = int(aggr['correct'])
user_battery.attempts_spent = int(aggr['attempts']) / Decimal(int(qs[0].done))
user_battery.time_spent = int(aggr['time'])
user_battery.save()
models.signals.post_save.connect(denormalize_exercise_results, sender=UserBatteryExercise)
class ChanceManager(models.Manager):
def finished(self):
return self.exclude(finished_at=None)
def unfinished(self):
return self.filter(finished_at=None)
class Chance(models.Model):
"""
A chance is the application of an exercise on a user battery. It can be a
standalone chance for testing purpouses.
"""
user_battery_exercise = models.ForeignKey(UserBatteryExercise,
verbose_name=_('user battery'), blank=True, null=True)
number = models.IntegerField(_('number'), default=1)
started_at = models.DateTimeField(_('started at'), default=timezone.now)
finished_at = models.DateTimeField(_('finished at'), blank=True, null=True)
# denormalization
exercise = models.ForeignKey(Exercise, verbose_name=_('exercise'))
user_battery = models.ForeignKey(UserBattery, verbose_name=_('user battery'),
blank=True, null=True)
objects = ChanceManager()
class Meta:
verbose_name = _('chance')
verbose_name_plural = _('chances')
unique_together = ('user_battery_exercise', 'number')
@models.permalink
def get_absolute_url(self):
return ('student:chance-detail', (), {'user_battery': self.user_battery.pk,
'position': self.user_battery_exercise.position,
'number': self.number})
@cached_property
def answers(self):
return self.chanceitem_set.as_dict()
@property
def attempts(self):
""" Tells the attempts limit. """
return self.user_battery.attempts
@property
def attempts_left(self):
return self.attempts - self.user_battery_exercise.attempts_spent
@property
def time_spent(self):
delta = self.finished_at - self.started_at
return delta.seconds
def is_finished(self):
return self.finished_at is not None
def is_correct(self):
""" Tells if the chance is correct if all his parts are correct. """
# TODO this is a bottleneck, use a better approach
for item in self.chanceitem_set.all():
if not item.is_correct():
return False
return True
def cache_exercise_and_user_battery(sender, instance, **kwargs):
"""
When the user_battery_exercise is set, we must denormalize the related
fields, otherwise, it may be a sample chance attached directly to an
exercise, we don't need to bother with.
"""
if instance.user_battery_exercise:
instance.exercise = instance.user_battery_exercise.exercise
instance.user_battery = instance.user_battery_exercise.user_battery
models.signals.pre_save.connect(cache_exercise_and_user_battery, sender=Chance)
def denormalize_chance_results(sender, instance, **kwargs):
"""
    Every time a chance is saved, count how many attempts were taken, whether
    the exercise is correct or not, and how much time was spent, and save it
    in the user exercise instance.
This will be useful to tell the next exercise with available attempts.
"""
user_exercise = instance.user_battery_exercise
# ignore standalone chances (done through admin panel)
if user_exercise is None:
return
attempts = list(user_exercise.chance_set.finished())
if attempts:
user_exercise.attempts_spent = len(attempts)
user_exercise.is_correct = any([x.is_correct() for x in attempts])
user_exercise.time_spent = sum([x.time_spent for x in attempts])
user_exercise.save()
models.signals.post_save.connect(denormalize_chance_results, sender=Chance)
class ChanceItemManager(models.Manager):
def as_dict(self):
return MultiDict(self.all(), group_callback=lambda c: c.answer.group)
class ChanceItem(models.Model):
chance = models.ForeignKey(Chance, verbose_name=_('chance'))
answer = models.ForeignKey(Answer, verbose_name=_('answer'))
value = models.DecimalField(_('value'), max_digits=17, decimal_places=9,
blank=True, null=True)
choices = models.ManyToManyField(Choice, verbose_name=_('choices'), blank=True, null=True)
objects = ChanceItemManager()
def has_choices(self):
""" Tells when a chance item uses the choices to store the answer data. """
return self.answer.type in ('radio', 'checkbox')
def is_correct(self):
if self.answer.type == 'boolean':
# boolean answers are 0 for false and are 1 for true values, this
# way, we can just compare the bool version of the values.
return bool(self.value) == bool(self.answer.value)
elif self.has_choices():
# choiced answers must have the answer and the user data equal for
# both the single choice and multiple choices
answer_choices = set([x.id for x in self.answer.choice_set.correct()])
chance_choices = set([x.id for x in self.choices.all()])
return answer_choices == chance_choices
else:
if (self.answer.type.endswith('or_blank') and not self.answer.value
and not self.value):
return True # the value can be any false value (0, '', None)
return self.value == self.answer.value
@property
def correct_value(self):
value = self.answer.value
if value is None:
return ''
elif not value and self.answer.type.endswith('or_blank'):
return ''
elif self.answer.type == 'boolean':
return bool(value)
elif self.answer.type.startswith('digit'):
return floatformat(value, 0) # avoid return 1.0
else:
return value
class Meta:
verbose_name = _('chance item')
verbose_name_plural = _('chance items')
| [
"[email protected]"
] | |
43e980b35ac84de26ba65cce540a4d5a8ca11f20 | 6d2cf861c46230de97d5244b7915057419f8125d | /sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/samples/async_samples/sample_recognize_entities_async.py | 6111e4ec8f8009664718b89662fd6c86bef2616e | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | pombredanne/azure-sdk-for-python | 075bfef712c014445bacdef4dd05aacd82673dcc | ebd73c3fc22dcf17be2a903f32bdd95d9090f283 | refs/heads/master | 2020-10-01T15:56:00.475346 | 2019-12-11T21:31:24 | 2019-12-11T21:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,464 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_entities_async.py
DESCRIPTION:
This sample demonstrates how to recognize named entities in a batch of documents.
USAGE:
python sample_recognize_entities_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your cognitive services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your text analytics subscription key
OUTPUT:
Document text: Microsoft was founded by Bill Gates and Paul Allen.
Entity: Microsoft Type: Organization Confidence Score: 1.0
Entity: Bill Gates Type: Person Confidence Score: 1.0
Entity: Paul Allen Type: Person Confidence Score: 1.0
Document text: I had a wonderful trip to Seattle last week.
Entity: Seattle Type: Location Confidence Score: 0.806
Entity: last week Type: DateTime Confidence Score: 0.8
Document text: I visited the Space Needle 2 times.
Entity: Space Needle Type: Organization Confidence Score: 0.922
Entity: 2 Type: Quantity Confidence Score: 0.8
"""
import os
import asyncio
class RecognizeEntitiesSampleAsync(object):
endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
async def recognize_entities_async(self):
# [START batch_recognize_entities_async]
from azure.cognitiveservices.language.textanalytics.aio import TextAnalyticsClient
text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=self.key)
documents = [
"Microsoft was founded by Bill Gates and Paul Allen.",
"I had a wonderful trip to Seattle last week.",
"I visited the Space Needle 2 times.",
]
async with text_analytics_client:
result = await text_analytics_client.recognize_entities(documents)
docs = [doc for doc in result if not doc.is_error]
for idx, doc in enumerate(docs):
print("\nDocument text: {}".format(documents[idx]))
for entity in doc.entities:
print("Entity: \t", entity.text, "\tType: \t", entity.type,
"\tConfidence Score: \t", round(entity.score, 3))
# [END batch_recognize_entities_async]
async def alternative_scenario_recognize_entities_async(self):
"""This sample demonstrates how to retrieve batch statistics, the
model version used, and the raw response returned from the service.
        It additionally shows an alternative way to pass in the input documents
        as dicts in the TextDocumentInput shape, supplying your own IDs and
        language hints along with the text.
"""
from azure.cognitiveservices.language.textanalytics.aio import TextAnalyticsClient
text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=self.key)
documents = [
{"id": "0", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "1", "language": "de", "text": "I had a wonderful trip to Seattle last week."},
{"id": "2", "language": "es", "text": "I visited the Space Needle 2 times."},
]
extras = []
def callback(resp):
extras.append(resp.statistics)
extras.append(resp.model_version)
extras.append(resp.raw_response)
async with text_analytics_client:
result = await text_analytics_client.recognize_entities(
documents,
show_stats=True,
model_version="latest",
response_hook=callback
)
async def main():
sample = RecognizeEntitiesSampleAsync()
await sample.recognize_entities_async()
await sample.alternative_scenario_recognize_entities_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"[email protected]"
] | |
7c13f5d302cee4d4a134bad26d56721e3d3d4450 | fb2a07bbf368076b83e31639c4152799fcccfdcd | /siteEtu/accountCreator.py | bac6f408ceb7f1c017bd05300e040e40f5634235 | [] | no_license | Trymal/TrombinoscopeCorrec | ae696473593c6d01b2533765c037e436f84fea98 | cebcfffa7df5a45d60da125b11523f933c0341e2 | refs/heads/master | 2022-07-08T08:21:07.441738 | 2020-05-10T15:51:48 | 2020-05-10T15:51:48 | 257,204,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import random
import hashlib
from setup import *
def saveAccount(account):
with open("./comptes.csv", "a") as fichier:
fichier.write(account)
nb_comptes = 150
for nb in range(nb_comptes):
    prenom = random.choice(PRENOMS)
    nom = random.choice(NOMS)
    filiere = random.choice(FILIERES)
    groupe = random.choice(GROUPES[filiere])

    mail = (prenom + "." + nom + "@gmail.com").lower()
    mdp = (nom[0] + prenom).lower().encode()
    # hash each password with a fresh sha256 object: reusing a single object
    # and calling update() repeatedly would fold every previous password into
    # each digest
    mdp_hash = hashlib.sha256(mdp).hexdigest()

    compte = "{};{};{};{};{};{};{}\n".format(nom, prenom, mail, filiere, groupe,
                                             mdp_hash, DIR_PP)
    saveAccount(compte)
| [
"[email protected]"
] | |
df11f48a42b4597d6517b5f0ba783ccce171f5c3 | 382df78024f588acea08039a0b0a9e24f297b6a3 | /python/pandas/ewma.py | bee90fab01dfaa6c378c98d0f5f613642413a4a8 | [] | no_license | id774/sandbox | c365e013654790bfa3cda137b0a64d009866d19b | aef67399893988628e0a18d53e71e2038992b158 | refs/heads/master | 2023-08-03T05:04:20.111543 | 2023-07-31T14:01:55 | 2023-07-31T14:01:55 | 863,038 | 4 | 1 | null | 2020-03-05T06:18:03 | 2010-08-26T01:05:11 | TeX | UTF-8 | Python | false | false | 275 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# pd.stats.moments.ewma was removed from pandas; use the Series.ewm() API
x = list(range(1, 50)) + list(range(50, 0, -1))
ma = pd.Series(np.array(x)).ewm(span=15).mean()

plt.plot(x, linewidth=1.0)
plt.plot(ma, linewidth=1.0)
plt.savefig("image.png")  # save before show(); show() can leave an empty figure
plt.show()
| [
"[email protected]"
] | |
0836dc0604cbb303c5dd9456b4ff69711997d47a | a4c04117685c3d28dd60bdfc45654cb2c935f746 | /rasterio_example.py | e5c89ffdf395eb38ede6fd20f40df78b4fb1d5d6 | [] | no_license | DKnapp64/General_Python_Codes | 1ca40779bb381d526d61c5d5fedcc76ae797c590 | 8d4669c82c17455640a0a3123f92760cd65cc26a | refs/heads/main | 2023-02-28T05:55:46.018482 | 2021-02-01T21:55:16 | 2021-02-01T21:55:16 | 335,077,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import rasterio
import numpy
# rasterio.drivers() is the pre-1.0 context manager (rasterio.Env() in 1.0+)
with rasterio.drivers():
    with rasterio.open('baikal_subset.tif') as src:
        b1, b2, b3, b4, b5 = src.read()
        profile = src.profile

    profile.update(
        dtype=rasterio.float64,
        count=1,
        compress='lzw')

    # cast to float so integer band arithmetic does not truncate the ratio
    ndvi = (b1.astype(numpy.float64) - b2) / (b1 + b2)

    with rasterio.open('ndvi_python.tif', 'w', **profile) as dst:
        dst.write(ndvi.astype(rasterio.float64), 1)
| [
"[email protected]"
] | |
710b13be4f032fd0628dfcd05eec3fa390d50805 | 9b0f102588acdc125cf8d4dfa1e51dffe12e1f2f | /apps/trade/urls.py | e272b659438321b9cf37e506f4956b8d547cecd9 | [] | no_license | aurthurm/sagetrader | d6e8fc3df5847acc05d134e7a39797ca6258d4ef | 97ca91b4f460fdf83837244e1dc5517fc0d74850 | refs/heads/master | 2023-01-24T23:23:59.129638 | 2020-07-21T09:19:46 | 2020-07-21T09:19:46 | 165,684,233 | 1 | 0 | null | 2023-01-03T16:11:42 | 2019-01-14T15:23:52 | CSS | UTF-8 | Python | false | false | 1,019 | py | from django.urls import path, include
from .views import *
app_name = 'trade'
urlpatterns = [
path('dashboard/', Dashboard.as_view(), name='dashboard'),
path('statistics/member', Statistics.as_view(), name='member-statistics'),
path('trading-plan/update/', UpdatePlan.as_view(), name='update-plan'),
path('trading-portfolio/update/', UpdatePortfolio.as_view(), name='update-portfolio'),
path('trading-portfolio/remove/', UpdatePortfolioRemove.as_view(), name='update-portfolio-remove'),
path('mine/', TradeList.as_view(), name='my-trades'),
path('strategies/mine/', StrategiesList.as_view(), name='my-strategies'),
path('place/', PlaceTrade.as_view(), name='place-trade'),
path('strategy/add/', StrategyCreate.as_view(), name='add-strategy'),
path('<int:trade_id>/detail', TradeDetail.as_view(), name='trade-detail'),
path('<int:trade_id>/followup', AddFollowUp.as_view(), name='trade-followup'),
path('<int:trade_id>/charts/', AddChart.as_view(), name='add-chart'),
]
| [
"[email protected]"
] | |
7c91cb81835b975d6fc34bfe38d28812aecc2704 | b0cdbad299f6174bfdb0fba173dbcf3889b82209 | /Object Oriented Programming/oops/class_11.py | a4d0fb9c8a988a9e9eb3c05db8bf43193528c9e4 | [] | no_license | deesaw/PythonD-06 | a33e676f1e0cfc13b4ea645c8b60547b198239ac | 3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa | refs/heads/master | 2023-03-18T08:24:42.030935 | 2021-03-02T14:15:09 | 2021-03-02T14:15:09 | 343,797,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | #usage of super
# parent class
class Deer:
def __init__(self):
print("Deer is in forest")
def whoisThis(self):
print("Parent - Deer")
def jump(self):
print("Deer is jumping")
# child class
class Stag(Deer):
def __init__(self):
print("Stag is ready")
def whoisThis(self):
print("Child - Stag")
def run(self):
print("Runs faster")
def parentwhoisThis(self):
        # super(Stag, self).whoisThis() is the equivalent explicit form
        super().whoisThis()
bucky = Stag()
bucky.whoisThis()
bucky.jump()
bucky.run()
bucky.parentwhoisThis()
| [
"[email protected]"
] | |
b055b4c4e168f77967e6a90390a200084d7360a1 | 265b451de88d2cbc7cb7af24e42c78b128cd80ee | /SQL/group-by-time.py | e6bf6985cece32effe0706b8b63c7b57e2c4140a | [] | no_license | ryanchang1005/Django-lab | c967f0bf01fc1bc5727194bb036e6e378cfe075c | 9186c07177d6f563a8b8dcd00464ac741f736ce9 | refs/heads/master | 2023-06-30T03:44:37.749216 | 2021-08-03T03:06:01 | 2021-08-03T03:06:01 | 288,667,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | """
Group by minute
Input:
6 2020-12-09 09:29:04.215836
7 2020-12-09 09:29:18.682882
8 2020-12-09 09:29:19.831128
9 2020-12-09 09:29:34.918914
10 2020-12-09 09:29:37.036617
11 2020-12-09 09:29:38.672620
12 2020-12-09 09:29:40.311120
13 2020-12-09 09:29:40.820016
14 2020-12-09 09:29:41.537559
15 2020-12-09 09:29:53.676690
16 2020-12-09 09:30:02.336606
17 2020-12-09 09:30:03.815859
18 2020-12-09 09:30:05.412835
19 2020-12-09 09:30:15.673348
20 2020-12-09 09:34:50.976693
21 2020-12-09 09:34:53.490987
Output:
[
{'log_minute': '17:29', 'log_count': 10},
{'log_minute': '17:30', 'log_count': 4},
{'log_minute': '17:34', 'log_count': 2}
]
"""
from django.db.models import Count
from django.db.models.functions import Trunc

# Truncate each timestamp to the minute, then count rows per minute.
qs = (Log.objects
      .annotate(log_minute=Trunc('created', 'minute'))
      .values('log_minute')
      .annotate(log_count=Count('id')))

data = [
    {'log_minute': it['log_minute'].strftime('%H:%M'),  # e.g. '17:29'
     'log_count': it['log_count']}
    for it in qs
]
data.sort(key=lambda it: it['log_minute']) | [
"[email protected]"
] | |
63cd9f828d4d3281fe784bc4ca8745855af72fc0 | b95aa6509714fba3c404ecbc7a0bdc7a7788dab9 | /mycpp/cppgen_pass.py | e5c9db590e6952230b5c4e2f8c004085f99efc77 | [] | no_license | moneytech/oil | 6ba13ff30b94302afc9e58ed4d686971a148a1a0 | f9d72ea96d8535913232d2a286a520debed97323 | refs/heads/master | 2021-02-15T11:54:39.027780 | 2020-03-04T06:30:50 | 2020-03-04T06:30:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72,949 | py | """
cppgen_pass.py - AST pass that prints C++ code.
"""
import io
import json # for "C escaping"
import sys
from typing import overload, Union, Optional, Any, Dict
from mypy.visitor import ExpressionVisitor, StatementVisitor
from mypy.types import (
Type, AnyType, NoneTyp, TupleType, Instance, Overloaded, CallableType,
UnionType, UninhabitedType, PartialType)
from mypy.nodes import (
Expression, Statement, Block, NameExpr, IndexExpr, MemberExpr, TupleExpr,
ExpressionStmt, AssignmentStmt, IfStmt, StrExpr, SliceExpr, FuncDef,
UnaryExpr, ComparisonExpr, CallExpr, IntExpr, ListComprehension)
import format_strings
from crash import catch_errors
from util import log
T = None  # placeholder for the visitor methods' generic return type
class UnsupportedException(Exception):
pass
def _GetCTypeForCast(type_expr):
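  """Return the C++ type for a cast() expression, e.g. cast(Token, x) -> 'Token*'."""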
if isinstance(type_expr, MemberExpr):
subtype_name = '%s::%s' % (type_expr.expr.name, type_expr.name)
elif isinstance(type_expr, IndexExpr):
# List[word_t] would be a problem.
# But worked around it in osh/word_parse.py
#subtype_name = 'List<word_t>'
raise AssertionError()
else:
subtype_name = type_expr.name
# Hack for now
if subtype_name != 'int':
subtype_name += '*'
return subtype_name
def _GetCastKind(module_path, subtype_name):
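  """Choose between static_cast and reinterpret_cast for a cast() call."""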
cast_kind = 'static_cast'
# Hack for the CastDummy in expr_to_ast.py
if 'expr_to_ast.py' in module_path:
for name in (
'sh_array_literal', 'command_sub', 'braced_var_sub',
'double_quoted', 'single_quoted',
# Another kind of hack, not because of CastDummy
'place_expr_t',
):
if name in subtype_name:
cast_kind = 'reinterpret_cast'
break
return cast_kind
def _GetContainsFunc(t):
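  """Return the C++ helper that implements Python's 'in' for RHS type t."""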
contains_func = None
if isinstance(t, Instance):
type_name = t.type.fullname()
if type_name == 'builtins.list':
contains_func = 'list_contains'
elif type_name == 'builtins.str':
contains_func = 'str_contains'
elif type_name == 'builtins.dict':
contains_func = 'dict_contains'
elif isinstance(t, UnionType):
# Special case for Optional[T] == Union[T, None]
if len(t.items) != 2:
raise NotImplementedError('Expected Optional, got %s' % t)
if not isinstance(t.items[1], NoneTyp):
raise NotImplementedError('Expected Optional, got %s' % t)
contains_func = _GetContainsFunc(t.items[0])
return contains_func # None checked later
def _CheckConditionType(t):
"""
strings, lists, and dicts shouldn't be used in boolean contexts, because that
doesn't translate to C++.
"""
if isinstance(t, Instance):
type_name = t.type.fullname()
if type_name == 'builtins.str':
return False
elif type_name == 'builtins.list':
return False
elif type_name == 'builtins.dict':
return False
return True
def get_c_type(t):
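  """Map a MyPy type to its C++ spelling, e.g. List[str] -> 'List<Str*>*'."""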
if isinstance(t, NoneTyp): # e.g. a function that doesn't return anything
return 'void'
if isinstance(t, AnyType):
# Note: this usually results in another compile-time error. We should get
# rid of the 'Any' types.
return 'void*'
# TODO: It seems better not to check for string equality, but that's what
# mypyc/genops.py does?
if isinstance(t, Instance):
type_name = t.type.fullname()
if type_name == 'builtins.int':
c_type = 'int'
elif type_name == 'builtins.float':
c_type = 'double'
elif type_name == 'builtins.bool':
c_type = 'bool'
elif type_name == 'builtins.str':
c_type = 'Str*'
elif type_name == 'builtins.list':
assert len(t.args) == 1, t.args
type_param = t.args[0]
inner_c_type = get_c_type(type_param)
c_type = 'List<%s>*' % inner_c_type
elif type_name == 'builtins.dict':
params = []
for type_param in t.args:
params.append(get_c_type(type_param))
c_type = 'Dict<%s>*' % ', '.join(params)
# TODO: we might want Writer and LineReader base classes, and
# mylib::Writer
# CFileWriter
# BufWriter
elif type_name == 'typing.IO':
c_type = 'mylib::File*'
else:
# fullname() => 'parse.Lexer'; name() => 'Lexer'
# NOTE: It would be nice to leave off the namespace if we're IN that
# namespace. But that is cosmetic.
# Check base class for runtime.SimpleObj so we can output
# expr_asdl::tok_t instead of expr_asdl::tok_t*. That is a enum, while
# expr_t is a "regular base class".
# NOTE: Could we avoid the typedef? If it's SimpleObj, just generate
# tok_e instead?
base_class_names = [b.type.fullname() for b in t.type.bases]
#log('** base_class_names %s', base_class_names)
# not sure why this isn't runtime.SimpleObj
if 'asdl.pybase.SimpleObj' in base_class_names:
is_pointer = ''
else:
is_pointer = '*'
parts = t.type.fullname().split('.')
c_type = '%s::%s%s' % (parts[-2], parts[-1], is_pointer)
elif isinstance(t, PartialType):
# For Any?
c_type = 'void*'
elif isinstance(t, UninhabitedType):
# UninhabitedType has a NoReturn flag
c_type = 'void'
elif isinstance(t, TupleType):
inner_c_types = []
for inner_type in t.items:
inner_c_types.append(get_c_type(inner_type))
c_type = 'Tuple%d<%s>*' % (len(t.items), ', '.join(inner_c_types))
elif isinstance(t, UnionType):
# Special case for Optional[T] == Union[T, None]
if len(t.items) != 2:
raise NotImplementedError('Expected Optional, got %s' % t)
if not isinstance(t.items[1], NoneTyp):
raise NotImplementedError('Expected Optional, got %s' % t)
c_type = get_c_type(t.items[0])
elif isinstance(t, CallableType):
# Function types are expanded
# Callable[[Parser, Token, int], arith_expr_t] =>
# arith_expr_t* (*f)(Parser*, Token*, int) nud;
ret_type = get_c_type(t.ret_type)
arg_types = [get_c_type(typ) for typ in t.arg_types]
c_type = '%s (*f)(%s)' % (ret_type, ', '.join(arg_types))
else:
raise NotImplementedError('MyPy type: %s %s' % (type(t), t))
return c_type
class Generate(ExpressionVisitor[T], StatementVisitor[None]):
def __init__(self, types: Dict[Expression, Type], const_lookup, f,
virtual=None, local_vars=None, fmt_ids=None,
decl=False, forward_decl=False):
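    """
    One visitor, three passes over each module: forward_decl writes
    'class Foo;' stubs, decl writes prototypes and member declarations
    (collecting local and member vars on the way), and the default pass
    writes the definitions.
    """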
self.types = types
self.const_lookup = const_lookup
self.f = f
self.virtual = virtual
# local_vars: FuncDef node -> list of type, var
# This is different from member_vars because we collect it in the 'decl'
# phase. But then write it in the definition phase.
self.local_vars = local_vars
self.fmt_ids = fmt_ids
self.fmt_funcs = io.StringIO()
self.decl = decl
self.forward_decl = forward_decl
self.unique_id = 0
self.indent = 0
self.local_var_list = [] # Collected at assignment
self.prepend_to_block = None # For writing vars after {
self.in_func_body = False
self.in_return_expr = False
# This is cleared when we start visiting a class. Then we visit all the
# methods, and accumulate the types of everything that looks like
# self.foo = 1. Then we write C++ class member declarations at the end
# of the class.
# This is all in the 'decl' phase.
self.member_vars = {} # type: Dict[str, Type]
self.current_class_name = None # for prototypes
self.imported_names = set() # For module::Foo() vs. self.foo
def log(self, msg, *args):
ind_str = self.indent * ' '
log(ind_str + msg, *args)
def write(self, msg, *args):
if self.decl or self.forward_decl:
return
if args:
msg = msg % args
self.f.write(msg)
# Write respecting indent
def write_ind(self, msg, *args):
if self.decl or self.forward_decl:
return
ind_str = self.indent * ' '
if args:
msg = msg % args
self.f.write(ind_str + msg)
# A little hack to reuse this pass for declarations too
def decl_write(self, msg, *args):
if args:
msg = msg % args
self.f.write(msg)
def decl_write_ind(self, msg, *args):
ind_str = self.indent * ' '
if args:
msg = msg % args
self.f.write(ind_str + msg)
#
# COPIED from IRBuilder
#
@overload
def accept(self, node: Expression) -> T: ...
@overload
def accept(self, node: Statement) -> None: ...
def accept(self, node: Union[Statement, Expression]) -> Optional[T]:
with catch_errors(self.module_path, node.line):
if isinstance(node, Expression):
try:
res = node.accept(self)
#res = self.coerce(res, self.node_type(node), node.line)
# If we hit an error during compilation, we want to
# keep trying, so we can produce more error
# messages. Generate a temp of the right type to keep
# from causing more downstream trouble.
except UnsupportedException:
res = self.alloc_temp(self.node_type(node))
return res
else:
try:
node.accept(self)
except UnsupportedException:
pass
return None
# Not in superclasses:
def visit_mypy_file(self, o: 'mypy.nodes.MypyFile') -> T:
# Skip some stdlib stuff. A lot of it is brought in by 'import
# typing'.
if o.fullname() in (
'__future__', 'sys', 'types', 'typing', 'abc', '_ast', 'ast',
'_weakrefset', 'collections', 'cStringIO', 're', 'builtins'):
# These module are special; their contents are currently all
# built-in primitives.
return
self.log('')
self.log('mypyfile %s', o.fullname())
mod_parts = o.fullname().split('.')
if self.forward_decl:
comment = 'forward declare'
elif self.decl:
comment = 'declare'
else:
comment = 'define'
self.decl_write_ind('namespace %s { // %s\n', mod_parts[-1], comment)
self.module_path = o.path
if self.forward_decl:
self.indent += 1
self.log('defs %s', o.defs)
for node in o.defs:
# skip module docstring
if (isinstance(node, ExpressionStmt) and
isinstance(node.expr, StrExpr)):
continue
self.accept(node)
# Write fmtX() functions inside the namespace.
if self.decl:
self.decl_write(self.fmt_funcs.getvalue())
self.fmt_funcs = io.StringIO() # clear it for the next file
if self.forward_decl:
self.indent -= 1
self.decl_write('\n')
self.decl_write_ind(
'} // %s namespace %s\n', comment, mod_parts[-1])
self.decl_write('\n')
# NOTE: Copied ExpressionVisitor and StatementVisitor nodes below!
# LITERALS
def visit_int_expr(self, o: 'mypy.nodes.IntExpr') -> T:
self.write(str(o.value))
def visit_str_expr(self, o: 'mypy.nodes.StrExpr') -> T:
self.write(self.const_lookup[o])
def visit_bytes_expr(self, o: 'mypy.nodes.BytesExpr') -> T:
pass
def visit_unicode_expr(self, o: 'mypy.nodes.UnicodeExpr') -> T:
pass
def visit_float_expr(self, o: 'mypy.nodes.FloatExpr') -> T:
pass
def visit_complex_expr(self, o: 'mypy.nodes.ComplexExpr') -> T:
pass
# Expressions
def visit_ellipsis(self, o: 'mypy.nodes.EllipsisExpr') -> T:
pass
def visit_star_expr(self, o: 'mypy.nodes.StarExpr') -> T:
pass
def visit_name_expr(self, o: 'mypy.nodes.NameExpr') -> T:
if o.name == 'None':
self.write('nullptr')
return
if o.name == 'True':
self.write('true')
return
if o.name == 'False':
self.write('false')
return
if o.name == 'self':
self.write('this')
return
self.write(o.name)
def visit_member_expr(self, o: 'mypy.nodes.MemberExpr') -> T:
t = self.types[o]
if o.expr:
#log('member o = %s', o)
# This is an approximate hack that assumes that locals don't shadow
# imported names. Might be a problem with names like 'word'?
if (isinstance(o.expr, NameExpr) and (
o.expr.name in self.imported_names or
o.expr.name in ('mylib', 'libc', 'posix') or
o.name == '__init__'
)):
op = '::'
else:
op = '->' # Everything is a pointer
self.accept(o.expr)
self.write(op)
self.write('%s', o.name)
def visit_yield_from_expr(self, o: 'mypy.nodes.YieldFromExpr') -> T:
pass
def visit_yield_expr(self, o: 'mypy.nodes.YieldExpr') -> T:
pass
def visit_call_expr(self, o: 'mypy.nodes.CallExpr') -> T:
if o.callee.name == 'isinstance':
      assert len(o.args) == 2, o.args
obj = o.args[0]
typ = o.args[1]
if 0:
log('obj %s', obj)
log('typ %s', typ)
self.accept(obj)
self.write('->tag_() == ')
assert isinstance(typ, NameExpr), typ
# source__CFlag -> source_e::CFlag
tag = typ.name.replace('__', '_e::')
self.write(tag)
return
# return cast(sh_array_literal, tok)
# -> return static_cast<sh_array_literal*>(tok)
# TODO: Consolidate this with AssignmentExpr logic.
if o.callee.name == 'cast':
call = o
type_expr = call.args[0]
subtype_name = _GetCTypeForCast(type_expr)
cast_kind = _GetCastKind(self.module_path, subtype_name)
self.write('%s<%s>(', cast_kind, subtype_name)
self.accept(call.args[1]) # variable being casted
self.write(')')
return
# Translate printf-style vargs for some functions, e.g.
#
# p_die('foo %s', x, token=t)
# =>
# p_die(fmt1(x), t)
#
    # And then we need 3 or 4 versions of p_die() for the rest of the
    # tokens.
# Others:
# - errfmt.Print
# - debug_f.log()?
# Maybe I should rename them all printf()
# or fprintf()? Except p_die() has extra args
if o.callee.name == 'log' or o.callee.name == 'stderr_line':
args = o.args
if len(args) == 1: # log(CONST)
self.write('println_stderr(')
self.accept(args[0])
self.write(')')
return
rest = args[1:]
if self.decl:
fmt = args[0].value
fmt_types = [self.types[arg] for arg in rest]
temp_name = self._WriteFmtFunc(fmt, fmt_types)
self.fmt_ids[o] = temp_name
# DEFINITION PASS: Write the call
self.write('println_stderr(%s(' % self.fmt_ids[o])
for i, arg in enumerate(rest):
if i != 0:
self.write(', ')
self.accept(arg)
self.write('))')
return
# TODO: Consolidate X_die() and log()? It has an extra arg though.
if o.callee.name in ('p_die', 'e_die', 'e_strict', 'e_usage'):
args = o.args
      if len(args) == 1: # e.g. p_die(CONST)
self.write('%s(' % o.callee.name)
self.accept(args[0])
self.write(')')
return
has_keyword_arg = o.arg_names[-1] is not None
if has_keyword_arg:
rest = args[1:-1]
else:
rest = args[1:]
      # If there are no extra format args, the call must look like
      # e_die('constant string'), possibly with a trailing keyword arg.
if not rest:
pass
if self.decl:
fmt_arg = args[0]
if isinstance(fmt_arg, StrExpr):
fmt_types = [self.types[arg] for arg in rest]
temp_name = self._WriteFmtFunc(fmt_arg.value, fmt_types)
self.fmt_ids[o] = temp_name
else:
# oil_lang/expr_to_ast.py uses RANGE_POINT_TOO_LONG, etc.
self.fmt_ids[o] = "dynamic_fmt_dummy"
# Should p_die() be in mylib?
# DEFINITION PASS: Write the call
self.write('%s(%s(' % (o.callee.name, self.fmt_ids[o]))
for i, arg in enumerate(rest):
if i != 0:
self.write(', ')
self.accept(arg)
if has_keyword_arg:
self.write('), ')
self.accept(args[-1])
else:
self.write(')')
self.write(')')
return
callee_name = o.callee.name
callee_type = self.types[o.callee]
# e.g. int() takes str, float, etc. It doesn't matter for translation.
if isinstance(callee_type, Overloaded):
if 0:
for item in callee_type.items():
self.log('item: %s', item)
if isinstance(callee_type, CallableType):
# If the function name is the same as the return type, then add 'new'.
# f = Foo() => f = new Foo().
ret_type = callee_type.ret_type
if 0:
log('callee %s', o.callee)
log('callee name %s', callee_name)
if isinstance(ret_type, Instance):
log('ret_type name %s', ret_type.type.name())
log('---')
# str(i) doesn't need new. For now it's a free function.
# TODO: rename int_to_str? or Str::from_int()?
if callee_name not in ('str',) and isinstance(ret_type, Instance):
ret_type_name = ret_type.type.name()
# HACK: Const is the callee; expr__Const is the return type
if (callee_name == ret_type_name or
ret_type_name.endswith('__' + callee_name)):
self.write('new ')
# Namespace.
if callee_name == 'int': # int('foo') in Python conflicts with keyword
self.write('to_int')
else:
self.accept(o.callee) # could be f() or obj.method()
self.write('(')
# Don't pass any args to AssertionError()
if callee_name != 'AssertionError':
for i, arg in enumerate(o.args):
if i != 0:
self.write(', ')
self.accept(arg)
self.write(')')
# TODO: look at keyword arguments!
#self.log(' arg_kinds %s', o.arg_kinds)
#self.log(' arg_names %s', o.arg_names)
def _WriteFmtFunc(self, fmt, fmt_types):
"""Append a fmtX() function to a buffer.
Returns:
the temp fmtX() name we used.
"""
temp_name = 'fmt%d' % self.fmt_ids['_counter']
self.fmt_ids['_counter'] += 1
fmt_parts = format_strings.Parse(fmt)
self.fmt_funcs.write('Str* %s(' % temp_name)
for i, typ in enumerate(fmt_types):
if i != 0:
self.fmt_funcs.write(', ');
self.fmt_funcs.write('%s a%d' % (get_c_type(typ), i))
self.fmt_funcs.write(') {\n')
self.fmt_funcs.write(' gBuf.reset();\n')
for part in fmt_parts:
if isinstance(part, format_strings.LiteralPart):
# MyPy does bad escaping.
# NOTE: We could do this in the CALLER to _WriteFmtFunc?
byte_string = bytes(part.s, 'utf-8')
# In Python 3
# >>> b'\\t'.decode('unicode_escape')
# '\t'
raw_string = format_strings.DecodeMyPyString(part.s)
n = len(raw_string) # NOT using part.strlen
escaped = json.dumps(raw_string)
self.fmt_funcs.write(
' gBuf.write_const(%s, %d);\n' % (escaped, n))
elif isinstance(part, format_strings.SubstPart):
self.fmt_funcs.write(
' gBuf.format_%s(a%d);\n' %
(part.char_code, part.arg_num))
else:
raise AssertionError(part)
self.fmt_funcs.write(' return gBuf.getvalue();\n')
self.fmt_funcs.write('}\n')
self.fmt_funcs.write('\n')
return temp_name
def visit_op_expr(self, o: 'mypy.nodes.OpExpr') -> T:
c_op = o.op
# a + b when a and b are strings. (Can't use operator overloading
# because they're pointers.)
left_type = self.types[o.left]
right_type = self.types[o.right]
# NOTE: Need get_c_type to handle Optional[Str*] in ASDL schemas.
# Could tighten it up later.
left_ctype = get_c_type(left_type)
right_ctype = get_c_type(right_type)
#if c_op == '+':
if 0:
self.log('*** %r', c_op)
self.log('%s', o.left)
self.log('%s', o.right)
#self.log('t0 %r', t0.type.fullname())
#self.log('t1 %r', t1.type.fullname())
self.log('left_ctype %r', left_ctype)
self.log('right_ctype %r', right_ctype)
self.log('')
if left_ctype == right_ctype == 'Str*' and c_op == '+':
self.write('str_concat(')
self.accept(o.left)
self.write(', ')
self.accept(o.right)
self.write(')')
return
if left_ctype == 'Str*' and right_ctype == 'int' and c_op == '*':
self.write('str_repeat(')
self.accept(o.left)
self.write(', ')
self.accept(o.right)
self.write(')')
return
# [None] * 3 => list_repeat(None, 3)
if left_ctype.startswith('List<') and right_ctype == 'int' and c_op == '*':
self.write('list_repeat(')
self.accept(o.left.items[0])
self.write(', ')
self.accept(o.right)
self.write(')')
return
# RHS can be primitive or tuple
if left_ctype == 'Str*' and c_op == '%':
if not isinstance(o.left, StrExpr):
raise AssertionError('Expected constant format string, got %s' % o.left)
#log('right_type %s', right_type)
if isinstance(right_type, Instance):
fmt_types = [right_type]
elif isinstance(right_type, TupleType):
fmt_types = right_type.items
# Handle Optional[str]
elif (isinstance(right_type, UnionType) and
len(right_type.items) == 2 and
isinstance(right_type.items[1], NoneTyp)):
fmt_types = [right_type.items[0]]
else:
raise AssertionError(right_type)
# Write a buffer with fmtX() functions.
if self.decl:
fmt = o.left.value
temp_name = self._WriteFmtFunc(fmt, fmt_types)
self.fmt_ids[o] = temp_name
# In the definition pass, write the call site.
self.write('%s(' % self.fmt_ids[o])
if isinstance(right_type, TupleType):
for i, item in enumerate(o.right.items):
if i != 0:
self.write(', ')
self.accept(item)
else: # '[%s]' % x
self.accept(o.right)
self.write(')')
return
self.accept(o.left)
self.write(' %s ', c_op)
self.accept(o.right)
def visit_comparison_expr(self, o: 'mypy.nodes.ComparisonExpr') -> T:
# Make sure it's binary
assert len(o.operators) == 1, o.operators
assert len(o.operands) == 2, o.operands
operator = o.operators[0]
left = o.operands[0]
right = o.operands[1]
# Assume is and is not are for None / nullptr comparison.
if operator == 'is': # foo is None => foo == nullptr
self.accept(o.operands[0])
self.write(' == ')
self.accept(o.operands[1])
return
if operator == 'is not': # foo is not None => foo != nullptr
self.accept(o.operands[0])
self.write(' != ')
self.accept(o.operands[1])
return
# TODO: Change Optional[T] to T for our purposes?
t0 = self.types[left]
t1 = self.types[right]
# 0: not a special case
# 1: str
# 2: Optional[str] which is Union[str, None]
left_type = 0 # not a special case
right_type = 0 # not a special case
if isinstance(t0, Instance) and t0.type.fullname() == 'builtins.str':
left_type = 1
if (isinstance(t0, UnionType) and len(t0.items) == 2 and
isinstance(t0.items[1], NoneTyp)):
left_type += 1
if isinstance(t1, Instance) and t1.type.fullname() == 'builtins.str':
right_type = 1
if (isinstance(t1, UnionType) and len(t1.items) == 2 and
isinstance(t1.items[1], NoneTyp)):
right_type += 1
if left_type > 0 and right_type > 0 and operator in ('==', '!='):
if operator == '!=':
self.write('!(')
# NOTE: This could also be str_equals(left, right)? Does it make a
# difference?
if left_type > 1 or right_type > 1:
self.write('maybe_str_equals(')
else:
self.write('str_equals(')
self.accept(left)
self.write(', ')
self.accept(right)
self.write(')')
if operator == '!=':
self.write(')')
return
# Note: we could get rid of this altogether and rely on C++ function
# overloading. But somehow I like it more explicit, closer to C (even
# though we use templates).
contains_func = _GetContainsFunc(t1)
if operator == 'in':
if isinstance(right, TupleExpr):
# x in (1, 2, 3) => (x == 1 || x == 2 || x == 3)
self.write('(')
for i, item in enumerate(right.items):
if i != 0:
self.write(' || ')
self.accept(left)
self.write(' == ')
self.accept(item)
self.write(')')
return
assert contains_func, "RHS of 'in' has type %r" % t1
# x in mylist => list_contains(mylist, x)
self.write('%s(', contains_func)
self.accept(right)
self.write(', ')
self.accept(left)
self.write(')')
return
if operator == 'not in':
if isinstance(right, TupleExpr):
# x not in (1, 2, 3) => (x != 1 && x != 2 && x != 3)
self.write('(')
for i, item in enumerate(right.items):
if i != 0:
self.write(' && ')
self.accept(left)
self.write(' != ')
self.accept(item)
self.write(')')
return
assert contains_func, t1
# x not in mylist => !list_contains(mylist, x)
self.write('!%s(', contains_func)
self.accept(right)
self.write(', ')
self.accept(left)
self.write(')')
return
# Default case
self.accept(o.operands[0])
self.write(' %s ', o.operators[0])
self.accept(o.operands[1])
def visit_cast_expr(self, o: 'mypy.nodes.CastExpr') -> T:
pass
def visit_reveal_expr(self, o: 'mypy.nodes.RevealExpr') -> T:
pass
def visit_super_expr(self, o: 'mypy.nodes.SuperExpr') -> T:
pass
def visit_assignment_expr(self, o: 'mypy.nodes.AssignmentExpr') -> T:
pass
def visit_unary_expr(self, o: 'mypy.nodes.UnaryExpr') -> T:
# e.g. a[-1] or 'not x'
if o.op == 'not':
op_str = '!'
else:
op_str = o.op
self.write(op_str)
self.accept(o.expr)
def visit_list_expr(self, o: 'mypy.nodes.ListExpr') -> T:
list_type = self.types[o]
#self.log('**** list_type = %s', list_type)
c_type = get_c_type(list_type)
assert c_type.endswith('*'), c_type
c_type = c_type[:-1] # HACK TO CLEAN UP
if len(o.items) == 0:
self.write('new %s()' % c_type)
else:
      # Use an initializer list. Lists are MUTABLE so we can't pull them to
# the top level.
self.write('new %s({' % c_type)
for i, item in enumerate(o.items):
if i != 0:
self.write(', ')
self.accept(item)
# TODO: const_lookup
self.write('})')
def visit_dict_expr(self, o: 'mypy.nodes.DictExpr') -> T:
dict_type = self.types[o]
c_type = get_c_type(dict_type)
assert c_type.endswith('*'), c_type
c_type = c_type[:-1] # HACK TO CLEAN UP
self.write('new %s(' % c_type)
if o.items:
self.write('{')
for i, item in enumerate(o.items):
        # TODO: emit the key/value pairs with an initializer list; until
        # then, items of non-empty dict literals are silently dropped.
        pass
self.write('}')
self.write(')')
def visit_tuple_expr(self, o: 'mypy.nodes.TupleExpr') -> T:
tuple_type = self.types[o]
c_type = get_c_type(tuple_type)
assert c_type.endswith('*'), c_type
c_type = c_type[:-1] # HACK TO CLEAN UP
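    # Tuples in return position are constructed without 'new'; they are
    # returned by value rather than heap-allocated.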
maybe_new = '' if self.in_return_expr else 'new '
if len(o.items) == 0:
self.write('(%s%s())' % (maybe_new, c_type))
else:
      # Use an initializer list. Lists are MUTABLE so we can't pull them to
# the top level.
self.write('(%s%s(' % (maybe_new, c_type))
for i, item in enumerate(o.items):
if i != 0:
self.write(', ')
self.accept(item)
# TODO: const_lookup
self.write('))')
def visit_set_expr(self, o: 'mypy.nodes.SetExpr') -> T:
pass
def visit_index_expr(self, o: 'mypy.nodes.IndexExpr') -> T:
self.accept(o.base)
#base_type = self.types[o.base]
#self.log('*** BASE TYPE %s', base_type)
if isinstance(o.index, SliceExpr):
self.accept(o.index) # method call
else:
# it's hard syntactically to do (*a)[0], so do it this way.
self.write('->index(')
self.accept(o.index)
self.write(')')
def visit_type_application(self, o: 'mypy.nodes.TypeApplication') -> T:
pass
def visit_lambda_expr(self, o: 'mypy.nodes.LambdaExpr') -> T:
pass
def visit_list_comprehension(self, o: 'mypy.nodes.ListComprehension') -> T:
pass
def visit_set_comprehension(self, o: 'mypy.nodes.SetComprehension') -> T:
pass
def visit_dictionary_comprehension(self, o: 'mypy.nodes.DictionaryComprehension') -> T:
pass
def visit_generator_expr(self, o: 'mypy.nodes.GeneratorExpr') -> T:
pass
def visit_slice_expr(self, o: 'mypy.nodes.SliceExpr') -> T:
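    """Write the ->slice(begin, end) call; the base was already written
    by visit_index_expr."""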
self.write('->slice(')
if o.begin_index:
self.accept(o.begin_index)
else:
      self.write('0') # implicit beginning
if o.end_index:
self.write(', ')
self.accept(o.end_index)
self.write(')')
if o.stride:
raise AssertionError('Stride not supported')
def visit_conditional_expr(self, o: 'mypy.nodes.ConditionalExpr') -> T:
# 0 if b else 1 -> b ? 0 : 1
self.accept(o.cond)
self.write(' ? ')
self.accept(o.if_expr)
self.write(' : ')
self.accept(o.else_expr)
def visit_backquote_expr(self, o: 'mypy.nodes.BackquoteExpr') -> T:
pass
def visit_type_var_expr(self, o: 'mypy.nodes.TypeVarExpr') -> T:
pass
def visit_type_alias_expr(self, o: 'mypy.nodes.TypeAliasExpr') -> T:
pass
def visit_namedtuple_expr(self, o: 'mypy.nodes.NamedTupleExpr') -> T:
pass
def visit_enum_call_expr(self, o: 'mypy.nodes.EnumCallExpr') -> T:
pass
def visit_typeddict_expr(self, o: 'mypy.nodes.TypedDictExpr') -> T:
pass
def visit_newtype_expr(self, o: 'mypy.nodes.NewTypeExpr') -> T:
pass
def visit__promote_expr(self, o: 'mypy.nodes.PromoteExpr') -> T:
pass
def visit_await_expr(self, o: 'mypy.nodes.AwaitExpr') -> T:
pass
def visit_temp_node(self, o: 'mypy.nodes.TempNode') -> T:
pass
def _write_tuple_unpacking(self, temp_name, lval_items, item_types,
is_return=False):
"""Used by assignment and for loops."""
for i, (lval_item, item_type) in enumerate(zip(lval_items, item_types)):
#self.log('*** %s :: %s', lval_item, item_type)
if isinstance(lval_item, NameExpr):
if lval_item.name == '_':
continue
item_c_type = get_c_type(item_type)
# declare it at the top of the function
if self.decl:
self.local_var_list.append((lval_item.name, item_c_type))
self.write_ind('%s', lval_item.name)
else:
# Could be MemberExpr like self.foo, self.bar = baz
self.write_ind('')
self.accept(lval_item)
# Tuples that are return values aren't pointers
op = '.' if is_return else '->'
self.write(' = %s%sat%d();\n', temp_name, op, i) # RHS
def visit_assignment_stmt(self, o: 'mypy.nodes.AssignmentStmt') -> T:
# Declare constant strings. They have to be at the top level.
    if (self.decl and self.indent == 0 and len(o.lvalues) == 1 and
        isinstance(o.lvalues[0], NameExpr)):  # other lvalues have no .name
      lval = o.lvalues[0]
      c_type = get_c_type(self.types[lval])
      if not lval.name.startswith('_'):
self.decl_write('extern %s %s;\n', c_type, lval.name)
# I think there are more than one when you do a = b = 1, which I never
# use.
assert len(o.lvalues) == 1, o.lvalues
lval = o.lvalues[0]
# src = cast(source__SourcedFile, src)
# -> source__SourcedFile* src = static_cast<source__SourcedFile>(src)
if isinstance(o.rvalue, CallExpr) and o.rvalue.callee.name == 'cast':
assert isinstance(lval, NameExpr)
call = o.rvalue
type_expr = call.args[0]
subtype_name = _GetCTypeForCast(type_expr)
cast_kind = _GetCastKind(self.module_path, subtype_name)
# Distinguish between UP cast and DOWN cast.
# osh/cmd_parse.py _MakeAssignPair does an UP cast within branches.
# _t is the base type, so that means it's an upcast.
if isinstance(type_expr, NameExpr) and type_expr.name.endswith('_t'):
if self.decl:
self.local_var_list.append((lval.name, subtype_name))
self.write_ind(
'%s = %s<%s>(', lval.name, cast_kind, subtype_name)
else:
self.write_ind(
'%s %s = %s<%s>(', subtype_name, lval.name, cast_kind,
subtype_name)
self.accept(call.args[1]) # variable being casted
self.write(');\n')
return
if isinstance(lval, NameExpr):
if lval.name == '_': # Skip _ = log
return
lval_type = self.types[lval]
c_type = get_c_type(lval_type)
# for "hoisting" to the top of the function
if self.in_func_body:
self.write_ind('%s = ', lval.name)
if self.decl:
self.local_var_list.append((lval.name, c_type))
else:
# globals always get a type -- they're not mutated
self.write_ind('%s %s = ', c_type, lval.name)
# Special case for list comprehensions. Note that a variable has to
# be on the LHS, so we can append to it.
#
# y = [i+1 for i in x[1:] if i]
# =>
# y = []
# for i in x[1:]:
# if i:
# y.append(i+1)
# (but in C++)
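      # Roughly, for the example above:
      #   y = new List<int>();
      #   for (ListIter<int> it(x->slice(1)); !it.Done(); it.Next()) {
      #     int i = it.Value();
      #     y->append(i + 1);  // note: the 'if i' filter isn't emitted yet
      #   }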
if isinstance(o.rvalue, ListComprehension):
gen = o.rvalue.generator # GeneratorExpr
left_expr = gen.left_expr
index_expr = gen.indices[0]
seq = gen.sequences[0]
cond = gen.condlists[0] # TODO: not used!
# Write empty container as initialization.
assert c_type.endswith('*'), c_type # Hack
self.write('new %s();\n' % c_type[:-1])
over_type = self.types[seq]
self.log(' iterating over type %s', over_type)
if over_type.type.fullname() == 'builtins.list':
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('List', 'ListIter', 1)[:-1] # remove *
else:
# Example: assoc == Optional[Dict[str, str]]
c_iter_type = 'TODO_ASSOC'
self.write_ind('for (%s it(', c_iter_type)
self.accept(seq)
self.write('); !it.Done(); it.Next()) {\n')
seq_type = self.types[seq]
item_type = seq_type.args[0] # get 'int' from 'List<int>'
if isinstance(item_type, Instance):
self.write_ind(' %s ', get_c_type(item_type))
self.accept(index_expr)
self.write(' = it.Value();\n')
elif isinstance(item_type, TupleType): # for x, y in pairs
c_item_type = get_c_type(item_type)
if isinstance(index_expr, TupleExpr):
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind(' %s %s = it.Value();\n', c_item_type, temp_name)
self.indent += 1
self._write_tuple_unpacking(
temp_name, index_expr.items, item_type.items)
self.indent -= 1
else:
raise AssertionError()
else:
raise AssertionError('Unexpected type %s' % item_type)
self.write_ind(' %s->append(', lval.name)
self.accept(left_expr)
self.write(');\n')
self.write_ind('}\n')
return
self.accept(o.rvalue)
self.write(';\n')
elif isinstance(lval, MemberExpr):
self.write_ind('')
self.accept(lval)
self.write(' = ')
self.accept(o.rvalue)
self.write(';\n')
# Collect statements that look like self.foo = 1
if isinstance(lval.expr, NameExpr) and lval.expr.name == 'self':
log(' lval.name %s', lval.name)
lval_type = self.types[lval]
self.member_vars[lval.name] = lval_type
elif isinstance(lval, IndexExpr): # a[x] = 1
# TODO: a->set(x, 1) for both List and Dict
self.accept(lval.base)
self.write('->set(')
self.accept(lval.index)
self.write(', ')
self.accept(o.rvalue)
self.write(');\n')
elif isinstance(lval, TupleExpr):
# An assignment to an n-tuple turns into n+1 statements. Example:
#
# x, y = mytuple
#
# Tuple2<int, Str*> tup1 = mytuple
# int x = tup1->at0()
# Str* y = tup1->at1()
rvalue_type = self.types[o.rvalue]
c_type = get_c_type(rvalue_type)
is_return = isinstance(o.rvalue, CallExpr)
if is_return:
assert c_type.endswith('*')
c_type = c_type[:-1]
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind('%s %s = ', c_type, temp_name)
self.accept(o.rvalue)
self.write(';\n')
self._write_tuple_unpacking(temp_name, lval.items, rvalue_type.items,
is_return=is_return)
else:
raise AssertionError(lval)
def visit_for_stmt(self, o: 'mypy.nodes.ForStmt') -> T:
self.log('ForStmt')
self.log(' index_type %s', o.index_type)
self.log(' inferred_item_type %s', o.inferred_item_type)
self.log(' inferred_iterator_type %s', o.inferred_iterator_type)
func_name = None # does the loop look like 'for x in func():' ?
if isinstance(o.expr, CallExpr) and isinstance(o.expr.callee, NameExpr):
func_name = o.expr.callee.name
# special case: 'for i in xrange(3)'
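    # e.g. 'for i in xrange(3)' => 'for (int i = 0; i < 3; ++i) { ... }'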
if func_name == 'xrange':
index_name = o.index.name
args = o.expr.args
num_args = len(args)
if num_args == 1: # xrange(end)
self.write_ind('for (int %s = 0; %s < ', index_name, index_name)
self.accept(args[0])
self.write('; ++%s) ', index_name)
      elif num_args == 2: # xrange(begin, end)
self.write_ind('for (int %s = ', index_name)
self.accept(args[0])
self.write('; %s < ', index_name)
self.accept(args[1])
self.write('; ++%s) ', index_name)
      elif num_args == 3: # xrange(begin, end, step)
self.write_ind('for (int %s = ', index_name)
self.accept(args[0])
self.write('; %s < ', index_name)
self.accept(args[1])
self.write('; %s += ', index_name)
self.accept(args[2])
self.write(') ')
else:
raise AssertionError()
self.accept(o.body)
return
reverse = False
# for i, x in enumerate(...):
index0_name = None
if func_name == 'enumerate':
assert isinstance(o.index, TupleExpr), o.index
index0 = o.index.items[0]
assert isinstance(index0, NameExpr), index0
index0_name = index0.name # generate int i = 0; ; ++i
# type of 'x' in 'for i, x in enumerate(...)'
item_type = o.inferred_item_type.items[1]
index_expr = o.index.items[1]
# enumerate(mylist) turns into iteration over mylist with variable i
assert len(o.expr.args) == 1, o.expr.args
iterated_over = o.expr.args[0]
elif func_name == 'reversed':
# NOTE: enumerate() and reversed() can't be mixed yet. But you CAN
# reverse iter over tuples.
item_type = o.inferred_item_type
index_expr = o.index
args = o.expr.args
assert len(args) == 1, args
iterated_over = args[0]
reverse = True # use different iterate
elif func_name == 'iteritems':
item_type = o.inferred_item_type
index_expr = o.index
args = o.expr.args
assert len(args) == 1, args
# This should be a dict
iterated_over = args[0]
log('------------ ITERITEMS OVER %s', iterated_over)
else:
item_type = o.inferred_item_type
index_expr = o.index
iterated_over = o.expr
over_type = self.types[iterated_over]
self.log(' iterating over type %s', over_type)
self.log(' iterating over type %s', over_type.type.fullname())
over_dict = False
if over_type.type.fullname() == 'builtins.list':
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('List', 'ListIter', 1)[:-1] # remove *
# ReverseListIter!
if reverse:
c_iter_type = 'Reverse' + c_iter_type
elif over_type.type.fullname() == 'builtins.dict':
# Iterator
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('Dict', 'DictIter', 1)[:-1] # remove *
over_dict = True
assert not reverse
elif over_type.type.fullname() == 'builtins.str':
c_iter_type = 'StrIter'
assert not reverse # can't reverse iterate over string yet
else: # assume it's like d.iteritems()? Iterator type
assert False, over_type
if index0_name:
# can't initialize two things in a for loop, so do it on a separate line
if self.decl:
self.local_var_list.append((index0_name, 'int'))
self.write_ind('%s = 0;\n', index0_name)
index_update = ', ++%s' % index0_name
else:
index_update = ''
self.write_ind('for (%s it(', c_iter_type)
self.accept(iterated_over) # the thing being iterated over
self.write('); !it.Done(); it.Next()%s) {\n', index_update)
# for x in it: ...
# for i, x in enumerate(pairs): ...
if isinstance(item_type, Instance) or index0_name:
c_item_type = get_c_type(item_type)
self.write_ind(' %s ', c_item_type)
self.accept(index_expr)
if over_dict:
self.write(' = it.Key();\n')
else:
self.write(' = it.Value();\n')
elif isinstance(item_type, TupleType): # for x, y in pairs
if over_dict:
assert isinstance(o.index, TupleExpr), o.index
index_items = o.index.items
assert len(index_items) == 2, index_items
assert len(item_type.items) == 2, item_type.items
key_type = get_c_type(item_type.items[0])
val_type = get_c_type(item_type.items[1])
self.write_ind(' %s %s = it.Key();\n', key_type, index_items[0].name)
self.write_ind(' %s %s = it.Value();\n', val_type, index_items[1].name)
else:
# Example:
# for (ListIter it(mylist); !it.Done(); it.Next()) {
# Tuple2<int, Str*> tup1 = it.Value();
# int i = tup1->at0();
# Str* s = tup1->at1();
# log("%d %s", i, s);
# }
c_item_type = get_c_type(item_type)
if isinstance(o.index, TupleExpr):
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind(' %s %s = it.Value();\n', c_item_type, temp_name)
self.indent += 1
self._write_tuple_unpacking(
temp_name, o.index.items, item_type.items)
self.indent -= 1
else:
self.write_ind(' %s %s = it.Value();\n', c_item_type, o.index.name)
else:
raise AssertionError('Unexpected type %s' % item_type)
# Copy of visit_block, without opening {
self.indent += 1
block = o.body
for stmt in block.body:
# Ignore things that look like docstrings
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
#log('-- %d', self.indent)
self.accept(stmt)
self.indent -= 1
self.write_ind('}\n')
if o.else_body:
raise AssertionError("can't translate for-else")
def _write_cases(self, if_node):
"""
The MyPy AST has a recursive structure for if-elif-elif rather than a
flat one. It's a bit confusing.
"""
assert isinstance(if_node, IfStmt), if_node
assert len(if_node.expr) == 1, if_node.expr
assert len(if_node.body) == 1, if_node.body
expr = if_node.expr[0]
body = if_node.body[0]
# case 1:
# case 2:
# case 3: {
# print('body')
# }
# break; // this indent is annoying but hard to get rid of
assert isinstance(expr, CallExpr), expr
for i, arg in enumerate(expr.args):
if i != 0:
self.write('\n')
self.write_ind('case ')
self.accept(arg)
self.write(': ')
self.accept(body)
self.write_ind(' break;\n')
if if_node.else_body:
first_of_block = if_node.else_body.body[0]
if isinstance(first_of_block, IfStmt):
self._write_cases(first_of_block)
else:
# end the recursion
self.write_ind('default: ')
self.accept(if_node.else_body) # the whole block
# no break here
def _write_switch(self, expr, o):
"""Write a switch statement over integers."""
assert len(expr.args) == 1, expr.args
self.write_ind('switch (')
self.accept(expr.args[0])
self.write(') {\n')
assert len(o.body.body) == 1, o.body.body
if_node = o.body.body[0]
assert isinstance(if_node, IfStmt), if_node
self.indent += 1
self._write_cases(if_node)
self.indent -= 1
self.write_ind('}\n')
def _write_typeswitch(self, expr, o):
"""Write a switch statement over ASDL types."""
assert len(expr.args) == 1, expr.args
self.write_ind('switch (')
self.accept(expr.args[0])
self.write('->tag_()) {\n')
assert len(o.body.body) == 1, o.body.body
if_node = o.body.body[0]
assert isinstance(if_node, IfStmt), if_node
self.indent += 1
self._write_cases(if_node)
self.indent -= 1
self.write_ind('}\n')
def visit_with_stmt(self, o: 'mypy.nodes.WithStmt') -> T:
"""
Translate only blocks of this form:
with switch(x) as case:
if case(0):
print('zero')
elif case(1, 2, 3):
print('low')
else:
print('other')
switch(x) {
case 0:
# TODO: need casting here
print('zero')
break;
case 1:
case 2:
case 3:
print('low')
break;
default:
print('other')
break;
}
"""
log('WITH')
log('expr %s', o.expr)
log('target %s', o.target)
assert len(o.expr) == 1, o.expr
expr = o.expr[0]
assert isinstance(expr, CallExpr), expr
if expr.callee.name == 'switch':
self._write_switch(expr, o)
elif expr.callee.name == 'tagswitch':
self._write_typeswitch(expr, o)
else:
raise AssertionError(expr.callee.name)
def visit_del_stmt(self, o: 'mypy.nodes.DelStmt') -> T:
# TODO:
# del mylist[:] -> mylist->clear()
# del mydict[mykey] -> mydict->remove(key)
d = o.expr
if isinstance(d, IndexExpr):
self.write_ind('')
self.accept(d.base)
if isinstance(d.index, SliceExpr):
sl = d.index
assert sl.begin_index is None, sl
assert sl.end_index is None, sl
self.write('->clear()')
else:
self.write('->remove(')
self.accept(d.index)
self.write(')')
self.write(';\n')
def _WriteFuncParams(self, arg_types, arguments):
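    """Write a C++ parameter list, skipping the implicit 'self' argument."""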
first = True # first NOT including self
for arg_type, arg in zip(arg_types, arguments):
if not first:
self.decl_write(', ')
c_type = get_c_type(arg_type)
arg_name = arg.variable.name()
# C++ has implicit 'this'
if arg_name == 'self':
continue
self.decl_write('%s %s', c_type, arg_name)
first = False
# We can't use __str__ on these Argument objects? That seems like an
# oversight
#self.log('%r', arg)
if 0:
self.log('Argument %s', arg.variable)
self.log(' type_annotation %s', arg.type_annotation)
# I think these are for default values
self.log(' initializer %s', arg.initializer)
self.log(' kind %s', arg.kind)
def visit_func_def(self, o: 'mypy.nodes.FuncDef') -> T:
# Skip these for now
if o.name() == '__repr__':
return
# No function prototypes when forward declaring.
if self.forward_decl:
self.virtual.OnMethod(self.current_class_name, o.name())
return
# Hack to turn _Next() with keyword arg into a set of overloaded
# methods
#
# Other things I tried:
# if mylib.CPP: def _Next() # MyPy doesn't like this
# if not TYPE_CHECKING: def _Next() # get UnboundType?
# @overload decorator -- not sure how to do it, will probably cause
# runtime overhead
# Have:
# MakeOshParser(_Reader* line_reader, bool emit_comp_dummy)
# Want:
# MakeOshParser(_Reader* line_reader) {
# return MakeOshParser(line_reader, True);
# }
# TODO: restrict this
class_name = self.current_class_name
func_name = o.name()
ret_type = o.type.ret_type
if (class_name in ('BoolParser', 'CommandParser') and func_name == '_Next' or
class_name == 'ParseContext' and func_name == 'MakeOshParser' or
class_name == 'ErrorFormatter' and func_name == 'PrettyPrintError' or
class_name is None and func_name == 'PrettyPrintError' or
class_name == 'WordParser' and func_name == '_ParseVarExpr' or
class_name == 'AbstractWordEvaluator' and
func_name in ('EvalWordSequence2', '_EvalWordToParts',
'_EmptyStrOrError', '_EvalWordPart') or
# virtual method in several classes
func_name == 'EvalWordToString' or
class_name == 'ArithEvaluator' and func_name == '_ValToIntOrError' or
class_name is None and func_name == '_StringToInteger' or
class_name == 'BoolEvaluator' and func_name in ('_EvalCompoundWord', '_StringToIntegerOrError') or
class_name == 'Executor' and func_name == '_Execute' or
class_name is None and func_name == '_PackFlags' or
class_name == 'Mem' and func_name in ('GetVar', 'SetVar') or
class_name == 'SearchPath' and func_name == 'Lookup' or
# osh/sh_expr_eval.py
class_name is None and func_name == 'EvalLhsAndLookup' or
class_name == 'SplitContext' and
func_name in ('SplitForWordEval', '_GetSplitter')
):
default_val = o.arguments[-1].initializer
if default_val: # e.g. osh/bool_parse.py has default val
if self.decl or class_name is None:
func_name = o.name()
else:
func_name = '%s::%s' % (self.current_class_name, o.name())
self.write('\n')
# Write _Next() with no args
virtual = ''
c_ret_type = get_c_type(ret_type)
if isinstance(ret_type, TupleType):
assert c_ret_type.endswith('*')
c_ret_type = c_ret_type[:-1]
self.decl_write_ind('%s%s %s(', virtual, c_ret_type, func_name)
# TODO: Write all params except last optional one
self._WriteFuncParams(o.type.arg_types[:-1], o.arguments[:-1])
self.decl_write(')')
if self.decl:
self.decl_write(';\n')
else:
self.write(' {\n')
# return MakeOshParser()
kw = '' if isinstance(ret_type, NoneTyp) else 'return '
self.write(' %s%s(' % (kw, o.name()))
# Don't write self or last optional argument
first_arg_index = 0 if class_name is None else 1
pass_through = o.arguments[first_arg_index:-1]
if pass_through:
for i, arg in enumerate(pass_through):
if i != 0:
self.write(', ')
self.write(arg.variable.name())
self.write(', ')
# Now write default value, e.g. lex_mode_e::DBracket
self.accept(default_val)
self.write(');\n')
self.write('}\n')
virtual = ''
if self.decl:
self.local_var_list = [] # Make a new instance to collect from
self.local_vars[o] = self.local_var_list
#log('Is Virtual? %s %s', self.current_class_name, o.name())
if self.virtual.IsVirtual(self.current_class_name, o.name()):
virtual = 'virtual '
if not self.decl and self.current_class_name:
# definition looks like
# void Type::foo(...);
func_name = '%s::%s' % (self.current_class_name, o.name())
else:
# declaration inside class { }
func_name = o.name()
self.write('\n')
# TODO: if self.current_class_name ==
# write 'virtual' here.
# You could also test NotImplementedError as abstract?
c_ret_type = get_c_type(ret_type)
if isinstance(ret_type, TupleType):
assert c_ret_type.endswith('*')
c_ret_type = c_ret_type[:-1]
self.decl_write_ind('%s%s %s(', virtual, c_ret_type, func_name)
self._WriteFuncParams(o.type.arg_types, o.arguments)
if self.decl:
self.decl_write(');\n')
self.in_func_body = True
self.accept(o.body) # Collect member_vars, but don't write anything
self.in_func_body = False
return
self.write(') ')
# Write local vars we collected in the 'decl' phase
if not self.forward_decl and not self.decl:
arg_names = [arg.variable.name() for arg in o.arguments]
no_args = [
(lval_name, c_type) for (lval_name, c_type) in self.local_vars[o]
if lval_name not in arg_names
]
self.prepend_to_block = no_args
self.in_func_body = True
self.accept(o.body)
self.in_func_body = False
def visit_overloaded_func_def(self, o: 'mypy.nodes.OverloadedFuncDef') -> T:
pass
def visit_class_def(self, o: 'mypy.nodes.ClassDef') -> T:
#log(' CLASS %s', o.name)
base_class_name = None # single inheritance only
for b in o.base_type_exprs:
if isinstance(b, NameExpr):
# TODO: inherit from std::exception?
if b.name != 'object' and b.name != 'Exception':
base_class_name = b.name
# Forward declare types because they may be used in prototypes
if self.forward_decl:
self.decl_write_ind('class %s;\n', o.name)
if base_class_name:
self.virtual.OnSubclass(base_class_name, o.name)
# Visit class body so we get method declarations
self.current_class_name = o.name
for stmt in o.defs.body:
# Ignore things that look like docstrings
if (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, StrExpr)):
continue
self.accept(stmt)
self.current_class_name = None
return
if self.decl:
self.member_vars.clear() # make a new list
self.decl_write('\n')
self.decl_write_ind('class %s', o.name) # block after this
# e.g. class TextOutput : public ColorOutput
if base_class_name:
self.decl_write(' : public %s', base_class_name)
self.decl_write(' {\n')
self.decl_write_ind(' public:\n')
# NOTE: declaration still has to traverse the whole body to fill out
# self.member_vars!!!
block = o.defs
self.indent += 1
self.current_class_name = o.name
for stmt in block.body:
# Ignore things that look like docstrings
if (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, StrExpr)):
continue
# Constructor is named after class
if isinstance(stmt, FuncDef) and stmt.name() == '__init__':
self.decl_write_ind('%s(', o.name)
self._WriteFuncParams(stmt.type.arg_types, stmt.arguments)
self.decl_write(');\n')
# Must visit these for member vars!
self.accept(stmt.body)
continue
self.accept(stmt)
self.current_class_name = None
# Now write member defs
#log('MEMBERS for %s: %s', o.name, list(self.member_vars.keys()))
if self.member_vars:
self.decl_write('\n') # separate from functions
for name in sorted(self.member_vars):
c_type = get_c_type(self.member_vars[name])
self.decl_write_ind('%s %s;\n', c_type, name)
self.indent -= 1
self.decl_write_ind('};\n')
return
self.current_class_name = o.name
# Now we're visiting for definitions (not declarations).
#
block = o.defs
for stmt in block.body:
# Collect __init__ calls within __init__, and turn them into
      # initializer lists.
if isinstance(stmt, FuncDef) and stmt.name() == '__init__':
self.write('\n')
self.write_ind('%s::%s(', o.name, o.name)
self._WriteFuncParams(stmt.type.arg_types, stmt.arguments)
self.write(') ')
# Taking into account the docstring, look at the first statement to
# see if it's a superclass __init__ call. Then move that to the
# initializer list.
first_index = 0
maybe_skip_stmt = stmt.body.body[0]
if (isinstance(maybe_skip_stmt, ExpressionStmt) and
isinstance(maybe_skip_stmt.expr, StrExpr)):
first_index += 1
first_stmt = stmt.body.body[first_index]
if (isinstance(first_stmt, ExpressionStmt) and
isinstance(first_stmt.expr, CallExpr)):
expr = first_stmt.expr
#log('expr %s', expr)
callee = first_stmt.expr.callee
# TextOutput() : ColorOutput(f), ... {
if isinstance(callee, MemberExpr) and callee.name == '__init__':
base_constructor_args = expr.args
#log('ARGS %s', base_constructor_args)
self.write(': %s(', base_class_name)
for i, arg in enumerate(base_constructor_args):
if i == 0:
continue # Skip 'this'
if i != 1:
self.write(', ')
self.accept(arg)
self.write(') {\n')
self.indent += 1
for node in stmt.body.body[first_index+1:]:
self.accept(node)
self.indent -= 1
self.write('}\n')
continue
# Normal function body
self.accept(stmt.body)
continue
# Write body
if isinstance(stmt, FuncDef):
self.accept(stmt)
self.current_class_name = None # Stop prefixing functions with class
def visit_global_decl(self, o: 'mypy.nodes.GlobalDecl') -> T:
pass
def visit_nonlocal_decl(self, o: 'mypy.nodes.NonlocalDecl') -> T:
pass
def visit_decorator(self, o: 'mypy.nodes.Decorator') -> T:
pass
def visit_var(self, o: 'mypy.nodes.Var') -> T:
pass
# Module structure
def visit_import(self, o: 'mypy.nodes.Import') -> T:
pass
def visit_import_from(self, o: 'mypy.nodes.ImportFrom') -> T:
if self.decl: # No duplicate 'using'
return
if o.id in ('__future__', 'typing'):
return # do nothing
# Later we need to turn module.func() into module::func(), without
# disturbing self.foo.
for name, alias in o.names:
if alias:
self.imported_names.add(alias)
else:
self.imported_names.add(name)
# A heuristic that works for the OSH import style.
#
    # from core.util import log => using util::log;
# from core import util => NOT translated
for name, alias in o.names:
# TODO: Should these be moved to core/pylib.py or something?
# They are varargs functions that have to be rewritten.
if name in ('log', 'p_die', 'e_die', 'e_strict', 'e_usage',
'stderr_line'):
continue # do nothing
if name in ('switch', 'tagswitch', 'iteritems'): # mylib
continue # do nothing
if '.' in o.id:
last_dotted = o.id.split('.')[-1]
# Omit this:
# from _devbuild.gen import grammar_nt
if last_dotted == 'gen':
return
# ASDL:
#
# namespaces:
# expr_e::Const # Compound sum
# expr::Const
# Id
#
# types:
# expr__Const
# expr_t # sum type
# expr_context_e # simple sum. This one is hard
# double_quoted
# Id_str
# Tag numbers/namespaces end with _n. enum types end with _e.
# TODO: rename special cases
is_namespace = False
if last_dotted.endswith('_asdl'):
if name.endswith('_n') or name.endswith('_i') or name in (
'Id', 'hnode_e', 'source_e', 'place_e',
# syntax_asdl
're', 're_repeat', 'class_literal_term', 'proc_sig',
'bracket_op', 'bracket_op_e',
'source', 'source_e',
'suffix_op', 'suffix_op_e',
'sh_lhs_expr', 'redir', 'parse_result',
'command_e', 'command',
'arith_expr_e', 'arith_expr',
'bool_expr_e', 'bool_expr',
'expr_e', 'expr',
'place_expr_e', 'place_expr',
'word_part_e', 'word_part',
'word_e', 'word',
'redir_e', 'redir',
'proc_sig_e', 'proc_sig',
'glob_part_e', 'glob_part',
're_e', 're',
're_repeat_e', 're_repeat',
'class_literal_term_e', 'class_literal_term',
'sh_lhs_expr_e', 'sh_lhs_expr',
# runtime_asdl
'lvalue_e', 'lvalue',
'value_e', 'value',
'part_value_e', 'part_value',
'cmd_value_e', 'cmd_value',
'redirect_e', 'redirect',
):
is_namespace = True
if is_namespace:
# No aliases yet?
#lhs = alias if alias else name
self.write_ind(
'namespace %s = %s::%s;\n', name, last_dotted, name)
else:
if alias:
# using runtime_asdl::emit_e = EMIT;
self.write_ind('using %s = %s::%s;\n', alias, last_dotted, name)
else:
# from _devbuild.gen.id_kind_asdl import Id
# -> using id_kind_asdl::Id.
self.write_ind('using %s::%s;\n', last_dotted, name)
else:
# If we're importing a module without an alias, we don't need to do
# anything. 'namespace cmd_exec' is already defined.
if not alias:
return
# from asdl import format as fmt
# -> namespace fmt = format;
self.write_ind('namespace %s = %s;\n', alias, name)
# Old scheme
# from testpkg import module1 =>
# namespace module1 = testpkg.module1;
# Unfortunately the MyPy AST doesn't have enough info to distinguish
# imported packages and functions/classes?
def visit_import_all(self, o: 'mypy.nodes.ImportAll') -> T:
pass
# Statements
def visit_block(self, block: 'mypy.nodes.Block') -> T:
self.write('{\n') # not indented to use same line as while/if
self.indent += 1
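    # Declare the locals collected in the decl pass once, at the top of
    # the function body (C++ block scoping differs from Python's
    # function-wide scoping).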
if self.prepend_to_block:
done = set()
for lval_name, c_type in self.prepend_to_block:
if lval_name not in done:
self.write_ind('%s %s;\n', c_type, lval_name)
done.add(lval_name)
self.write('\n')
self.prepend_to_block = None
for stmt in block.body:
# Ignore things that look like docstrings
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
#log('-- %d', self.indent)
self.accept(stmt)
self.indent -= 1
self.write_ind('}\n')
def visit_expression_stmt(self, o: 'mypy.nodes.ExpressionStmt') -> T:
# TODO: Avoid writing docstrings.
# If it's just a string, then we don't need it.
self.write_ind('')
self.accept(o.expr)
self.write(';\n')
def visit_operator_assignment_stmt(self, o: 'mypy.nodes.OperatorAssignmentStmt') -> T:
self.write_ind('')
self.accept(o.lvalue)
self.write(' %s= ', o.op) # + to +=
self.accept(o.rvalue)
self.write(';\n')
def visit_while_stmt(self, o: 'mypy.nodes.WhileStmt') -> T:
self.write_ind('while (')
self.accept(o.expr)
self.write(') ')
self.accept(o.body)
def visit_return_stmt(self, o: 'mypy.nodes.ReturnStmt') -> T:
self.write_ind('return ')
if o.expr:
self.in_return_expr = True
self.accept(o.expr)
self.in_return_expr = False
self.write(';\n')
def visit_assert_stmt(self, o: 'mypy.nodes.AssertStmt') -> T:
pass
def visit_if_stmt(self, o: 'mypy.nodes.IfStmt') -> T:
# Not sure why this wouldn't be true
assert len(o.expr) == 1, o.expr
# Omit anything that looks like if __name__ == ...
cond = o.expr[0]
if isinstance(cond, UnaryExpr) and cond.op == 'not':
# check 'if not mylist'
cond_expr = cond.expr
else:
# TODO: if x > 0 and mylist
# if x > 0 and not mylist , etc.
cond_expr = cond
cond_type = self.types[cond_expr]
if not _CheckConditionType(cond_type):
raise AssertionError(
"Can't use str, list, or dict in boolean context")
if (isinstance(cond, ComparisonExpr) and
isinstance(cond.operands[0], NameExpr) and
cond.operands[0].name == '__name__'):
return
# Omit if 0:
if isinstance(cond, IntExpr) and cond.value == 0:
return
# Omit if TYPE_CHECKING blocks. They contain type expressions that
# don't type check!
if isinstance(cond, NameExpr) and cond.name == 'TYPE_CHECKING':
return
# mylib.CPP
if isinstance(cond, MemberExpr) and cond.name == 'CPP':
# just take the if block
self.write_ind('// if MYCPP\n')
self.write_ind('')
for node in o.body:
self.accept(node)
self.write_ind('// endif MYCPP\n')
return
# mylib.PYTHON
if isinstance(cond, MemberExpr) and cond.name == 'PYTHON':
if o.else_body:
self.write_ind('// if not PYTHON\n')
self.write_ind('')
self.accept(o.else_body)
self.write_ind('// endif MYCPP\n')
return
self.write_ind('if (')
for e in o.expr:
self.accept(e)
self.write(') ')
for node in o.body:
self.accept(node)
if o.else_body:
self.write_ind('else ')
self.accept(o.else_body)
def visit_break_stmt(self, o: 'mypy.nodes.BreakStmt') -> T:
self.write_ind('break;\n')
def visit_continue_stmt(self, o: 'mypy.nodes.ContinueStmt') -> T:
self.write_ind('continue;\n')
def visit_pass_stmt(self, o: 'mypy.nodes.PassStmt') -> T:
self.write_ind('; // pass\n')
def visit_raise_stmt(self, o: 'mypy.nodes.RaiseStmt') -> T:
self.write_ind('throw ')
    # a bare "raise" maps to "throw ;" (C++ re-throw); OSH relies on that.
if o.expr:
self.accept(o.expr)
self.write(';\n')
def visit_try_stmt(self, o: 'mypy.nodes.TryStmt') -> T:
self.write_ind('try ')
self.accept(o.body)
caught = False
for t, v, handler in zip(o.types, o.vars, o.handlers):
      # Heuristic: map the Python exception expression to a C++ pointer type name.
if isinstance(t, MemberExpr):
c_type = '%s::%s*' % (t.expr.name, t.name)
elif isinstance(t, TupleExpr):
c_type = 'MultipleExceptions' # TODO: implement this
else:
c_type = '%s*' % t.name
if v:
self.write_ind('catch (%s %s) ', c_type, v.name)
else:
self.write_ind('catch (%s) ', c_type)
self.accept(handler)
caught = True
# DUMMY to prevent compile errors
# TODO: Remove this
if not caught:
self.write_ind('catch (std::exception) { }')
#if o.else_body:
# raise AssertionError('try/else not supported')
#if o.finally_body:
# raise AssertionError('try/finally not supported')
def visit_print_stmt(self, o: 'mypy.nodes.PrintStmt') -> T:
pass
def visit_exec_stmt(self, o: 'mypy.nodes.ExecStmt') -> T:
pass
| [
"[email protected]"
] | |
93bfabe4dfcefff36871c856a5090eb67cc0c941 | 4dd5dbebc7b7f6dbfcbd6cc662311c91ad6d47e9 | /AtCoder/ABC070A.py | 7d5a0d0a6b0207c1d07f1f9502b6357f7e80c41d | [] | no_license | sourjp/programming_contest | aa6925b3317bd3aeb646df93a611af1199bfc7aa | 2a50e1be45441789e81eb49bfdfc0c598d2a534b | refs/heads/master | 2021-04-01T05:08:44.097226 | 2020-08-20T13:01:55 | 2020-08-20T13:01:55 | 248,158,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | a, b, c = map(str, input())
if a == c:
print('Yes')
else:
print('No')
# print('Yes' if a == c else 'No') | [
"[email protected]"
] | |
96ce5236d2509a84730721428f0aa0e3a53f1054 | 2d54ab7a1e829f89b554d6abc27527fdb38539ff | /run.py | ec5c92c704cc0c8aa94e6d0680f4482f76dfc1e3 | [] | no_license | zhtjtcz/Software-Backend | 1c3c73d8863d0d0df9cdfa08e4900f878127ed6c | ca865f1fe75493098050b236634f776f7b97d04d | refs/heads/main | 2023-06-07T06:28:05.345830 | 2021-06-17T16:30:47 | 2021-06-17T16:30:47 | 367,622,524 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import os
os.system("python manage.py makemigrations")
os.system("python manage.py migrate")
os.system("nohup python manage.py runserver 0.0.0.0:8000 & \n")
print("The backend is running!") | [
"[email protected]"
] | |
956d2b8dc4154b4476b17b8c513d88086fefd4be | b47c136e077f5100478338280495193a8ab81801 | /Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/register_unarystruct.py | 42858d8bc61cd90d8330727506be4c495ca3014f | [
"Apache-2.0"
] | permissive | IanSMoyes/SpiderPi | 22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | refs/heads/master | 2023-03-20T22:30:23.362137 | 2021-03-12T17:37:33 | 2021-03-12T17:37:33 | 339,555,949 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
from board import SCL, SDA
from busio import I2C
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import UnaryStruct
DEVICE_ADDRESS = 0x74  # device address of the is31fl3731 board
A_DEVICE_REGISTER_1 = 0x00 # Configuration register on the is31fl3731 board
A_DEVICE_REGISTER_2 = 0x03 # Auto Play Control Register 2 on the is31fl3731 board
class DeviceControl: # pylint: disable-msg=too-few-public-methods
def __init__(self, i2c):
self.i2c_device = i2c # self.i2c_device required by UnaryStruct class
register1 = UnaryStruct(A_DEVICE_REGISTER_1, "<B") # 8-bit number
register2 = UnaryStruct(A_DEVICE_REGISTER_2, "<B") # 8-bit number
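    # "<B" is a struct format string: "<" = little-endian, "B" = unsigned 8-bit integer.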
# The follow is for I2C communications
comm_port = I2C(SCL, SDA)
device = I2CDevice(comm_port, DEVICE_ADDRESS)
registers = DeviceControl(device)
# set the bits in the device
registers.register1 = 1 << 3 | 2
registers.register2 = 32
# display the device values for the bits
print("register 1: {}; register 2: {}".format(registers.register1, registers.register2))
# toggle the bits
registers.register1 = 2 << 3 | 5
registers.register2 = 60
# display the device values for the bits
print("register 1: {}; register 2: {}".format(registers.register1, registers.register2))
| [
"[email protected]"
] | |
cedc2eac003eeb79ab64df271f1e9558adb34096 | 7b1543ec496a2aec2624bb0ef04ed5ecf944675a | /Histo/HistoAnalyzer/test/jetValidationWithPfAK5GoodEleLast_cfg.py | dae2fb2e28e3c6bbc04be02b18e62f48e145f398 | [] | no_license | iihe-cms-sw/VJets_TreeMaker5311 | 45954e62e289907384d187a76230b76f64cea62f | 8c37cf7241e58ee7d83bb261ebf9c9a237080eac | refs/heads/master | 2020-06-06T18:10:09.921453 | 2014-09-06T17:05:37 | 2014-09-06T17:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,404 | py |
import FWCore.ParameterSet.Config as cms
import os
process = cms.Process("JetValidation")
###################
##### Loading what we need!
###################
from PhysicsTools.PatAlgos.patTemplate_cfg import *
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger(process,sequence='patDefaultSequence',hltProcess = '*')
from PhysicsTools.PatAlgos.tools.coreTools import *
from PhysicsTools.PatAlgos.tools.pfTools import *
from RecoJets.JetProducers.FastjetParameters_cfi import *
from RecoJets.JetProducers.ak5TrackJets_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
process.load("CommonTools.ParticleFlow.pfElectrons_cff")
process.load("CommonTools.ParticleFlow.pfMuons_cff")
process.load("CommonTools.ParticleFlow.ParticleSelectors.pfSortByType_cff")
process.load("CommonTools.ParticleFlow.pfNoPileUp_cff")
process.load("CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi")
##-------------------- Import the JEC services -----------------------
process.load("JetMETCorrections.Configuration.DefaultJEC_cff")
process.load("JetMETCorrections.Configuration.JetCorrectionServices_cff")
process.load("JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff")
##process.load("JetMETCorrections.Configuration.JetCorrectionProducers_cff")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
#process.load("MagneticField.Engine.uniformMagneticField_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
##-------------------- Import the Jet RECO modules -----------------------
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load("RecoJets.Configuration.GenJetParticles_cff")
process.load("RecoJets.Configuration.RecoGenJets_cff")
##-------------------- Turn-on the FastJet density calculation -----------------------
process.kt6PFJets.doRhoFastjet = True
##-------------------- Turn-on the FastJet jet area calculation for your favorite algorithm -----------------------
process.kt6PFJets.doAreaFastjet = True
process.ak5PFJets.doAreaFastjet = True
# to compute FastJet rho to correct isolation (note: EtaMax restricted to 2.5)
process.kt6PFJetsForIsolation = process.kt4PFJets.clone( rParam = 0.6, doRhoFastjet = True)
process.kt6PFJetsForIsolation.Rho_EtaMax = cms.double(2.5)
#################################################################
############ WARNING! to be run on data only! (r.c. 2011)########
############ need to be adapted for MC ########
#################################################################
removeMCMatching(process, ['All'])###############################
#################################################################
process.options = cms.untracked.PSet(wantSummary=cms.untracked.bool(True),
makeTriggerResults=cms.untracked.bool(True),
)
process.GlobalTag.globaltag = 'FT_R_44_V9::All'
####################
#### Files
###################
readFiles = cms.untracked.vstring()
readFiles.extend([
#"file:/gpfs/grid/srm/cms/store/data/Run2011A/DoubleElectron/RAW-RECO/ZElectron-08Nov2011-v1/0000/9213ACEA-B01B-E111-9BD9-002618943833.root"
#"file:/gpfs/grid/srm/cms/store/data/Run2011B/DoubleElectron/RAW-RECO/ZElectron-PromptSkim-v1/0000/B05CFB4E-7AF1-E011-B4BF-0015178C1574.root"
"file:/gpfs/grid/srm/cms/store/data/Run2011B/DoubleElectron/RAW-RECO/ZElectron-19Nov2011-v1/0000/08EABC25-971A-E111-9EDC-001D0967D625.root"
])
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(500),
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = readFiles,
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
)
####################
#### Trigger
###################
trigger2011v3 = cms.vstring("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v1","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v2","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v3","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v4","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v5","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v6","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9", "HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v10")
trigger2011RunB= cms.vstring("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8", "HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9", "HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v10")
trigger2011v1 = cms.vstring("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v3","HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC8_Mass30_v3","HLT_Ele17_CaloIdT_TrkIdVL_CaloIsoVL_TrkIsoVL_Ele8_CaloIdT_TrkIdVL_CaloIsoVL_TrkIsoVL_v3","HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_v3","HLT_Ele32_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_v2","HLT_Ele32_CaloIdL_CaloIsoVL_SC17_v3","HLT_Ele45_CaloIdVT_TrkIdT_v3","HLT_Ele15_CaloIdVT_TrkIdT_LooseIsoPFTau15_v4","HLT_Ele15_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_LooseIsoPFTau15_v4","HLT_Ele15_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_LooseIsoPFTau20_v4")
trigger2010 = cms.vstring("HLT_Ele17_CaloIdl_Ele8_CaloIsoIdL_CaloIsoVL_v3","HLT_Ele15_SW_L1R","HLT_Ele15_SW_CaloEleId_L1R","HLT_Ele17_SW_CaloEleId_L1R","HLT_Ele17_SW_TightEleId_L1R","HLT_Ele17_SW_TightEleId_L1R_v2","HLT_Ele17_SW_TightEleId_L1R_v3","HLT_Photon10_L1R","HLT_Photon15_L1R","HTL_Photon15_Cleaned_L1R")
alltriggers = cms.vstring() # In this way, the HLT string is empty and it will trigger every event
trigger2011v2 = cms.vstring("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v1","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v2","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v3","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v4","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v5","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v6","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8")
# from dav HLT analysis
triggersMay10Jul05 = cms.vstring("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v1","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v2","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v3","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v4","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v5","HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v6")
triggersAug05 = cms.vstring("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7","HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass30_v6")
triggersOct03 = cms.vstring("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7","HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass30_v6","HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8","HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass30_v7","HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_Ele17_v1")
####################
#### Lepton Selection
###################
process.Selection = cms.EDFilter('ZpatFilterPf',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
triggerCollectionTag = cms.InputTag("TriggerResults","","HLT"),
UseCombinedPrescales = cms.bool(False),
doTheHLTAnalysis = cms.bool(True),
removePU= cms.bool(False),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB,
TriggerNames = trigger2011v3,
secondEleEnThrhold = cms.double(20.0),
firstEleEnThrhold = cms.double(20.0),
lowZmassLimit = cms.double(71.0),
highZmassLimit = cms.double(111.0),
maxEtaForElectron = cms.double(2.4),
)
####################
#### TAP
###################
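## Tag-and-probe (TAP) efficiency filters: one EfficiencyFilter per measurement
## (WP80 ID, WP80 with new H/E cut, HLT Ele8 and Ele17 legs, RECO).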
process.TAPwp80 = cms.EDFilter('EfficiencyFilter',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
superClusterCollection_EB = cms.InputTag("correctedHybridSuperClusters"),
superClusterCollection_EE = cms.InputTag("correctedMulti5x5SuperClustersWithPreshower"),
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","","HLT"),
filename=cms.untracked.string("ZAnalysisFilter.root"),
UseCombinedPrescales = cms.bool(False),
removePU= cms.bool(False),
WP80_efficiency = cms.bool(True),
HLTele17_efficiency = cms.bool(False),
HLTele8_efficiency = cms.bool(False),
RECO_efficiency = cms.bool(False),
New_HE = cms.bool(False),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
electronIsolatedProducer= cms.InputTag( "hltPixelMatchElectronsL1Iso" ),
candTag= cms.InputTag("hltL1NonIsoHLTNonIsoSingleElectronEt15LTIPixelMatchFilter"),
JetCollectionLabel = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB,
TriggerNames = trigger2011v3
)
process.TAPwp80newHE = cms.EDFilter('EfficiencyFilter',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
superClusterCollection_EB = cms.InputTag("correctedHybridSuperClusters"),
superClusterCollection_EE = cms.InputTag("correctedMulti5x5SuperClustersWithPreshower"),
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","","HLT"),
filename=cms.untracked.string("ZAnalysisFilter.root"),
UseCombinedPrescales = cms.bool(False),
removePU= cms.bool(False),
WP80_efficiency = cms.bool(True),
HLTele17_efficiency = cms.bool(False),
HLTele8_efficiency = cms.bool(False),
RECO_efficiency = cms.bool(False),
New_HE = cms.bool(True),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
electronIsolatedProducer= cms.InputTag( "hltPixelMatchElectronsL1Iso" ),
candTag= cms.InputTag("hltL1NonIsoHLTNonIsoSingleElectronEt15LTIPixelMatchFilter"),
JetCollectionLabel = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB
TriggerNames = trigger2011v3
)
process.TAPhltele8 = cms.EDFilter('EfficiencyFilter',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
superClusterCollection_EB = cms.InputTag("correctedHybridSuperClusters"),
superClusterCollection_EE = cms.InputTag("correctedMulti5x5SuperClustersWithPreshower"),
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","","HLT"),
filename=cms.untracked.string("ZAnalysisFilter.root"),
UseCombinedPrescales = cms.bool(False),
removePU= cms.bool(False),
WP80_efficiency = cms.bool(False),
HLTele17_efficiency = cms.bool(False),
HLTele8_efficiency = cms.bool(True),
RECO_efficiency = cms.bool(False),
New_HE = cms.bool(False),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
electronIsolatedProducer= cms.InputTag( "hltPixelMatchElectronsL1Iso" ),
candTag= cms.InputTag("hltL1NonIsoHLTNonIsoSingleElectronEt15LTIPixelMatchFilter"),
JetCollectionLabel = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB
TriggerNames = trigger2011v3
)
process.TAPhltele17 = cms.EDFilter('EfficiencyFilter',
electronCollection = cms.InputTag("patElectronsWithTriggerele17"),
superClusterCollection_EB = cms.InputTag("correctedHybridSuperClusters"),
superClusterCollection_EE = cms.InputTag("correctedMulti5x5SuperClustersWithPreshower"),
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","","HLT"),
filename=cms.untracked.string("ZAnalysisFilter.root"),
UseCombinedPrescales = cms.bool(False),
removePU= cms.bool(False),
WP80_efficiency = cms.bool(False),
HLTele17_efficiency = cms.bool(True),
HLTele8_efficiency = cms.bool(False),
RECO_efficiency = cms.bool(False),
New_HE = cms.bool(False),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
electronIsolatedProducer= cms.InputTag( "hltPixelMatchElectronsL1Iso" ),
candTag= cms.InputTag("hltL1NonIsoHLTNonIsoSingleElectronEt15LTIPixelMatchFilter"),
JetCollectionLabel = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB
TriggerNames = trigger2011v3
)
process.TAPreco = cms.EDFilter('EfficiencyFilter',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
superClusterCollection_EB = cms.InputTag("correctedHybridSuperClusters"),
superClusterCollection_EE = cms.InputTag("correctedMulti5x5SuperClustersWithPreshower"),
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","","HLT"),
filename=cms.untracked.string("ZAnalysisFilter.root"),
UseCombinedPrescales = cms.bool(False),
removePU= cms.bool(False),
WP80_efficiency = cms.bool(False),
HLTele17_efficiency = cms.bool(False),
HLTele8_efficiency = cms.bool(False),
RECO_efficiency = cms.bool(True),
New_HE = cms.bool(False),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
electronIsolatedProducer= cms.InputTag( "hltPixelMatchElectronsL1Iso" ),
candTag= cms.InputTag("hltL1NonIsoHLTNonIsoSingleElectronEt15LTIPixelMatchFilter"),
JetCollectionLabel = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB
TriggerNames = trigger2011v3
)
####################
#### Jets..
###################
process.goodEPair = cms.EDProducer('pfAnalyzer',
electronCollection = cms.InputTag("patElectronsWithTrigger"),
pflowEleCollection = cms.untracked.InputTag("pfIsolatedElectrons"),
removePU= cms.bool(False),
)
process.validationJEC = cms.EDAnalyzer('jetValidation',
electronCollection = cms.InputTag("particleFlow:electrons"),
jetCollection = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
VertexCollection = cms.InputTag("offlinePrimaryVertices"),
goodEPair = cms.InputTag("goodEPair"),
tpMapName = cms.string('EventWeight'),
genJets = cms.InputTag("ak5GenJets"),
usingMC = cms.untracked.bool(False),
usingPF = cms.untracked.bool(True),
deltaRCone = cms.double(0.3),
deltaRConeGen = cms.double(0.1),
maxEtaJets = cms.double(2.4),
minPtJets = cms.double(30.0),
chargedEmEnergyFraction = cms.double(0.99),
neutralHadronEnergyFraction= cms.double(0.99),
neutralEmEnergyFraction= cms.double(0.99),
chargedHadronEnergyFraction= cms.double(0.0),
chargedMultiplicity= cms.int32(0),
JECUncertainties= cms.double(0),
)
process.validationL2L3Residual = cms.EDAnalyzer('jetValidation',
electronCollection = cms.InputTag("particleFlow:electrons"),
jetCollection = cms.InputTag("ak5PFJetsL2L3Residual"),
VertexCollection = cms.InputTag("offlinePrimaryVertices"),
goodEPair = cms.InputTag("goodEPair"),
tpMapName = cms.string('EventWeight'),
genJets = cms.InputTag("ak5GenJets"),
usingMC = cms.untracked.bool(False),
usingPF = cms.untracked.bool(True),
deltaRCone = cms.double(0.3),
deltaRConeGen = cms.double(0.1),
maxEtaJets = cms.double(2.4),
minPtJets = cms.double(30.0),
chargedEmEnergyFraction = cms.double(0.99),
neutralHadronEnergyFraction= cms.double(0.99),
neutralEmEnergyFraction= cms.double(0.99),
chargedHadronEnergyFraction= cms.double(0.0),
chargedMultiplicity= cms.int32(0),
JECUncertainties= cms.double(0),
)
process.validation = cms.EDAnalyzer('jetValidation',
electronCollection = cms.InputTag("particleFlow:electrons"),
jetCollection = cms.InputTag("ak5PFJets"),
VertexCollection = cms.InputTag("offlinePrimaryVertices"),
goodEPair = cms.InputTag("goodEPair"),
tpMapName = cms.string('EventWeight'),
genJets = cms.InputTag("ak5GenJets"),
usingMC = cms.untracked.bool(False),
usingPF = cms.untracked.bool(True),
deltaRCone = cms.double(0.3),
deltaRConeGen = cms.double(0.1),
maxEtaJets = cms.double(2.4),
minPtJets = cms.double(30.0),
chargedEmEnergyFraction = cms.double(0.99),
neutralHadronEnergyFraction= cms.double(0.99),
neutralEmEnergyFraction= cms.double(0.99),
chargedHadronEnergyFraction= cms.double(0.0),
chargedMultiplicity= cms.int32(0),
JECUncertainties= cms.double(0),
)
process.validationJECScaleUp = cms.EDAnalyzer('jetValidation',
electronCollection = cms.InputTag("particleFlow:electrons"),
jetCollection = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
VertexCollection = cms.InputTag("offlinePrimaryVertices"),
goodEPair = cms.InputTag("goodEPair"),
tpMapName = cms.string('EventWeight'),
genJets = cms.InputTag("ak5GenJets"),
usingMC = cms.untracked.bool(False),
usingPF = cms.untracked.bool(True),
deltaRCone = cms.double(0.3),
deltaRConeGen = cms.double(0.1),
maxEtaJets = cms.double(2.4),
minPtJets = cms.double(30.0),
chargedEmEnergyFraction = cms.double(0.99),
neutralHadronEnergyFraction= cms.double(0.99),
neutralEmEnergyFraction= cms.double(0.99),
chargedHadronEnergyFraction= cms.double(0.0),
chargedMultiplicity= cms.int32(0),
JECUncertainties= cms.double(1),
)
process.validationJECScaleDown = cms.EDAnalyzer('jetValidation',
electronCollection = cms.InputTag("particleFlow:electrons"),
jetCollection = cms.InputTag("ak5PFJetsL1FastL2L3Residual"),
VertexCollection = cms.InputTag("offlinePrimaryVertices"),
goodEPair = cms.InputTag("goodEPair"),
tpMapName = cms.string('EventWeight'),
genJets = cms.InputTag("ak5GenJets"),
usingMC = cms.untracked.bool(False),
usingPF = cms.untracked.bool(True),
deltaRCone = cms.double(0.3),
deltaRConeGen = cms.double(0.1),
maxEtaJets = cms.double(2.4),
minPtJets = cms.double(30.0),
chargedEmEnergyFraction = cms.double(0.99),
neutralHadronEnergyFraction= cms.double(0.99),
neutralEmEnergyFraction= cms.double(0.99),
chargedHadronEnergyFraction= cms.double(0.0),
chargedMultiplicity= cms.int32(0),
JECUncertainties= cms.double(-1),
)
####################
#### HLT Analysis, MC reweight, and other stuff
###################
process.demo = cms.EDProducer('HistoProducer',
electronCollection = cms.InputTag('patElectronsWithTrigger'),# Change it, sooner or later...
triggerCollection = cms.InputTag("TriggerResults","","HLT"),
UseCombinedPrescales = cms.bool(False),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB,
TriggerNames = trigger2011v3,
removePU= cms.bool(True),
usingMC= cms.bool(False),
doTheHLTAnalysis = cms.bool(True),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
TotalNEventTag = cms.vstring('TotalEventCounter'),
                              WhichRun = cms.string("Run2011AB"), ## UNESSENTIAL FOR DATA: select which datasets you want to use to reweight.
eventWeightsCollection= cms.string("EventWeight"),
giveEventWeightEqualToOne= cms.bool(False),
RootuplaName = cms.string("treeVJ_")
)
process.demobefore = cms.EDProducer('HistoProducer',
electronCollection = cms.InputTag('patElectronsWithTrigger'),# Change it, sooner or later...
triggerCollection = cms.InputTag("TriggerResults","","HLT"),
UseCombinedPrescales = cms.bool(False),
#TriggerNames = triggersMay10Jul05+triggersAug05+triggersOct03+trigger2011RunB,
TriggerNames = trigger2011v3,
removePU= cms.bool(True),
usingMC= cms.bool(False),
doTheHLTAnalysis = cms.bool(True),
VertexCollectionTag = cms.InputTag('offlinePrimaryVertices'),
TotalNEventTag = cms.vstring('TotalEventCounter'),
                              WhichRun = cms.string("Run2011AB"), ## UNESSENTIAL FOR DATA: select which datasets you want to use to reweight.
eventWeightsCollection= cms.string("EventWeight"),
giveEventWeightEqualToOne= cms.bool(False),
RootuplaName = cms.string("treeVJBefore_"),
)
######################
# #
# pfNoPileUP #
# #
######################
process.pfPileUp.Vertices = 'goodOfflinePrimaryVertices' # recipe 15th March JEC
process.pfPileUp.checkClosestZVertex = cms.bool(False) # recipe 15th March JEC
process.pfPileUp.PFCandidates = cms.InputTag("particleFlow")
process.pfNoPileUp.bottomCollection = cms.InputTag("particleFlow")
######################
# #
# pfElectrons #
# #
######################
process.patElectrons.useParticleFlow=True
#process.pfAllElectrons.src = "particleFlow"
process.pfAllElectrons.src = "pfNoPileUp"
process.isoValElectronWithNeutral.deposits[0].deltaR = 0.4
process.isoValElectronWithCharged.deposits[0].deltaR = 0.4
process.isoValElectronWithPhotons.deposits[0].deltaR = 0.4
process.pfIsolatedElectrons.isolationCut = 0.2
######################
# #
# TRG MATCHING -ON- #
# #
######################
### ELE8
process.eleTriggerMatchHLT = cms.EDProducer( "PATTriggerMatcherDRLessByR",
src = cms.InputTag( "patElectrons" ),
matched = cms.InputTag( "patTrigger"),
matchedCuts = cms.string('(path("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v*",0,0) && filter("hltEle17CaloIdIsoEle8CaloIdIsoPixelMatchDoubleFilter")) || (path("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v*",0,0) && filter("hltEle17TightIdLooseIsoEle8TightIdLooseIsoTrackIsolDoubleFilter"))'),
maxDPtRel = cms.double( 5 ),
maxDeltaR = cms.double( 0.3 ),
resolveAmbiguities = cms.bool( True ),
resolveByMatchQuality = cms.bool( True )
)
process.patElectronsWithTrigger = cms.EDProducer("PATTriggerMatchElectronEmbedder",
src = cms.InputTag("patElectrons"),
matches = cms.VInputTag(cms.InputTag('eleTriggerMatchHLT'))
)
switchOnTriggerMatching( process, ['eleTriggerMatchHLT' ],sequence ='patDefaultSequence', hltProcess = '*' )
### ELE17
process.eleTriggerMatchHLTele17 = cms.EDProducer( "PATTriggerMatcherDRLessByR",
src = cms.InputTag( "patElectrons" ),
matched = cms.InputTag( "patTrigger"),
matchedCuts = cms.string('(path("HLT_Ele17_CaloIdL_CaloIsoVL_Ele8_CaloIdL_CaloIsoVL_v*",0,0) && filter("hltEle17CaloIdLCaloIsoVLPixelMatchFilter")) || (path("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v*",0,0) && filter("hltEle17TightIdLooseIsoEle8TightIdLooseIsoTrackIsolFilter"))'),
maxDPtRel = cms.double( 5 ),
maxDeltaR = cms.double( 0.3 ),
resolveAmbiguities = cms.bool( True ),
resolveByMatchQuality = cms.bool( True )
)
process.patElectronsWithTriggerele17 = cms.EDProducer("PATTriggerMatchElectronEmbedder",
src = cms.InputTag("patElectrons"),
matches = cms.VInputTag(cms.InputTag('eleTriggerMatchHLTele17'))
)
switchOnTriggerMatching( process, ['eleTriggerMatchHLTele17' ],sequence ='patDefaultSequence', hltProcess = '*' )
#####################
# #
# OUTPUT #
# #
#####################
process.out.fileName = cms.untracked.string('test-filtering.root')
process.out.outputCommands = cms.untracked.vstring(
'drop *',
)
process.out.SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('JetValidation'))
process.TFileService = cms.Service("TFileService",
fileName = cms.string('jetValidation.root')
)
#####################
# #
# Counting #
# #
#####################
process.TotalEventCounter = cms.EDProducer("EventCountProducer")
#####################
# #
# SEQUENCE #
# #
#####################
process.ToolInizialization = cms.Path(
process.kt6PFJetsForIsolation*
process.kt6PFJets*
process.ak5PFJets*
process.ak5PFJetsL2L3Residual*
process.ak5PFJetsL1FastL2L3Residual*
process.goodOfflinePrimaryVertices*
process.pfNoPileUpSequence*
process.pfAllNeutralHadrons*
process.pfAllChargedHadrons*
process.pfAllPhotons*
process.pfElectronSequence*
process.patTrigger*
process.patDefaultSequence
)
process.TAPAnalysisWP80 = cms.Path(
process.goodOfflinePrimaryVertices*
process.eleTriggerMatchHLT*
process.patElectronsWithTrigger*
process.TAPwp80
)
process.TAPAnalysisWP80newHE = cms.Path(
process.goodOfflinePrimaryVertices*
process.eleTriggerMatchHLT*
process.patElectronsWithTrigger*
process.TAPwp80newHE
)
process.TAPAnalysisHLTele8 = cms.Path(
process.goodOfflinePrimaryVertices*
process.eleTriggerMatchHLT*
process.patElectronsWithTrigger*
process.TAPhltele8
)
process.TAPAnalysisHLTele17 = cms.Path(
process.goodOfflinePrimaryVertices*
process.eleTriggerMatchHLTele17*
process.patElectronsWithTriggerele17*
process.TAPhltele17
)
process.TAPAnalysisRECO = cms.Path(
process.goodOfflinePrimaryVertices*
process.eleTriggerMatchHLT*
process.patElectronsWithTrigger*
process.TAPreco
)
process.JetValidation = cms.Path(
process.TotalEventCounter*
process.eleTriggerMatchHLT*
process.patElectronsWithTrigger*
process.demobefore*
process.goodOfflinePrimaryVertices*
process.Selection*
process.demo*
process.goodEPair*
process.validation*
process.validationL2L3Residual*
process.validationJECScaleUp*
process.validationJECScaleDown*
process.validationJEC
)
#####################
# #
# Outpath #
# #
#####################
process.outpath = cms.EndPath(
#process.out
)
| [
"[email protected]"
] | |
43fb565c520ceba642840b9e2be90020c32f9bab | 00ef8e1eb57b73427508b20aadf0266da6b1f900 | /rlf/rl/loggers/sanity_checker.py | b27b9192a2eb8ad7ad2c58c8f3ceb49e8e0e9079 | [
"MIT"
] | permissive | amy12xx/rl-toolkit | f4643935cc8afd960356bfeae74c233d2596dea9 | 8254df8346752ea0226ae2064cc1eabc839567b0 | refs/heads/master | 2023-08-14T00:56:52.270642 | 2021-09-28T15:59:32 | 2021-09-28T15:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | from collections import defaultdict
import torch.nn as nn
def print_tensor(x):
if len(x.shape) > 1:
return str(x.view(-1, x.shape[-1])[0, :5])
else:
return str(x[:5])
class SanityChecker:
def __init__(self, should_check, is_verbose, stop_key, stop_iters):
self.is_verbose = is_verbose
self.should_check = should_check
self.log_key_calls = defaultdict(lambda:0)
self.stop_key = stop_key
self.stop_iters = stop_iters
def check(self, log_key, **kwargs):
if not self.should_check:
return
if self.is_verbose:
print('---')
for k,v in kwargs.items():
print(self.get_str(k,v))
self.log_key_calls[log_key] += 1
if self.log_key_calls[self.stop_key] >= self.stop_iters:
raise ValueError('Sanity stopped. Program done.')
def check_rnd_state(self, key):
if not self.should_check:
return
weight = nn.Linear(3,2).weight
print(f"{key}:Rnd", weight.view(-1).detach()[0].item())
def get_str(self, k,v, indent=""):
s = f"{indent}{k}: "
if isinstance(v, dict):
for x,y in v.items():
s += "\n"
s += self.get_str(x,y, " ")
elif isinstance(v, nn.Module):
params = list(v.parameters())
sample_spots = [0, -1, -5, 3]
for x in sample_spots:
s += f"\n{indent} {x}:" + print_tensor(params[x])
else:
s += f"{v}"
return s
sanity_checker = None
def get_sanity_checker():
global sanity_checker
assert sanity_checker is not None
return sanity_checker
def set_sanity_checker(args):
global sanity_checker
cmd = args.sanity_cmd
if len(cmd) == 0:
cmd = ':'
stop_key, stop_iters = cmd.split(':')
if stop_iters == '':
stop_iters = 1
else:
stop_iters = int(stop_iters)
sanity_checker = SanityChecker(args.sanity, args.sanity_verbose, stop_key,
stop_iters)
def set_sanity_checker_simple():
global sanity_checker
sanity_checker = SanityChecker(True, True, "", 1000000000)
def check(*args, **kwargs):
get_sanity_checker().check(*args, **kwargs)
def c(v):
get_sanity_checker().check("tmp", v=v)
def check_rand_state(key=""):
get_sanity_checker().check_rnd_state(key)
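# Minimal usage sketch (assumes direct execution; uses only the helpers defined above):
if __name__ == '__main__':
    set_sanity_checker_simple()
    check('demo', value=42)          # prints "value: 42" under a '---' header
    check_rand_state('after_init')   # prints one freshly initialized Linear weight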
| [
"[email protected]"
] | |
49a249a7ab9728cf12bc9b8176cd9f2b40792e1f | 553af2a9b110d54c0c2d972726f69ad9578f772f | /k2_domain/tests.py | 63942882cb5753b4c1bdf78693ee9e5f8bc64b29 | [] | no_license | simonemmott/k2 | 31ca9aca661e4a070ec3dfd6f6533abcc84ed883 | f455727c3f25dd2ad428c9c2936f05d94a62f843 | refs/heads/master | 2022-12-16T23:10:48.687149 | 2019-06-24T22:48:24 | 2019-06-24T22:48:24 | 192,113,010 | 0 | 0 | null | 2022-12-08T05:15:52 | 2019-06-15T18:47:55 | Python | UTF-8 | Python | false | false | 2,428 | py | from django.test import TestCase
from jinja2 import Environment
from k2.jinja2 import environment
from jinja2 import PackageLoader
import json
from k2_util import templates
from k2.settings import BASE_DIR
jinja2_env = environment(loader=PackageLoader('k2_domain', 'jinja2'))
def test_domain():
class Object(object):
pass
class List(object):
def __init__(self, lst):
self.lst = lst
def all(self):
return self.lst
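    # List mimics just enough of a Django related manager (.all()) for templates
    # that iterate over domain.models.all().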
domain = Object()
domain.name = 'DOMAIN_NAME'
model1 = Object()
model1.id = 1
model1.name = 'MODEL_1'
model2 = Object()
model2.id = 2
model2.name = 'MODEL_2'
domain.models = List([model1, model2])
return domain
# Create your tests here.
class Jinja2Tests(TestCase):
def test_template_from_string(self):
template = jinja2_env.from_string('Hello {{target}}!')
output = template.render(target='World')
self.assertEqual('Hello World!', output)
def test_list_from_string(self):
template = jinja2_env.from_string('[{% for model in domain.models.all() %}{{model.name}}.py,{% endfor %}]')
lst = template.render(domain=test_domain())[1:-2].split(',')
self.assertEquals('MODEL_1.py', lst[0])
self.assertEquals('MODEL_2.py', lst[1])
class IndexTests(TestCase):
def test_indexes(self):
index = templates.index(jinja2_env, BASE_DIR, 'k2_domain', 'k2_domain', domain=test_domain())
self.assertEquals(1, len(index))
self.assertTrue('DOMAIN_NAME' in index.keys())
self.assertEquals('k2_domain/domain.name', index.get('DOMAIN_NAME'))
index = templates.index(jinja2_env, BASE_DIR, 'k2_domain', 'k2_domain/domain.name/models', domain=test_domain())
self.assertEquals(3, len(index))
self.assertTrue('__init__.py' in index.keys())
self.assertEquals('k2_domain/domain.name/models/__init__.py', index.get('__init__.py'))
self.assertTrue('MODEL_1.py' in index.keys())
self.assertEquals('k2_domain/domain.name/models/model.py&model=1', index.get('MODEL_1.py'))
self.assertTrue('MODEL_2.py' in index.keys())
self.assertEquals('k2_domain/domain.name/models/model.py&model=2', index.get('MODEL_2.py'))
| [
"[email protected]"
] | |
eab2e39b65e58dce27b14af21523d7b9932dfbcf | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/cellblender/bng/bng_operators.py | a10ca18c55f2aa90e867e4bc68e9c7d6f33991d3 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | import bpy
import os
import subprocess
from cellblender import cellblender_properties, cellblender_operators
#from . import net
# We use per module class registration/unregistration
filePath = ''
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
def cleanup(filePath):
pass
def execute_bionetgen(filepath,context):
mcell = context.scene.mcell
if mcell.cellblender_preferences.bionetgen_location_valid:
bngpath = mcell.cellblender_preferences.bionetgen_location
print ("\nBioNetGen exe found: " + bngpath)
destpath = os.path.dirname(__file__)
exe_bng = " ".join([bngpath, "--outdir", destpath, filepath]) # create command string for BNG execution
print("*** Starting BioNetGen execution ***")
print(" Command: " + exe_bng )
#os.system(exe_bng) # execute BNG
subprocess.call([bngpath,"--outdir",destpath,filepath])
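        # subprocess.call with an argument list avoids the shell-quoting pitfalls
        # that os.system would hit on paths containing spaces.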
else:
# Perform the search as done before
from os.path import exists
filebasename = os.path.basename(filepath)
filedirpath = os.path.dirname(filepath) # dir of the bngl script file
check_dir = filedirpath;
n = 0
while(n!=20): # iterative search for BNG exe file (starts from the dir containing the bngl script file)
bng_dir = check_dir # current dir (+ any unchecked child dir) to be checked
checked = {} # list of dirs for which search is complete
i = 0
for (dirpath, dirname, filename) in os.walk(bng_dir): # Search over the current and previously unchecked child dirs
if (i == 0):
check_dir = os.path.dirname(dirpath) # mark the parent dir for next search (after current and child dirs are done)
i = 1
if dirpath in checked: # escape any child dir if already been checked
continue
bngpath = os.path.join(dirpath,"BNG2.pl") # tentative path for the BNG exe. file
print ( "Searching for " + bngpath )
if os.path.exists(bngpath): # if BNG exe.file found, proceed for BNG execution
print ("\nBioNetGen exe found: " + bngpath)
destpath = os.path.dirname(__file__)
exe_bng = " ".join([bngpath, "--outdir", destpath, filepath]) # create command string for BNG execution
print("*** Started BioNetGen execution ***")
#os.system(exe_bng) # execute BNG
subprocess.call([bngpath,"--outdir",destpath,filepath])
return{'FINISHED'}
checked.update({dirpath:True}) # store checked directory in the list
n +=1
if (n==20): # too many iterations; BNG not found, stop further search
print ("Error running BioNetGen. BNG2.pl not found....")
return{'FINISHED'}
| [
"[email protected]"
] | |
d40c9fe210c8f3cad3a2f5d0c8912bb687d1a187 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/subscription/v20191001preview/__init__.py | 809b67527e985eb66ca6611d911e43258c96a031 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 393 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_subscription_alias import *
from .subscription_alias import *
from ._inputs import *
from . import outputs
| [
"[email protected]"
] | |
9e2cf686b700b1a54492e9adc5d477ec83e3fbad | eec698a1cb1b76a5f00ca51726bb7387104dbe89 | /backend/app/models/category.py | a5d236d50fa781ec1431eabd8c1822f4bc571eab | [] | no_license | MSurfer20/recommender-portal | 9450153e94f89785038918c29c227556df3636b4 | 02924c0d813af4ddb29041ccad34e57963e57168 | refs/heads/master | 2022-10-28T11:10:48.265123 | 2020-06-09T20:36:18 | 2020-06-09T20:36:18 | 267,325,013 | 1 | 0 | null | 2020-06-09T13:43:08 | 2020-05-27T13:20:46 | JavaScript | UTF-8 | Python | false | false | 1,119 | py | from enum import Enum
from pydantic import AnyUrl, Field
from typing import List, Optional
from bson.objectid import ObjectId
from .base import Base, ObjectID, MusicEnum, VideoEnum, CategoryEnum
class Category(Base):
"""Category definiton"""
id: CategoryEnum = Field(None, alias="_id")
class ItemBase(Base):
"""Base fields for any item"""
id: ObjectID = Field(None, alias="_id")
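    # alias="_id" lets pydantic read/write the MongoDB document key while the
    # Python attribute stays "id".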
flags: List[str] = list()
hidden: bool = False
title: str
url: AnyUrl
year_release: int
genres: List[str] = list()
class Show(ItemBase):
"""Shows category definition"""
seasons: int
episode_length: int
season_length: int
streaming: Optional[VideoEnum]
class Anime(Show):
"""Anime category defintion"""
# NOTE update if any other fields required
pass
class Movie(ItemBase):
"""Movie category definition"""
language: str
director: str
streaming: Optional[VideoEnum]
class Music(ItemBase):
"""Music category definiton"""
artist: str
album: Optional[str]
streaming: Optional[MusicEnum]
class Book(ItemBase):
author: str
| [
"[email protected]"
] | |
468e1bd89d23c215a903bc44ce45617e25a3f400 | c3787b68c6276a2dd10008f609e6091ecbcca0b0 | /two_pointers/triplets_smaller_sum.py | 138fbcefec62ae14e4e1f8c54378519a6183ac46 | [
"MIT"
] | permissive | cosmos-sajal/ds_algo | 45afc8470c2b32fc56041bfbca8acb886cb70864 | d01912b8c42d8465660cee04e7648731c9acbaff | refs/heads/master | 2022-11-06T15:44:25.030664 | 2020-06-20T16:59:56 | 2020-06-20T16:59:56 | 263,012,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # https://www.educative.io/courses/grokking-the-coding-interview/mElknO5OKBO
def triplet_with_smaller_sum(arr, target):
arr.sort()
arr_len = len(arr)
result = 0
for i in range(arr_len - 1):
ptr1 = i + 1
ptr2 = arr_len - 1
sum = target - arr[i]
while ptr1 < ptr2:
if arr[ptr1] + arr[ptr2] < sum:
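                # arr is sorted, so every index j in (ptr1, ptr2] also gives
                # arr[i] + arr[ptr1] + arr[j] < target: count all ptr2 - ptr1 pairs at once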
result += (ptr2 - ptr1)
ptr1 += 1
else:
ptr2 -= 1
return result
def main():
print(triplet_with_smaller_sum([-1, 0, 2, 3], 3))
print(triplet_with_smaller_sum([-1, 4, 2, 1, 3], 5))
main()
| [
"[email protected]"
] | |
9274b3b36010b8ebad3d07e1d6dd9345b598c5df | 5cf0842f3c066b2dbbea703bfff9e90b69905937 | /neurodsp/tests/test_utils_outliers.py | 09058cbc0b8443cc26506d563be3579b51a623c6 | [
"MIT"
] | permissive | srcole/neurodsp | 9e880c990587e928e3cc550fef53f5bb1d112bfa | 6b500d967a2ca63b62d07ab345e021f15e53be6a | refs/heads/master | 2021-05-23T05:48:06.916165 | 2019-06-14T03:44:06 | 2019-06-14T03:44:06 | 94,925,277 | 1 | 0 | null | 2017-08-30T18:30:22 | 2017-06-20T18:58:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,261 | py | """Tests for outlier related utility functions."""
import numpy as np
from numpy.testing import assert_equal
from neurodsp.utils.outliers import *
###################################################################################################
###################################################################################################
def test_remove_nans():
# Test with equal # of NaNs on either edge
arr = np.array([np.NaN, np.NaN, 1, 2, 3, np.NaN, np.NaN])
arr_no_nans, arr_nans = remove_nans(arr)
assert_equal(arr_no_nans, np.array([1, 2, 3]))
assert_equal(arr_nans, np.array([True, True, False, False, False, True, True]))
# Test with different # of NaNs on either edge
arr = np.array([np.NaN, np.NaN, 1, 2, 3, 4, np.NaN,])
arr_no_nans, arr_nans = remove_nans(arr)
assert_equal(arr_no_nans, np.array([1, 2, 3, 4]))
assert_equal(arr_nans, np.array([True, True, False, False, False, False, True]))
def test_restore_nans():
arr_no_nans = np.array([1, 2, 3])
arr_nans = np.array([True, True, False, False, False, True])
arr_restored = restore_nans(arr_no_nans, arr_nans)
assert_equal(arr_restored, np.array([np.NaN, np.NaN, 1, 2, 3, np.NaN]))
def test_discard_outliers():
pass
| [
"[email protected]"
] | |
019daba148c86709b395b3909db8a86a6c9a1bd3 | bda7a0576e17fe417175680b5698635b876c8091 | /users/migrations/0002_auto_20200219_1614.py | b420d734a989b370f84149c8b0aed3a478f4ff1b | [
"Apache-2.0"
] | permissive | Robotix-NITRR/RobotixWeb2021 | 56bb66667e5bd106930138f7ed69afeee609fe59 | 7f66a0dd5c54c44e6a128d8139d6bfd1135580f9 | refs/heads/master | 2023-04-04T09:59:53.961189 | 2021-04-12T12:07:25 | 2021-04-12T12:07:25 | 334,345,468 | 0 | 2 | Apache-2.0 | 2021-01-30T08:26:24 | 2021-01-30T06:31:56 | Python | UTF-8 | Python | false | false | 541 | py | # Generated by Django 2.1.7 on 2020-02-19 10:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='currency',
field=models.IntegerField(default=1000),
),
migrations.AddField(
model_name='userprofile',
name='is_active',
field=models.BooleanField(default=True),
),
]
| [
"[email protected]"
] | |
d65d42379061c239c3e54ddd63c11dffd5c86e91 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/9e31bd9e2d6cba783883eb4905b19fcbe0ca5534-<backend_pyqt4_internal_check>-bug.py | 7487ccb89335c1e72c0ba32a5347a266fdafb3a0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | def backend_pyqt4_internal_check(self):
try:
from PyQt4 import QtCore
except ImportError:
raise CheckFailed('PyQt4 not found')
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.QT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt4 not correctly imported')
else:
return ('Qt: %s, PyQt: %s' % (self.convert_qt_version(qt_version), pyqt_version_str)) | [
"[email protected]"
] | |
407652123c34cc2bfd537335f227c65cefbb2c11 | f1af5236a561c07d1def8da8716be333e2fb896c | /test/Service/test_pack_unpack.py | 35fbd6df707d360ab26b285c2acb982ad03f3d84 | [
"Apache-2.0"
] | permissive | chryswoods/acquire | 1a9dbbfa0f82d761a314fd34b77c0ad982ff60e6 | fe4c9cb2b90374b386d5ea38e514faa96661701a | refs/heads/devel | 2022-12-22T08:29:52.155771 | 2022-01-11T11:34:02 | 2022-01-11T11:34:02 | 156,683,200 | 21 | 5 | Apache-2.0 | 2022-12-17T12:47:35 | 2018-11-08T09:35:59 | Python | UTF-8 | Python | false | false | 2,995 | py |
import pytest
from Acquire.Crypto import PrivateKey, get_private_key
from Acquire.Service import pack_arguments, unpack_arguments
from Acquire.Service import pack_return_value, unpack_return_value
from Acquire.Service import create_return_value
from Acquire.ObjectStore import string_to_bytes, bytes_to_string
import random
import json
def _bar():
raise PermissionError("Test Traceback")
def _foo():
_bar()
def test_pack_unpack_args_returnvals():
privkey = get_private_key("testing")
pubkey = privkey.public_key()
args = {"message": "Hello, this is a message",
"status": 0,
"long": [random.random() for _ in range(2)]}
func = "test_function"
packed = pack_arguments(function=func, args=args)
crypted = pubkey.encrypt(packed)
uncrypted = privkey.decrypt(crypted)
(f, unpacked, keys) = unpack_arguments(args=uncrypted)
print(keys)
assert(args == unpacked)
assert(f == func)
packed = pack_arguments(function=func, args=args,
key=pubkey, response_key=pubkey,
public_cert=pubkey)
data = json.loads(packed.decode("utf-8"))
assert(data["encrypted"])
assert(data["fingerprint"] == privkey.fingerprint())
payload = privkey.decrypt(string_to_bytes(data["data"]))
payload = json.loads(payload)
assert(payload["sign_with_service_key"] == privkey.fingerprint())
assert(payload["encryption_public_key"] == bytes_to_string(pubkey.bytes()))
assert(payload["payload"] == args)
(f, unpacked, keys) = unpack_arguments(function=func, args=packed,
key=privkey)
message = {"message": "OK"}
return_value = create_return_value(message)
packed_result = pack_return_value(function=func,
payload=return_value, key=keys,
private_cert=privkey)
result = json.loads(packed_result.decode("utf-8"))
assert(result["fingerprint"] == privkey.fingerprint())
assert(result["encrypted"])
data = string_to_bytes(result["data"])
sig = string_to_bytes(result["signature"])
pubkey.verify(signature=sig, message=data)
data = json.loads(privkey.decrypt(data))
assert(data["payload"]["return"] == message)
result = unpack_return_value(return_value=packed_result,
key=privkey, public_cert=pubkey)
assert(result == message)
try:
return_value = create_return_value(_foo())
except Exception as e:
return_value = create_return_value(e)
packed_result = pack_return_value(function=func,
payload=return_value, key=keys,
private_cert=privkey)
with pytest.raises(PermissionError):
result = unpack_return_value(function=func, return_value=packed_result,
key=privkey, public_cert=pubkey)
| [
"[email protected]"
] | |
7014ef7a1f5107c645c6b794df58809717cbbcd7 | 2d47e5f7f358429ac46ed6bf61e8b8d68581dfeb | /cart/views.py | 3899a4bae7bb3e9e967e15dcfab285467cdcbbb8 | [] | no_license | hossamelneily/Freeshop | 7e24bbf6eb3668a121e9d6903347e6405295d589 | 48b1650164bad5f9691b11dc7f2377c800bd8240 | refs/heads/master | 2021-11-21T20:11:23.306946 | 2019-10-28T13:16:42 | 2019-10-28T13:16:42 | 144,614,064 | 1 | 2 | null | 2021-09-08T00:07:47 | 2018-08-13T17:46:42 | HTML | UTF-8 | Python | false | false | 5,513 | py | from django.shortcuts import render,redirect
from django.contrib.auth import authenticate , login , get_user_model
from django.conf import settings
# Create your views here.
from cart.models import cart
from products.models import product
from products.models import product
from ecommerce.forms import login_page,GuestForm
from billing.models import billing
from Address.models import Address
from Guest.models import Guest
from django.urls import reverse
from Address.forms import AdressForm,UsePrevAdd
from django.http import JsonResponse
import json
user=get_user_model()
def cart_view_API(request):
cart_obj = cart.objects.get_or_create(request)
    products_qs = cart_obj.products.all()  # renamed from "product" to avoid shadowing the imported model
    product_list = []
    for x in products_qs:
product_list.append({"name":x.Name,"price":x.price,"url":x.get_absolute_url(),"id":x.id})
return JsonResponse({ "product": product_list,
"cart_total": cart_obj.total,
"cart_subtotal":cart_obj.subtotal
})
def cart_view(request):
# i have created a function in the carts.models and i will call it instead of implementing it here.
cart_obj=cart.objects.get_or_create(request)
return render(request,"carts/home.html",{"cart_obj":cart_obj})
def cart_update(request):
prod_obj=product.objects.get_by_id(id=request.POST.get('product_id'))
cart_obj = cart.objects.get_or_create(request)
if prod_obj in cart_obj.products.all():
cart_obj.products.remove(prod_obj)
added=False
else:
cart_obj.products.add(prod_obj)
added=True
request.session['cart_items']=cart_obj.products.count()
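    # cache the item count in the session so templates can show the cart badge
    # without re-querying the cart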
if request.is_ajax(): # will return json format
print("Ajax is working ")
json_data={
"added":added,
"cart_items_count":request.session.get("cart_items",0)
}
return JsonResponse(json_data)
return redirect("cart:show") # need to modify this return
def checkout_view(request):
cart_obj = cart.objects.get_or_create(request)
order_obj=None
Address_qs=None
prev_form_shipping = None
prev_form_billing = None
Shipping_Address_qs = None
Billing_Address_qs = None
loginform = login_page(request.POST or None)
guestform = GuestForm(request.POST or None)
adressForm = AdressForm(request.POST or None)
has_card = None
# change =None
billing_profile= billing.objects.get_or_new(request)
if billing_profile is not None:
order_obj, order_created = orders.objects.get_or_new(billing_profile, cart_obj)
order_obj.Associate_orders_to_Addresses(request)
if request.user.is_authenticated:
Address_qs=Address.objects.filter(billing=billing_profile)
Shipping_Address_qs = Address_qs.filter(Address_Type='shipping').values('id','Address_line_1','State','Postal_Code','city').distinct()
            # values('id','Address_line_1','State','Postal_Code','city') --> 'id' is included because we need to save the id of the chosen address,
            # but since ids always differ, .distinct() can still return rows whose visible fields are identical;
            # the user has to enter genuinely different addresses for this query to return distinct entries.
if Shipping_Address_qs:
prev_form_shipping = UsePrevAdd(request.POST or None,initial={'Address_Type':'shipping'})
Billing_Address_qs = Address_qs.filter(Address_Type='billing').values('id','Address_line_1','State','Postal_Code','city').distinct()
if Billing_Address_qs:
prev_form_billing = UsePrevAdd(request.POST or None,initial={'Address_Type':'billing'})
# if 'change' in request.build_absolute_uri():
# print('changes')
# change =True
# return redirect("cart:checkout")
has_card=billing_profile.has_card # get the active cards
if request.method=="POST" and not request.is_ajax(): #came from checkout() function when user entered all addresses and payement method, last step
is_done = order_obj.check_orders()
if is_done:
did_charge = billing_profile.process_charge(order_obj=order_obj)
if did_charge:
order_obj.mark_paid()
del request.session['cart_id']
request.session['cart_items'] = 0
# if not billing_profile.user: #not means false or None # is None means None only this for guest user
# billing_profile.set_card_inactive
return redirect("cart:success")
return redirect("cart:checkout")
context={
"cart_obj":cart_obj,
"object": order_obj,
"billing_profile":billing_profile,
"loginpage":loginform,
"guestform":guestform,
"addressform":adressForm,
'prev_form_shipping':prev_form_shipping,
'prev_form_billing': prev_form_billing,
"Address_qs":Address_qs,
'Shipping_Address_qs':Shipping_Address_qs,
'Billing_Address_qs':Billing_Address_qs,
"has_card":has_card,
# 'change':change,
"public_key":getattr(settings,'STRIPE_PUB_Key','pk_test_UmKYvEdkBYpow9jUa9gloSTC')
}
return render(request,"carts/checkout.html",context)
def checkout_done(request):
return render(request, "carts/success.html", {})
| [
"[email protected]"
] | |
20683b9d7eaaf6988c7944329dd9cec29ca2947e | bab737891a602e8afc8b6a132ace3f05c37999e4 | /blog/migrations/0009_auto_20170821_1442.py | d499a002ce2474ef9b550b762290ec0d1950cb34 | [] | no_license | lianchonghui/django-blog | 34ddf308d15adf633b10676f835f27dd94a457f0 | 53f395f1d2ad2e4cea1fe38b99db705bb7fb352e | refs/heads/master | 2021-09-09T15:33:40.171955 | 2018-03-17T14:24:05 | 2018-03-17T14:24:05 | 118,338,599 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,203 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-21 06:42
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import blog
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20170430_1033'),
]
operations = [
migrations.AlterField(
model_name='category',
name='cover',
field=models.ImageField(blank=True, upload_to='covers/categories/%Y/%m/%d/', verbose_name='cover'),
),
migrations.AlterField(
model_name='category',
name='cover_caption',
field=models.CharField(blank=True, max_length=255, verbose_name='cover caption'),
),
migrations.AlterField(
model_name='category',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='creation time'),
),
migrations.AlterField(
model_name='category',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='creator'),
),
migrations.AlterField(
model_name='category',
name='description',
field=models.TextField(blank=True, verbose_name='description'),
),
migrations.AlterField(
model_name='category',
name='genre',
field=models.PositiveSmallIntegerField(choices=[(1, 'collection'), (2, 'tutorial')], default=1, verbose_name='genre'),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='category',
name='resource',
field=models.URLField(blank=True, verbose_name='resource'),
),
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(unique=True, verbose_name='slug'),
),
migrations.AlterField(
model_name='category',
name='status',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'ongoing'), (2, 'finished')], null=True, verbose_name='status'),
),
migrations.AlterField(
model_name='category',
name='title',
field=models.CharField(blank=True, max_length=255, verbose_name='title'),
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
migrations.AlterField(
model_name='post',
name='body',
field=models.TextField(verbose_name='body'),
),
migrations.AlterField(
model_name='post',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='category'),
),
migrations.AlterField(
model_name='post',
name='cover',
field=models.ImageField(blank=True, upload_to=blog.models.post_cover_path, verbose_name='cover'),
),
migrations.AlterField(
model_name='post',
name='created_time',
field=models.DateTimeField(auto_now_add=True, verbose_name='creation time'),
),
migrations.AlterField(
model_name='post',
name='excerpt',
field=models.CharField(blank=True, max_length=255, verbose_name='excerpt'),
),
migrations.AlterField(
model_name='post',
name='modified_time',
field=models.DateTimeField(auto_now=True, verbose_name='modification time'),
),
migrations.AlterField(
model_name='post',
name='pub_date',
field=models.DateTimeField(blank=True, null=True, verbose_name='publication time'),
),
migrations.AlterField(
model_name='post',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'published'), (2, 'draft'), (3, 'hidden')], default=2, verbose_name='status'),
),
migrations.AlterField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tag', verbose_name='tags'),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=255, verbose_name='title'),
),
migrations.AlterField(
model_name='post',
name='views',
field=models.PositiveIntegerField(default=0, editable=False, verbose_name='views'),
),
migrations.AlterField(
model_name='tag',
name='name',
field=models.CharField(max_length=100, verbose_name='name'),
),
]
| [
"[email protected]"
] | |
90bb6f2d6386a5ad0b37e43f530eb0556d348aa8 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/network/netvisor/pn_ospfarea.py | 34739d451da56d21430f472be5da741f0e3d2cbd | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 6,375 | py | #!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_ospfarea
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove ospf area to/from a vrouter.
description:
  - Execute vrouter-ospf-area-add, vrouter-ospf-area-remove or vrouter-ospf-area-modify command.
- This command adds/removes Open Shortest Path First(OSPF) area to/from
a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Login username.
required: true
pn_clipassword:
description:
- Login password.
required: true
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to add ospf-area, 'absent'
to remove ospf-area and 'update' to modify ospf-area.
required: true
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: true
pn_ospf_area:
description:
- Specify the OSPF area number.
required: true
pn_stub_type:
description:
- Specify the OSPF stub type.
choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
pn_prefix_listin:
description:
- OSPF prefix list for filtering incoming packets.
pn_prefix_listout:
description:
- OSPF prefix list for filtering outgoing packets.
pn_quiet:
description:
- Enable/disable system information.
required: false
default: true
"""
EXAMPLES = """
- name: "Add OSPF area to vrouter"
pn_ospfarea:
state: present
pn_cliusername: admin
    pn_clipassword: admin
    pn_vrouter_name: name-string
    pn_ospf_area: 1.0.0.0
    pn_stub_type: stub
- name: "Remove OSPF area from vrouter"
  pn_ospfarea:
state: absent
pn_cliusername: admin
pn_clipassword: admin
pn_vrouter_name: name-string
pn_ospf_area: 1.0.0.0
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-ospf-area-add'
if state == 'absent':
command = 'vrouter-ospf-area-remove'
if state == 'update':
command = 'vrouter-ospf-area-modify'
return command
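# Illustration only (my addition, not part of the module's runtime path):
#   get_command_from_state('present') -> 'vrouter-ospf-area-add'
#   get_command_from_state('absent')  -> 'vrouter-ospf-area-remove'
#   get_command_from_state('update')  -> 'vrouter-ospf-area-modify'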
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_ospf_area=dict(required=True, type='str'),
pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
'stub-no-summary',
'nssa-no-summary']),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_quiet=dict(type='bool', default='True')
)
)
# Accessing the arguments
cliusername = module.params['pn_cliusername']
clipassword = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
ospf_area = module.params['pn_ospf_area']
stub_type = module.params['pn_stub_type']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
quiet = module.params['pn_quiet']
command = get_command_from_state(state)
# Building the CLI command string
cli = '/usr/bin/cli'
if quiet is True:
cli += ' --quiet '
cli += ' --user %s:%s ' % (cliusername, clipassword)
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)
if stub_type:
cli += ' stub-type ' + stub_type
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
# Run the CLI command
ospfcommand = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(ospfcommand)
# Response in JSON format
if result != 0:
module.exit_json(
command=cli,
stderr=err.rstrip("\r\n"),
changed=False
)
else:
module.exit_json(
command=cli,
stdout=out.rstrip("\r\n"),
changed=True
)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
52c0c13d7052ae235e036a5de1bc4bb4f2f89261 | a5a3ccf9feae3f5aa475153d92447f8f3ba8c013 | /data/kcbot.py | b5e78e6669e62e23f0901827fbc01946a7740baa | [] | no_license | waldenven/tsdata | 2a309d976ce2994c7cca8ad91e6287cb8180b8cf | a4229b7978f4c14ffc2201ea38a1a44e68dec130 | refs/heads/master | 2020-06-13T22:42:45.334001 | 2012-07-03T03:42:01 | 2012-07-03T03:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | import urllib, re
from datetime import datetime
from pandas import DataFrame, Series, read_csv
from BeautifulSoup import BeautifulSoup
from tsdata.data.basedata import basedata
def all_daily_urls():
url = 'http://www.kcbt.com/daily_wheat_price.asp'
f = urllib.urlopen(url)
txt = f.read()
f.close()
soup = BeautifulSoup(txt)
ts = soup.findAll('table', { 'border' : 1, 'cellpadding' : "3", 'align' :"center", 'width' : "50%"})
tds = soup.findAll('td', width='33%', nowrap='nowrap')
return map(lambda x: x.a['href'], tds)
def one_date(url):
f = urllib.urlopen(url)
df = read_csv(f)
    df = df.rename(columns=lambda x: x.strip().lower()).dropna()
df['date'] = map(lambda x: datetime.strptime(x.strip(), '%m/%d/%Y').date(), df['date'])
for col in df.columns:
if col not in ['exch', 'comid', 'date']:
df[col] = map(lambda x: float(x), df[col])
elif col in ['exch', 'comid']:
df[col] = map(lambda x: x.strip(), df[col])
df['LYY'] = map(lambda m,y: 'FGHJKMNQUVXZ'[int(m-1)] + '%02d' % (y), df['month'], df['year'])
return df
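# Minimal usage sketch (my addition; assumes kcbt.com is still reachable and
# serving these CSVs, and reuses the URL pattern from _updateday below with
# the `earliest` date defined in kcbotfuts):
#
#   df = one_date('http://www.kcbt.com/download/kcprccsv/kcprccsv_20111116.csv')
#   print df.head()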
class kcbotfuts(basedata):
archivename = 'kcbot.pickle'
tag = 'f'
chunktype = 'DAY'
earliest = datetime(2011,11,16)
_cache = None
_changed = False
_updated = False
_scaling = { k:0.01 for k in [ 'previous', 'open', 'high', 'low', 'close', 'settle' ] } # to match CBOT
def handles(self, symbol):
l = len(self.tag) + 1 + 2
return symbol[:l].lower() == self.tag + '_' + 'kw'
def parsesymbol(self, symbol):
synre = re.compile('%s_([^@_]*)_([^@]*)@(.*)' % self.tag )
synrenoat = re.compile('%s_([^@_]*)_([^@]*)' % self.tag )
m = synre.match( symbol )
if m:
commod = m.group(1)
month = m.group(2)
tag = m.group(3)
else:
m = synrenoat.match( symbol )
commod = m.group(1)
month = m.group(2)
tag = 'settle'
commod = commod.upper()
return { 'column' : tag, 'filter' : { 'comid' : commod, 'LYY' : month } }
def _updateday(self, din):
df = DataFrame()
url = 'http://www.kcbt.com/download/kcprccsv/kcprccsv_%4d%02d%02d.csv' % (din.year, din.month, din.day)
return one_date(url) | [
"[email protected]"
] | |
9f73be10103f40ed4d479780397977619122dcb9 | 9947d1e328a3262a35a61385dc537c3dc557ab7d | /pythonnet/day3_PM/day3/recv_file.py | db7d06884dc73ce18d8d0d17c294d3419bf2c549 | [] | no_license | nuass/lzh | d0a7c74a3295523d1fe15eeaa73997fc04469f06 | 3cb1cf1e448b88ade226d113a7da4eab7bbb5c09 | refs/heads/master | 2021-02-06T06:10:32.772831 | 2019-06-10T08:54:49 | 2019-06-10T08:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from socket import *
s = socket()
s.bind(('127.0.0.1',8888))
s.listen(3)
c,addr = s.accept()
print("Connect from",addr)
f = open('leg.jpg','wb')
while True:
data = c.recv(1024)
if not data:
break
f.write(data)
f.close()
c.close()
s.close()
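# A matching sender for testing (my sketch, not part of the original script;
# the source file path is an assumption). The receiver above relies on the
# sender closing its socket to signal end-of-file:
#
#   from socket import *
#   c = socket()
#   c.connect(('127.0.0.1', 8888))
#   with open('source.jpg', 'rb') as f:
#       while True:
#           chunk = f.read(1024)
#           if not chunk:
#               break
#           c.send(chunk)
#   c.close()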
| [
"[email protected]"
] | |
3bacc8c066e42bc4d4187216f852f631b9e01070 | e21ab91cf22bf8359831e974d49e1fd90b69197a | /tests/conftest.py | 3208b58757a89871d4e97393c3db78b2a76d169c | [
"BSD-3-Clause"
] | permissive | creamofclubs/odin | 9be21013a7d707aee978e83bc2cb9ef70f81befa | deb00b6af56d319a3e11f71dbee7d217c8713fae | refs/heads/master | 2023-08-12T04:06:34.791245 | 2021-09-06T03:11:55 | 2021-09-06T03:11:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import os
import sys
import datetime
HERE = os.path.abspath(os.path.dirname(__file__))
SRC = os.path.normpath(os.path.join(HERE, "..", "src"))
sys.path.insert(0, SRC)
import odin.datetimeutil
ARE_YOU_EXPERIENCED = datetime.date(1967, 5, 12)
MWT = odin.datetimeutil.FixedTimezone(-6, "Mountain War Time")
BOOM = datetime.datetime(1945, 7, 16, 5, 29, 45, 0, MWT)
| [
"[email protected]"
] | |
10047578aaf146ba74d5f3a29732c1da9c35664d | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CIENA-CES-RSVPTE-MIB.py | 58dd142d71251b6a7644c3ba009df73d5dab4088 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 13,064 | py | #
# PySNMP MIB module CIENA-CES-RSVPTE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CIENA-CES-RSVPTE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:31:58 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
cienaCesNotifications, cienaCesConfig = mibBuilder.importSymbols("CIENA-SMI", "cienaCesNotifications", "cienaCesConfig")
CienaGlobalState, = mibBuilder.importSymbols("CIENA-TC", "CienaGlobalState")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ObjectIdentity, Bits, TimeTicks, Counter32, Counter64, NotificationType, Integer32, MibIdentifier, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ObjectIdentity", "Bits", "TimeTicks", "Counter32", "Counter64", "NotificationType", "Integer32", "MibIdentifier", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ModuleIdentity")
TruthValue, RowStatus, MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "MacAddress", "TextualConvention", "DisplayString")
cienaCesRsvpteMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16))
cienaCesRsvpteMIB.setRevisions(('2016-07-15 00:00', '2016-07-14 00:00', '2016-07-04 00:00', '2013-05-08 00:00', '2011-02-02 00:00',))
if mibBuilder.loadTexts: cienaCesRsvpteMIB.setLastUpdated('201607150000Z')
if mibBuilder.loadTexts: cienaCesRsvpteMIB.setOrganization('Ciena, Inc')
class AdvertisedLabel(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 99))
namedValues = NamedValues(("implicitnull", 1), ("nonreserved", 99))
class RsvpOperStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("operStatusUp", 1), ("operStatusDown", 2), ("operStatusGoingUp", 3), ("operStatusGoingDown", 4), ("operStatusActFailed", 5))
class RsvpGRMode(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("helpNeighbor", 1), ("restartCapable", 2), ("notApplicable", 3))
cienaCesRsvpteMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1))
cienaCesRsvpteObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1))
cienaCesRsvpte = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2))
cienaCesRsvpteAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 1), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteAdminStatus.setStatus('current')
cienaCesRsvpteOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 2), RsvpOperStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteOperStatus.setStatus('current')
cienaCesRsvpteRetryInterval = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(3, 65)).clone(3)).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRetryInterval.setStatus('current')
cienaCesRsvpteRetryInfiniteState = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2))).clone('on')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRetryInfiniteState.setStatus('current')
cienaCesRsvpteRetryMax = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRetryMax.setStatus('current')
cienaCesRsvpteRefreshInterval = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 6), Integer32().clone(30000)).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRefreshInterval.setStatus('current')
cienaCesRsvpteRefreshMultiple = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRefreshMultiple.setStatus('current')
cienaCesRsvpteRfrshSlewDenom = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRfrshSlewDenom.setStatus('deprecated')
cienaCesRsvpteRfrshSlewNumerator = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRfrshSlewNumerator.setStatus('deprecated')
cienaCesRsvpteBlockadeMultiple = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteBlockadeMultiple.setStatus('current')
cienaCesRsvpteLSPSetupPriority = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteLSPSetupPriority.setStatus('current')
cienaCesRsvpteLSPHoldingPriority = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteLSPHoldingPriority.setStatus('current')
cienaCesRsvpteUseHopByHop = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 13), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteUseHopByHop.setStatus('current')
cienaCesRsvpteRestartCapable = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 14), TruthValue().clone('true')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRestartCapable.setStatus('current')
cienaCesRsvpteRestartTime = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 15), Unsigned32().clone(60000)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRestartTime.setStatus('current')
cienaCesRsvpteRecoveryTime = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 16), Unsigned32().clone(120000)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRecoveryTime.setStatus('current')
cienaCesRsvpteMinPeerRestart = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteMinPeerRestart.setStatus('current')
cienaCesRsvpteRefreshSlewDenominator = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRefreshSlewDenominator.setStatus('current')
cienaCesRsvpteRefreshSlewNumerator = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 214783647)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteRefreshSlewNumerator.setStatus('current')
cienaCesRsvpteGRAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 20), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteGRAdminStatus.setStatus('current')
cienaCesRsvpteGRMode = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 1, 21), RsvpGRMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteGRMode.setStatus('current')
cienaCesRsvpteIfTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1), )
if mibBuilder.loadTexts: cienaCesRsvpteIfTable.setStatus('current')
cienaCesRsvpteIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1), ).setIndexNames((0, "CIENA-CES-RSVPTE-MIB", "cienaCesRsvpteIfIndex"))
if mibBuilder.loadTexts: cienaCesRsvpteIfEntry.setStatus('current')
cienaCesRsvpteIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096)))
if mibBuilder.loadTexts: cienaCesRsvpteIfIndex.setStatus('current')
cienaCesRsvpteIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfName.setStatus('current')
cienaCesRsvpteIfIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfIpAddr.setStatus('current')
cienaCesRsvpteIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1500, 9216))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfMtu.setStatus('deprecated')
cienaCesRsvpteIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 5), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfAdminStatus.setStatus('current')
cienaCesRsvpteIfOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfOperStatus.setStatus('current')
cienaCesRsvpteIfHelloInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 30))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfHelloInterval.setStatus('current')
cienaCesRsvpteIfHelloTolerance = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfHelloTolerance.setStatus('current')
cienaCesRsvpteIfAdvertisedLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 16, 1, 2, 1, 1, 9), AdvertisedLabel().clone(99)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesRsvpteIfAdvertisedLabel.setStatus('current')
mibBuilder.exportSymbols("CIENA-CES-RSVPTE-MIB", PYSNMP_MODULE_ID=cienaCesRsvpteMIB, cienaCesRsvpteUseHopByHop=cienaCesRsvpteUseHopByHop, cienaCesRsvpteLSPSetupPriority=cienaCesRsvpteLSPSetupPriority, cienaCesRsvpteGRAdminStatus=cienaCesRsvpteGRAdminStatus, cienaCesRsvpteObjects=cienaCesRsvpteObjects, cienaCesRsvpteRfrshSlewNumerator=cienaCesRsvpteRfrshSlewNumerator, cienaCesRsvpteIfMtu=cienaCesRsvpteIfMtu, cienaCesRsvpteGRMode=cienaCesRsvpteGRMode, cienaCesRsvpteRfrshSlewDenom=cienaCesRsvpteRfrshSlewDenom, cienaCesRsvpteIfOperStatus=cienaCesRsvpteIfOperStatus, cienaCesRsvpteIfAdminStatus=cienaCesRsvpteIfAdminStatus, cienaCesRsvpteRetryInfiniteState=cienaCesRsvpteRetryInfiniteState, cienaCesRsvpteRestartCapable=cienaCesRsvpteRestartCapable, cienaCesRsvpteRefreshMultiple=cienaCesRsvpteRefreshMultiple, cienaCesRsvpteRetryInterval=cienaCesRsvpteRetryInterval, cienaCesRsvpteRecoveryTime=cienaCesRsvpteRecoveryTime, cienaCesRsvpteIfName=cienaCesRsvpteIfName, cienaCesRsvpteBlockadeMultiple=cienaCesRsvpteBlockadeMultiple, RsvpOperStatus=RsvpOperStatus, cienaCesRsvpteAdminStatus=cienaCesRsvpteAdminStatus, cienaCesRsvpteIfAdvertisedLabel=cienaCesRsvpteIfAdvertisedLabel, cienaCesRsvpteLSPHoldingPriority=cienaCesRsvpteLSPHoldingPriority, cienaCesRsvpteRefreshSlewDenominator=cienaCesRsvpteRefreshSlewDenominator, cienaCesRsvpteRetryMax=cienaCesRsvpteRetryMax, RsvpGRMode=RsvpGRMode, cienaCesRsvpteRestartTime=cienaCesRsvpteRestartTime, cienaCesRsvpteIfIndex=cienaCesRsvpteIfIndex, AdvertisedLabel=AdvertisedLabel, cienaCesRsvpteRefreshInterval=cienaCesRsvpteRefreshInterval, cienaCesRsvpteRefreshSlewNumerator=cienaCesRsvpteRefreshSlewNumerator, cienaCesRsvpteMinPeerRestart=cienaCesRsvpteMinPeerRestart, cienaCesRsvpteIfHelloInterval=cienaCesRsvpteIfHelloInterval, cienaCesRsvpte=cienaCesRsvpte, cienaCesRsvpteIfTable=cienaCesRsvpteIfTable, cienaCesRsvpteIfHelloTolerance=cienaCesRsvpteIfHelloTolerance, cienaCesRsvpteMIBObjects=cienaCesRsvpteMIBObjects, cienaCesRsvpteOperStatus=cienaCesRsvpteOperStatus, cienaCesRsvpteIfEntry=cienaCesRsvpteIfEntry, cienaCesRsvpteIfIpAddr=cienaCesRsvpteIfIpAddr, cienaCesRsvpteMIB=cienaCesRsvpteMIB)
| [
"[email protected]"
] | |
28248168d3c9bf357c416aec52b917287ae926cf | 158a327c3cbff52a607d4ec668c658f0d8d8eea8 | /DP/55. Jump Game.py | 13c37440c5ef1d510135f5ac8ff8bfe1773f858b | [] | no_license | smartwell/leet_niuke | f11cbf826f9b2b358b614a63200e93aef47054a2 | eb84aaa1248074dceda831f9385d68a24941fa04 | refs/heads/master | 2020-06-24T16:37:33.073534 | 2019-07-25T13:19:49 | 2019-07-25T13:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | ########################45##########################
def Jump(nums): # greedy: treat each round as a reachable window; the first window covering the end gives the minimum jumps, avoiding the redundant subproblems of a full DP
    n,fir,end,count=len(nums),0,0,0 # count is the step counter. Idea: within window [fir, end] find the farthest reach as the next end, set the next fir to end+1, and repeat until a window reaches index n-1
    while end < n-1: # loop until the current window covers the last index n-1
count = count + 1
maxend = end + 1
for i in range(fir, end+1):
if i + nums[i] >= n - 1:
return count
maxend = max(i + nums[i], maxend)
        fir, end = end + 1 , maxend # the next window starts where the previous one ended
return count
def Jump2(nums): ##### the version above can misbehave when the list contains 0s
last, cur, step = 0, 0, 0
n = len(nums)
for i in range(n):
if i > last:
step += 1
last = cur
cur = max(cur, i + nums[i])
return step
##############################55###############################
def canJump(nums):
maxend = 0
    for i in range(len(nums)): # forward scan
if i > maxend:
return False
maxend = max(i+nums[i],maxend)
return True
def canJump2(nums):
    n = len(nums) - 1
    goal = n # left-most index known to reach the end; initialize to the last index n (the original used nums[n], which only worked because the first iteration resets it)
    for i in range(n,-1,-1): # backward scan
        if i + nums[i] >= goal:
            goal = i
    return not goal
if __name__ == '__main__':
nums = [0,2,3]
    nums2 = [3,2,1,0,4]
    print(canJump(nums))
    print(canJump2(nums2)) | [
"[email protected]"
] | |
3d0f702ab540285b5e176f6222d8a3dc8c5a96d2 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/network/azure-mgmt-network/generated_samples/network_virtual_appliance_list_by_subscription.py | 175f92d2fd7668c287deb12c365d17aa8ed4f26b | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,527 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python network_virtual_appliance_list_by_subscription.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.network_virtual_appliances.list()
for item in response:
print(item)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/NetworkVirtualApplianceListBySubscription.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c7e9e9554266187a384876586f55b405a1471944 | 9fd934751ef1d332b792d204df0757ed91f36ef6 | /route66_project/route66/views.py | b3e6cee4b0f257ed94d9ee9aff83c33beaa4838e | [] | no_license | cs-fullstack-2019-spring/django-intro2-routes-cw-cgarciapieto | e55fa7a7b4c9ef317b9c6e3db1a8ba8eaea7602c | f232f3125fce93c03e687940e10b22280ba1d0d0 | refs/heads/master | 2020-04-23T23:57:22.501082 | 2019-02-20T05:04:38 | 2019-02-20T05:04:38 | 171,550,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
# http response function
def goods(request):
return HttpResponse("Here you go, POKEMON FOREVER")
def joy(request):
return HttpResponse("This song drives me nuts")
def index(request):
return HttpResponse("the goods or song of death")
# module-level variable used by the response view below
challenge = "I heard you"
def response(request):
    return HttpResponse(challenge)
| [
"[email protected]"
] | |
189c925a75f59f6b8ae9d4cfeb0648a69fcff9a5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/388.py | 94106f5f14c7292d2212da973bc6701dbe2060af | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | def solve(s, k):
s = list(s)
k = int(k)
ans = 0
while s:
if s.pop() != '+':
if len(s) < k - 1: return "IMPOSSIBLE"
for i in range(1, k):
s[-i] = '-' if s[-i] == '+' else '+'
ans += 1
return ans
for i in range(1, int(input()) + 1):
print("Case #", i, ": ", solve(*input().split()), sep='')
| [
"[email protected]"
] | |
6498e8c2aa931f3faecf9b2f1027d5f19f53e176 | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /other_contests/PAST2019/I.py | 1fe31e281c0bf9947e3edece5bda7e352ee8a741 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 1,051 | py | def solve(n, m, s_list, c_list):
dp = [[sum(c_list) + 1] * (2 ** n) for _ in range(m + 1)]
dp[0][0] = 0
for i in range(m):
k = 0
s = s_list[i]
for p in range(n):
if s[- p - 1] == "Y":
k += 2 ** p
c = c_list[i]
for j in range(2 ** n):
dp[i + 1][j] = dp[i][j]
for j in range(2 ** n):
dp[i + 1][j | k] = min(dp[i + 1][j | k], dp[i][j] + c)
# print(dp)
res = dp[m][2 ** n - 1]
if res == sum(c_list) + 1:
return -1
else:
return res
def main():
n, m = map(int, input().split())
s_list = [""] * m
c_list = [0] * m
for i in range(m):
s, c = input().split()
s_list[i] = s
c_list[i] = int(c)
res = solve(n, m, s_list, c_list)
print(res)
def test():
assert solve(3, 4, ["YYY", "YYN", "YNY", "NYY"], [100, 20, 10, 25]) == 30
assert solve(5, 4, ["YNNNN", "NYNNN", "NNYNN", "NNNYN"], [10, 10, 10, 10]) == -1
if __name__ == "__main__":
test()
main()
| [
"[email protected]"
] | |
5fcafa9dc665d308fac15b10dc9dda4e4a8b26f0 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/quiz/models_20210424121724.py | e9feec73e98c6b749012c04f069e8911025079ec | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | from django.db import models
# Create your models here.
class Quiz(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Bangla(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Math(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Science(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class GK(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Mat(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Sci(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class GNK(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
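# A possible refactor (my sketch, not part of the original app): the eight
# classes above are structurally identical, so a Django abstract base model
# could remove the repetition (switching would require fresh migrations):
#
#   class QuestionBase(models.Model):
#       question = models.CharField(max_length = 500)
#       option1 = models.CharField(max_length = 20)
#       option2 = models.CharField(max_length = 20)
#       option3 = models.CharField(max_length = 20)
#       option4 = models.CharField(max_length = 20)
#       answer = models.CharField(max_length = 20)
#       class Meta:
#           abstract = True
#
#   class Quiz(QuestionBase): pass
#   class Bangla(QuestionBase): pass
#   # ... and likewise for the remaining subjects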
| [
"[email protected]"
] | |
1d1d26c8fc8487564347ded0561a2639a59ac73e | 2fc1e3382ae1024b008004ef6302914b492f51e1 | /cos/models/const.py | ce15e48b226aeb7293242c88f466838c589fd636 | [] | no_license | tclh123/COS | abf475b32cb45e6f099508675a8138dd2211e963 | 7e4843fbfe67f7e795eccffc3b48270b152ba438 | refs/heads/master | 2016-09-15T21:43:55.956184 | 2014-01-04T17:46:50 | 2014-01-04T17:46:50 | 15,521,178 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # coding=utf-8
PERM_BASIC = 0b001
PERM_ADMIN = 0b010
PERM_DELIVERY = 0b100
ORDER_STATUS_SUBMIT = 1
ORDER_STATUS_PAY = 2
ORDER_STATUS_WAIT = 3
ORDER_STATUS_DELIVERY = 4
ORDER_STATUS_OVER = 5
# Order status meanings (translated from the original Chinese comments):
# 1 - submitted, waiting for the buyer to complete the order
# 2 - waiting for the buyer to pay
# 3 - waiting for the restaurant to process
# 4 - waiting for the courier to deliver
# 5 - order finished
PAYMENT = {
    u'货到付款': 1,  # cash on delivery
    u'工资支付': 2,  # pay via payroll
    u'网上支付': 3,  # online payment
}
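# Usage sketch (my assumption: the PERM_* flags above are power-of-two bit
# flags meant to be combined and tested bitwise):
#
#   perms = PERM_BASIC | PERM_DELIVERY
#   can_deliver = bool(perms & PERM_DELIVERY)  # True
#   is_admin = bool(perms & PERM_ADMIN)        # False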
| [
"[email protected]"
] | |
eea2161b70cf5a3ecfebef0a04cfa64e7e059291 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_corrective.py | fc4041522f6dce91bdc98602bc6f91a6cf0a525b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py |
# class header
class _CORRECTIVE():
def __init__(self,):
self.name = "CORRECTIVE"
self.definitions = [u'intended to improve a situation: ', u'used to refer to something that is intended to cure a medical condition: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
9d42ae4909b6084829161c508e2a65ffa42c2b2a | a9e3b2e87563acf39ce74c5e57aa4ad5c13404bf | /cartridge/shop/admin.py | 2cae83122ba67a49a11d3d4dce0a7b0f2428ceaf | [
"BSD-3-Clause"
] | permissive | CDC/cartridge | 40cf531f2912637f9bde261baf6b52e94aed388f | b9229617bbf054f1958bf5f2fadaf523d5d72522 | refs/heads/master | 2021-01-20T23:37:26.759210 | 2011-12-19T23:30:15 | 2011-12-19T23:30:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,718 | py |
from copy import deepcopy
from django.contrib import admin
from django.db.models import ImageField
from django.utils.translation import ugettext_lazy as _
from mezzanine.core.admin import DisplayableAdmin, TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from cartridge.shop.fields import MoneyField
from cartridge.shop.forms import ProductAdminForm, ProductVariationAdminForm
from cartridge.shop.forms import ProductVariationAdminFormset
from cartridge.shop.forms import DiscountAdminForm, ImageWidget, MoneyWidget
from cartridge.shop.models import Category, Product, ProductImage
from cartridge.shop.models import ProductVariation, ProductOption, Order
from cartridge.shop.models import OrderItem, Sale, DiscountCode
# Lists of field names.
option_fields = [f.name for f in ProductVariation.option_fields()]
billing_fields = [f.name for f in Order._meta.fields
if f.name.startswith("billing_detail")]
shipping_fields = [f.name for f in Order._meta.fields
if f.name.startswith("shipping_detail")]
category_fieldsets = deepcopy(PageAdmin.fieldsets)
category_fieldsets[0][1]["fields"][3:3] = ["content"] # , "products"]
category_fieldsets += ((_("Product filters"), {
"fields": ("options", "sale", ("price_min", "price_max"), "combined"),
"classes": ("collapse-closed",)},),)
class CategoryAdmin(PageAdmin):
fieldsets = category_fieldsets
formfield_overrides = {ImageField: {"widget": ImageWidget}}
filter_horizontal = ("options",) # "products", )
class ProductVariationAdmin(admin.TabularInline):
verbose_name_plural = _("Current variations")
model = ProductVariation
fields = ("sku", "default", "num_in_stock", "unit_price", "sale_price",
"sale_from", "sale_to", "image")
extra = 0
formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
form = ProductVariationAdminForm
formset = ProductVariationAdminFormset
class ProductImageAdmin(TabularDynamicInlineAdmin):
model = ProductImage
formfield_overrides = {ImageField: {"widget": ImageWidget}}
product_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
product_fieldsets[0][1]["fields"].extend(["available", "categories",
"content"])
product_fieldsets = list(product_fieldsets)
product_fieldsets.append((_("Other products"),
{"classes": ("collapse-closed",), "fields": ("related_products",
"upsell_products")}))
product_fieldsets.insert(1, (_("Create new variations"),
{"classes": ("create-variations",), "fields": option_fields}))
class ProductAdmin(DisplayableAdmin):
list_display = ("admin_thumb", "title", "status", "available",
"admin_link")
list_display_links = ("admin_thumb", "title")
list_editable = ("status", "available")
list_filter = ("status", "available", "categories")
filter_horizontal = ("categories", "related_products", "upsell_products")
search_fields = ("title", "content", "categories__title",
"variations__sku")
inlines = (ProductImageAdmin, ProductVariationAdmin)
form = ProductAdminForm
fieldsets = product_fieldsets
def save_model(self, request, obj, form, change):
"""
Store the product object for creating variations in save_formset.
"""
super(ProductAdmin, self).save_model(request, obj, form, change)
self._product = obj
def save_formset(self, request, form, formset, change):
"""
Create variations for selected options if they don't exist, manage the
default empty variation creating it if no variations exist or removing
it if multiple variations exist, and copy the pricing and image fields
from the default variation to the product.
"""
super(ProductAdmin, self).save_formset(request, form, formset, change)
if isinstance(formset, ProductVariationAdminFormset):
options = dict([(f, request.POST.getlist(f)) for f in option_fields
if request.POST.getlist(f)])
self._product.variations.create_from_options(options)
self._product.variations.manage_empty()
self._product.copy_default_variation()
class ProductOptionAdmin(admin.ModelAdmin):
ordering = ("type", "name")
list_display = ("type", "name")
list_display_links = ("type",)
list_editable = ("name",)
list_filter = ("type",)
search_fields = ("type", "name")
radio_fields = {"type": admin.HORIZONTAL}
class OrderItemInline(admin.TabularInline):
verbose_name_plural = _("Items")
model = OrderItem
extra = 0
formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
class OrderAdmin(admin.ModelAdmin):
ordering = ("status", "-id")
list_display = ("id", "billing_name", "total", "time", "status",
"transaction_id", "invoice")
list_editable = ("status",)
list_filter = ("status", "time")
list_display_links = ("id", "billing_name",)
search_fields = (["id", "status", "transaction_id"] +
billing_fields + shipping_fields)
date_hierarchy = "time"
radio_fields = {"status": admin.HORIZONTAL}
inlines = (OrderItemInline,)
formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
fieldsets = (
(_("Billing details"), {"fields": (tuple(billing_fields),)}),
(_("Shipping details"), {"fields": (tuple(shipping_fields),)}),
(None, {"fields": ("additional_instructions", ("shipping_total",
"shipping_type"), ("discount_total", "discount_code"),
"item_total", ("total", "status"), "transaction_id")}),
)
class SaleAdmin(admin.ModelAdmin):
list_display = ("title", "active", "discount_deduct", "discount_percent",
"discount_exact", "valid_from", "valid_to")
list_editable = ("active", "discount_deduct", "discount_percent",
"discount_exact", "valid_from", "valid_to")
filter_horizontal = ("categories", "products")
formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
form = DiscountAdminForm
fieldsets = (
(None, {"fields": ("title", "active")}),
(_("Apply to product and/or products in categories"),
{"fields": ("products", "categories")}),
(_("Reduce unit price by"),
{"fields": (("discount_deduct", "discount_percent",
"discount_exact"),)}),
(_("Sale period"), {"fields": (("valid_from", "valid_to"),)}),
)
class DiscountCodeAdmin(admin.ModelAdmin):
list_display = ("title", "active", "code", "discount_deduct",
"discount_percent", "min_purchase", "free_shipping", "valid_from",
"valid_to")
list_editable = ("active", "code", "discount_deduct", "discount_percent",
"min_purchase", "free_shipping", "valid_from", "valid_to")
filter_horizontal = ("categories", "products")
formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
form = DiscountAdminForm
fieldsets = (
(None, {"fields": ("title", "active", "code")}),
(_("Apply to product and/or products in categories"),
{"fields": ("products", "categories")}),
(_("Reduce unit price by"),
{"fields": (("discount_deduct", "discount_percent"),)}),
(None, {"fields": (("min_purchase", "free_shipping"),)}),
(_("Valid for"), {"fields": (("valid_from", "valid_to"),)}),
)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductOption, ProductOptionAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(Sale, SaleAdmin)
admin.site.register(DiscountCode, DiscountCodeAdmin)
| [
"[email protected]"
] | |
c925b2637715a9f2f135c947d06035c815739be7 | 434b6556038ad326ffaa8584a8a91edf8ad5c037 | /DP-1/MinStepsTo1_IterativeDP.py | a3f670d079c5c84019726bd05d7717a5a128e3d0 | [] | no_license | Pranav016/DS-Algo-in-Python | 60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2 | 5557e371ccdf801d78ba123ca83c0dd47b3bdb3b | refs/heads/master | 2023-01-23T08:29:32.186861 | 2020-11-01T17:14:12 | 2020-11-01T17:14:12 | 284,651,382 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | import sys
def minStepsIterative(n):
    # dp[i] = minimum number of operations to reduce i to 1, where each
    # operation is: divide by 3, divide by 2, or subtract 1
    dp=[-1 for i in range(n+1)]
    dp[0]=dp[1]=0
    for i in range(2,n+1):
        ans1=ans2=ans3=sys.maxsize
        if i%3==0:
            ans1=dp[i//3] # option: divide by 3
        if i%2==0:
            ans2=dp[i//2] # option: divide by 2
        ans3=dp[i-1] # option: subtract 1
        dp[i]=1+min(ans1,ans2,ans3)
    return dp[n]
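# Worked example (my addition): minStepsIterative(10) == 3, via 10 -> 9 -> 3 -> 1.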
# main
n=int(input())
print(minStepsIterative(n)) | [
"[email protected]"
] | |
7f6ada10257846c167743e85db32815a21168d88 | d24cef73100a0c5d5c275fd0f92493f86d113c62 | /SRC/common/brushstyle.spy | e0783b6eccb843c7e84f0f7042b33858e2133c9c | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | rlinder1/oof3d | 813e2a8acfc89e67c3cf8fdb6af6b2b983b8b8ee | 1fb6764d9d61126bd8ad4025a2ce7487225d736e | refs/heads/master | 2021-01-23T00:40:34.642449 | 2016-09-15T20:51:19 | 2016-09-15T20:51:19 | 92,832,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | spy | # -*- python -*-
# $RCSfile: brushstyle.spy,v $
# $Revision: 1.2.18.1 $
# $Author: langer $
# $Date: 2014/09/27 22:33:47 $
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
from ooflib.common import cregisteredclass
from ooflib.common.IO import parameter
from ooflib.common.IO import xmlmenudump
cregisteredclass.registerCClass(BrushStylePtr)
BrushStylePtr.tip = "Brush styles for pixel selection."
BrushStylePtr.discussion = """<para>
Objects of the <classname>BrushStyle</classname> are used as the
<varname>style</varname> parameter in the <xref
linkend='MenuItem:OOF.Graphics_n.Toolbox.Pixel_Select.Brush'/> command
for selecting pixels.
</para>"""
circleReg = cregisteredclass.Registration(
"Circle",
BrushStylePtr,
CircleBrush,
ordering=0,
params=[parameter.FloatParameter('radius', 0,
tip='Radius of the brush in physical units.')],
tip="Brush with a circular profile.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/common/reg/circlebrush.xml')
)
squareReg = cregisteredclass.Registration(
"Square",
BrushStylePtr,
SquareBrush,
ordering=1,
params=[parameter.FloatParameter('size', 0,
tip='Half the side of the brush in physical units.')],
tip="Brush with a square profile.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/common/reg/squarebrush.xml')
)
| [
"[email protected]"
] | |
381da1385d47377eeaa12cedb3eee290fa920879 | 3517bef3bb174fef1f2f6a5edd221d23af2a4a99 | /backend/emma_phillips_3684/urls.py | 4c3f152a16e4c4be9cdae8fe722eacab23a51993 | [] | no_license | crowdbotics-apps/emma-phillips-3684 | e8fe2c0c7e68abda4f199c4cc4396bb2268afffc | 7ee256d6a167d5236800751a478023c48056b162 | refs/heads/master | 2020-05-25T23:38:41.448295 | 2019-05-22T12:55:07 | 2019-05-22T12:55:07 | 188,038,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | """emma_phillips_3684 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'Emma Phillips'
admin.site.site_title = 'Emma Phillips Admin Portal'
admin.site.index_title = 'Emma Phillips Admin'
| [
"[email protected]"
] | |
55fc69c3daeea8d2a118a7e02188c932abb24f83 | 8d2a785ffc06ec46a546cdf50af41054a382f05a | /classes/day11/practice/数据库博库/db_con_insert.py | 201679238f0035ffd314aad79db425ae24484c53 | [] | no_license | Pigcanflysohigh/Py27 | 4be0d9ad93f5d695c48fd89157952230ec4d111a | 2f6568fce2a6f09c73cdc08342a8b05645c87736 | refs/heads/master | 2020-06-18T08:09:08.217036 | 2019-11-20T16:14:05 | 2019-11-20T16:14:05 | 196,225,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | import pymysql
conn = pymysql.connect(host='10.211.55.5',user='root',password='root',database='mlg')
cur = conn.cursor()
cur.execute("insert into t8 values('女','rap');")
conn.commit() # statements that modify or write database content (insert/update/delete) generally need an explicit commit before they take effect
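# A parameterized variant of the insert above (my sketch, same table assumed):
# letting pymysql substitute the values avoids SQL injection and quoting bugs.
#
#   cur.execute("insert into t8 values(%s, %s);", (u'女', 'rap'))
#   conn.commit()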
cur.close()
conn.close() | [
"[email protected]"
] | |
3be2fbf38cc96ef463cecd4366ea19e030cca99a | 3c8aaef535328f7c4d812cf086a637b27d891752 | /interview/google/hard/LC327.py | 6e8c0bbf06d684ef7b05a58684d8b05985f7d649 | [] | no_license | zhangshv123/superjump | 9339cd7f5e75d8a94be60d44c752267cc38183d3 | 7de5f69e6e44ca4e74d75fed2af390b3d2cbd2b9 | refs/heads/master | 2020-03-20T20:36:34.378950 | 2019-03-08T04:37:22 | 2019-03-08T04:37:22 | 137,696,605 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | #!/usr/bin/python
"""
First compute the prefix sums: first[m] is the sum of the first m numbers.
Then the sum of any subarray nums[i:k] is simply first[k] - first[i].
So we just need to count those where first[k] - first[i] is in [lower,upper].
To find those pairs, I use mergesort with embedded counting. The pairs in the left half and the pairs in the right half get counted in the recursive calls. We just need to also count the pairs that use both halves.
For each left in first[lo:mid] I find all right in first[mid:hi] so that right - left lies in [lower, upper]. Because the halves are sorted, these fitting right values are a subarray first[i:j]. With increasing left we must also increase right, meaning we must leave out first[i] if it's too small and include first[j] if it's small enough.
Besides the counting, I also need to actually merge the halves for the sorting. I let sorted do that, which uses Timsort and takes linear time to recognize and merge the already sorted halves.
"""
def countRangeSum(self, nums, lower, upper):
first = [0]
for num in nums:
first.append(first[-1] + num)
def sort(lo, hi):
mid = (lo + hi) / 2
if mid == lo:
return 0
count = sort(lo, mid) + sort(mid, hi)
i = j = mid
for left in first[lo:mid]:
while i < hi and first[i] - left < lower: i += 1
while j < hi and first[j] - left <= upper: j += 1
count += j - i
first[lo:hi] = sorted(first[lo:hi])
return count
return sort(0, len(first))
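if __name__ == '__main__':
    # Quick sanity check (my addition, not from the original solution). The
    # unused `self` parameter is a LeetCode leftover, so pass None for it.
    # For nums=[-2,5,-1] with [lower, upper]=[-2,2], the qualifying range
    # sums are [0,0]=-2, [2,2]=-1 and [0,2]=2, so the expected count is 3.
    print countRangeSum(None, [-2, 5, -1], -2, 2)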
| [
"[email protected]"
] | |
76da49d71b796760d616591e77762c095f4a80b5 | d2f893a95f74b59ec7f073a008d9502c22afb04a | /cwcnn/extend/cuda_functions/round_cuda.py | 8066ff17b0f52d0d7e12a1c8cf7951624e09e703 | [] | no_license | liu-yangyang/CNN-based-Image-Compression-Guided-by-YOLOv2 | bbf03ce26b51e1247b1655c6d8aa909530702339 | 850391525908ca751b832ed882eca3f5eccd819c | refs/heads/master | 2020-03-19T03:07:26.152652 | 2018-05-27T08:23:09 | 2018-05-27T08:23:09 | 135,697,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | import torch as th
from torch.autograd import Variable, Function
#include_path = '/home/snk/Desktop/workspace/pytorch_implement/extend/'
include_path = "/home/zhangwenqiang/jobs/pytorch_implement/extend"
import sys
if include_path not in sys.path:
sys.path.append(include_path)
from round import round_forward_wrapper, round_backward_wrapper
class RoundCudaFunction(Function):
'''
Pytorch Function wrapper of cuda implementation of round layer
'''
def forward(self, x):
y = th.zeros_like(x)
round_forward_wrapper(x, y, x.numel())
return y
def backward(self, grad_y):
grad_x = th.zeros_like(grad_y)
round_backward_wrapper(grad_x, grad_y, grad_y.numel())
return grad_x
class RoundCuda(th.nn.Module):
def forward(self, x):
return RoundCudaFunction()(x)
def test():
inp_tensor = th.Tensor([[1,2],[-1,0]]).cuda()
inp = th.sigmoid(inp_tensor)
x = Variable(inp, requires_grad = True)
round_= RoundCuda()
y = round_(x)
print (x)
print (y)
y.backward(th.cuda.FloatTensor([[1,2],[3,4]]))
print (x.grad)
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
b0efdce4e0bf427b216c46a6e9f000bcc9bf1a57 | ed75b99e824b5724746d72f2d529781eccf8ef0d | /biostar/settings/base.py | b6bc56bfba6910cfc319fb458e4fe765acde772c | [
"MIT"
] | permissive | satra/biostar-central | 6799c4df4d12de1278f60fb2b29623acf8cc7640 | 794c67d2972a4fe700c79841f5f3c0c562352738 | refs/heads/master | 2021-01-12T20:32:14.356389 | 2014-03-20T15:37:27 | 2014-03-20T15:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,247 | py | # -*- coding: utf8 -*-
#
# Django settings for biostar project.
#
from __future__ import absolute_import
import os
from django.core.exceptions import ImproperlyConfigured
from .logger import LOGGING
# Turn off debug mode on deployed servers.
DEBUG = True
# Template debug mode.
TEMPLATE_DEBUG = DEBUG
# Should the django compressor be used.
USE_COMPRESSOR = False
# The start categories. These tags have special meaning internally.
START_CATEGORIES = [
"Latest", "Unanswered",
]
# These should be the most frequent (or special) tags on the site.
NAVBAR_TAGS = [
"Assembly", "RNA-Seq", "ChIP-Seq", "SNP", "Galaxy",
]
# The last categories. These tags have special meaning internally.
END_CATEGORIES = [
"Job", "Planet", "Forum",
]
# These are the tags that always show up in the tag recommendation dropdown.
POST_TAG_LIST = NAVBAR_TAGS + ["software error"]
# This will form the navbar
CATEGORIES = START_CATEGORIES + NAVBAR_TAGS + END_CATEGORIES
def get_env(name, func=None):
"""Get the environment variable or return exception"""
try:
if func:
return func(os.environ[name])
else:
return unicode(os.environ[name], encoding="utf-8")
except KeyError:
msg = "*** Required environment variable %s not set." % name
raise ImproperlyConfigured(msg)
def abspath(*args):
"""Generates absolute paths"""
return os.path.abspath(os.path.join(*args))
# Displays debug comments when the server is run from this IP.
INTERNAL_IPS = ('127.0.0.1', )
# Set location relative to the current file directory.
HOME_DIR = get_env("BIOSTAR_HOME")
LIVE_DIR = abspath(HOME_DIR, 'live')
DATABASE_NAME = abspath(LIVE_DIR, get_env("DATABASE_NAME"))
STATIC_DIR = abspath(HOME_DIR, 'biostar', 'static')
TEMPLATE_DIR = abspath(HOME_DIR, 'biostar', 'server', 'templates')
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
EXPORT_DIR = abspath(LIVE_DIR, "export")
STATIC_ROOT = abspath(EXPORT_DIR, "static")
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = abspath(EXPORT_DIR, "media")
# Needs to point to the directory that contains the
# html files that are stored in the flatpages about, faq, help, policy etc.
FLATPAGE_IMPORT_DIR = abspath(HOME_DIR, "import", "pages")
# Default search index location.
WHOOSH_INDEX = abspath(LIVE_DIR, "whoosh_index")
# These settings create an admin user.
# The default password is the SECRET_KEY.
ADMIN_NAME = get_env("BIOSTAR_ADMIN_NAME")
ADMIN_EMAIL = get_env("BIOSTAR_ADMIN_EMAIL")
ADMINS = (
(ADMIN_NAME, ADMIN_EMAIL),
)
# Get the secret key from the environment.
SECRET_KEY = get_env("SECRET_KEY")
MANAGERS = ADMINS
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DATABASE_NAME,
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# admin site may fail if this setting is active
TEMPLATE_STRING_IF_INVALID = "*** MISSING ***"
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", get_env("BIOSTAR_HOSTNAME")]
ATOMIC_REQUESTS = True
CONN_MAX_AGE = 10
# Allowed html content.
ALLOWED_TAGS = "p div br code pre h1 h2 h3 h4 hr span s sub sup b i img strong strike em underline super table thead tr th td tbody".split()
ALLOWED_STYLES = 'color font-weight background-color'.split()
ALLOWED_ATTRIBUTES = {
'*': ['class', 'style'],
'a': ['href', 'rel'],
'img': ['src', 'alt'],
'table': ['border', 'cellpadding', 'cellspacing'],
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# These parameters will be inserted into the database automatically.
SITE_ID = 1
SITE_NAME = "localhost"
SITE_DOMAIN = get_env("BIOSTAR_HOSTNAME")
SERVER_EMAIL = DEFAULT_FROM_EMAIL = get_env("DEFAULT_FROM_EMAIL")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/static/upload/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Use absolute paths, not relative paths.
STATIC_DIR,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'biostar.server.middleware.Visit',
)
ROOT_URLCONF = 'biostar.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'biostar.wsgi.application'
TEMPLATE_DIRS = (
TEMPLATE_DIR,
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
LOGIN_REDIRECT_URL = "/"
MESSAGE_TAGS = {
10: 'alert-info', 20: 'alert-info',
25: 'alert-success', 30: 'alert-warning', 40: 'alert-danger',
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
# The javascript and CSS asset manager.
'compressor',
# Enabling the admin and its documentation.
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.flatpages',
'django.contrib.sessions',
# Biostar specific apps.
'biostar.apps.users',
'biostar.apps.util',
'biostar.apps.posts',
'biostar.apps.messages',
'biostar.apps.badges',
# The main Biostar server.
'biostar.server',
# Social login handlers.
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.persona',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.linkedin',
#'allauth.socialaccount.providers.weibo',
# External apps.
'haystack',
'crispy_forms',
'djcelery',
'kombu.transport.django',
'south',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
AUTH_USER_MODEL = 'users.User'
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Default search is provided via Whoosh
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': WHOOSH_INDEX,
},
}
TEMPLATE_CONTEXT_PROCESSORS = (
# Django specific context processors.
"django.core.context_processors.debug",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
# Social authorization specific context.
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
# Biostar specific context.
'biostar.server.context.shortcuts',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
"allauth.account.auth_backends.AuthenticationBackend",
)
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
# Should the captcha be shown on the signup page.
CAPTCHA = True
# For how long does a user need to be a member to become trusted.
TRUST_RANGE_DAYS = 7
# Votes needed to start trusting the user
TRUST_VOTE_COUNT = 5
# How many non top level posts per day for users.
MAX_POSTS_NEW_USER = 5
MAX_POSTS_TRUSTED_USER = 30
# How many top level posts per day for a new user.
MAX_TOP_POSTS_NEW_USER = 1
MAX_TOP_POSTS_TRUSTED_USER = 5
# Customize this to match the providers listed in the APPs
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'METHOD': 'oauth2',
'LOCALE_FUNC': lambda x: 'en_US',
'PROVIDER_KEY': get_env("FACEBOOK_PROVIDER_KEY"),
'PROVIDER_SECRET_KEY': get_env("FACEBOOK_PROVIDER_SECRET_KEY"),
},
'twitter': {
'SCOPE': ['email'],
'PROVIDER_KEY': get_env("TWITTER_PROVIDER_KEY"),
'PROVIDER_SECRET_KEY': get_env("TWITTER_PROVIDER_SECRET_KEY"),
},
'persona': {
'REQUEST_PARAMETERS': {'siteName': 'Biostar'}
},
'google': {
'SCOPE': ['email', 'https://www.googleapis.com/auth/userinfo.profile'],
'AUTH_PARAMS': {'access_type': 'online'},
'PROVIDER_KEY': get_env("GOOGLE_PROVIDER_KEY"),
'PROVIDER_SECRET_KEY': get_env("GOOGLE_PROVIDER_SECRET_KEY"),
},
}
# The google id will injected as a template variable.
GOOGLE_TRACKER = ""
GOOGLE_DOMAIN = ""
# The site logo.
SITE_LOGO = "biostar2.logo.png"
# The default CSS file to load.
SITE_STYLE_CSS = "biostar.style.less"
# Set it to None if all posts should be accessible via the Latest tab.
SITE_LATEST_POST_LIMIT = None
# How many recent objects to show in the sidebar.
RECENT_VOTE_COUNT = 10
RECENT_USER_COUNT = 10
RECENT_POST_COUNT = 10
# Time between two accesses from the same IP to qualify as a different view.
POST_VIEW_MINUTES = 5
# Default expiration in seconds.
CACHE_TIMEOUT = 60
# Should the messages go to email by default?
DEFAULT_EMAIL_ON = False
# Django precompressor settings.
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
'SITE_STYLE_CSS': SITE_STYLE_CSS,
}
# The cache mechanism is deployment dependent. Override it externally.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache' if DEBUG else 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
# The celery configuration file
CELERY_CONFIG = 'biostar.celeryconfig'
# Setting a cookie with email:signed_hash(email)
# will automatically create accounts
EXTERNAL_AUTH = [
("foo.bar.com", "ABC"),
]
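# Illustrative sketch (not part of the original settings): one way the
# external site could build such a cookie value, assuming an HMAC-style
# signature keyed with the shared secret above. The exact digest scheme the
# middleware expects may differ -- treat this construction as an assumption.
#
# import hmac, hashlib
# def external_auth_cookie(email, key="ABC"):
#     digest = hmac.new(key, email, hashlib.md5).hexdigest()  # hypothetical scheme
#     return "%s:%s" % (email, digest)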
# Set these to redirect login to an external site.
EXTERNAL_LOGIN_URL = None
EXTERNAL_SIGNUP_URL = None
EXTERNAL_LOGOUT_URL = None
# How far to look for posts for anonymous users.
COUNT_INTERVAL_WEEKS = 10000
# How frequently do we update the counts for authenticated users.
SESSION_UPDATE_SECONDS = 2 * 60
# The number of posts to show per page.
PAGINATE_BY = 25
# Used by crispyforms.
#CRISPY_FAIL_SILENTLY = not DEBUG
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_EMAIL_SUBJECT_PREFIX = "[biostar] "
ACCOUNT_PASSWORD_MIN_LENGTH = 6
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_USER_MODEL_EMAIL_FIELD = "email"
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "http"
#ACCOUNT_LOGOUT_ON_GET = True
# Session specific settings.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
SESSION_KEY = "session"
# Use a mock email backend for development.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# On deployed servers the following must be set.
EMAIL_HOST = get_env("EMAIL_HOST")
EMAIL_PORT = get_env("EMAIL_PORT", func=int)
EMAIL_HOST_USER = get_env("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = get_env("EMAIL_HOST_PASSWORD")
| [
"[email protected]"
] | |
9acd73bb18291d9817ce151961855002e16b1075 | fdca7a2818602fa40b0848a15f630afb68a2ec13 | /page/search.py | 163526ad1ac64c5ba863746b6dc9ed85152a1222 | [] | no_license | sisul1204/appium_zhuangshiqi | 0bded9307c92f57749ad6eb514431b0348deaebc | c457d267fee86ee0f516e3cbab25afd514a7c7fc | refs/heads/main | 2023-01-07T07:05:31.420987 | 2020-11-13T03:47:48 | 2020-11-13T03:47:48 | 312,163,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # * coding:utf-8 *
# Author:sisul
#创建时间:2020/11/11 17:11
import yaml
from selenium.webdriver.common.by import By
from page.base_page import BasePage
class Search(BasePage):
def search(self, name):
self._params['name'] = name
self.steps('../page/search.yaml')
def add(self, name):
self._params['name'] = name
self.steps('../page/search.yaml')
def is_choose(self, name):
self._params['name'] = name
return self.steps('../page/search.yaml')
def reset(self, name):
self._params['name'] = name
self.steps('../page/search.yaml')
| [
"[email protected]"
] | |
d22d9284208ebd8f92edf3f7139fd34bf723d63a | 6bf1b595a7f4d3cbf0995455869d438a7d0e0624 | /lingvo/core/scatter_update.py | 4be545fde721c2587c20f8f3dff3ccfb2a8f9048 | [
"Apache-2.0"
] | permissive | huaxz1986/lingvo | 889abc82b1bab6f37ba861c41eb480b7e89362c0 | b83984577610423e3b1c6b04ca248cd23f2842f7 | refs/heads/master | 2022-05-15T03:29:56.903688 | 2022-04-02T01:41:25 | 2022-04-02T01:41:25 | 173,536,461 | 1 | 0 | Apache-2.0 | 2019-03-03T05:52:01 | 2019-03-03T05:52:01 | null | UTF-8 | Python | false | false | 2,341 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for scatter updates."""
import contextlib
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core import thread_local_utils
_global_inplace_update_stack = thread_local_utils.ThreadLocalStack()
@contextlib.contextmanager
def SetInplaceUpdate(inplace_update):
_global_inplace_update_stack.stack.append(inplace_update)
try:
yield
finally:
_global_inplace_update_stack.stack.pop()
def UseInplaceUpdate():
if not _global_inplace_update_stack.stack:
# TODO(rpang): set the default value to False in a follow-up CL.
return True
return _global_inplace_update_stack.stack[-1]
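# Usage sketch (added; not part of the original module): code under the
# context manager observes the pushed flag.
#
#   with SetInplaceUpdate(False):
#     assert not UseInplaceUpdate()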
def Update(x, i, v, *, inplace_update=None):
"""Performs scatter update: x[i] = v.
A drop-in replacement for inplace_ops.alias_inplace_update (
aka tf.InplaceUpdate).
Args:
x: the source tensor.
i: the index tensor. If None, do x = v. If a scalar, do x[i, ...] = v. If a
vector, do x[j, ...] = v[j, ...] for j in i.
v: the update value tensor.
inplace_update: whether to perform inplace updates. If None, follows the
current context set by SetInplaceUpdate.
Returns:
The updated tensor.
"""
if inplace_update is None:
inplace_update = UseInplaceUpdate()
if inplace_update:
return tf.InplaceUpdate(x, i, v)
if i is None:
return py_utils.HasShape(v, tf.shape(x))
i = tf.convert_to_tensor(i)
assert i.shape, i
assert i.shape.rank in (0, 1), i
if i.shape.rank == 0:
y = tf.concat([x[:i, ...], v[None, ...], x[i + 1:, ...]], axis=0)
y.set_shape(x.shape)
return y
return tf.tensor_scatter_nd_update(x, i[:, None], v)
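# Minimal usage sketch (added; not part of the original module), assuming
# eager execution through lingvo.compat:
#
#   x = tf.zeros([3, 2])
#   v = tf.ones([2])
#   y = Update(x, 1, v, inplace_update=False)
#   # x is left untouched; rows of y: [0, 0], [1, 1], [0, 0]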
| [
"[email protected]"
] | |
5493a9b3dcefaa7349c665b1c55edd28129ad453 | 05805ab879654cdcf61df3653847f435b624dc77 | /Dictator_service/bin_gui/driver_meta.py | 5daf43424c05f36e16f446af2eb71205a02c3602 | [] | no_license | Wuwqhnsya/Dictator | 3d57db6bc0138464884ddc9fe7378907ab86e3ef | 45388fec03a4acdac3620611b3bccfa3c991d65f | refs/heads/master | 2020-04-28T21:57:39.309165 | 2019-01-28T19:10:28 | 2019-01-28T19:10:28 | 175,600,478 | 1 | 0 | null | 2019-03-14T10:34:02 | 2019-03-14T10:34:02 | null | UTF-8 | Python | false | false | 33,830 | py | import json
import time
import sys
import msfrpc
import auto_commands
import psutil
import MySQLdb
#import MySQLdb
import threading
import subprocess
import logging
import logging.handlers
import threading
import Auto_logger
import json
import IPexploits
import commands,os
import texttable as tt
import csv
import os
r = '\033[31m' #red
b = '\033[34m' #blue
g = '\033[32m' #green
y = '\033[33m' #yellow
m = '\033[34m' #magenta
c = '\033[36m' #magenta
p = '\033[95m' #purple
e = '\033[0m' #end
lr= '\033[91m'#Light red
#print "Object created"
class Driver:
def __init__(self):
self.con=None
self.cursor=None
self.logger=None
self.Log_file=None
self.project_id="Default"
self.lock = threading.Lock()
self.Auto_logger=Auto_logger.Logger()
self.commandObj=auto_commands.Commands()
self.config={}
self.config_file={}
self.rows=[]
self.method_id="INIT"
self.processed_services=None
self.commandsJson=None
self.IPexploits=[]
self.IPexploit=IPexploits.IPexploits()
self.missed_services=None
self.new_and_unknown=[]
self.data_path=""
self.parent_folder="Results_and_Reports"
self.folder_name=os.path.join("Results","Data_")
def init_connection(self):
try:
self.method_id="Init_connection()"
self.con=MySQLdb.connect("localhost","<USER>","<PASSWORD>","nmapscan")
self.cursor = self.con.cursor()
except Exception,ee:
self.print_Error("EXception in connection-->"+str(ee))
def close_connection(self):
try:
self.method_id="Close_connection()"
self.con.close()
except Exception, ee:
self.print_Error("EXception in connection-->"+str(ee))
def parse_and_process(self): # note: make an entry for a service of type unknown in the json file; its type would be custom
try:
self.method_id="parse_and_process()"
self.print_Log("Starting method --> "+self.method_id)
self.rows=[]
self.new_and_unknown=[]
self.IPexploits=[]
if (self.missed_services): # check it is not None -- returns False for empty lists
print "Missed services does contain data !!!"
for k,v in self.missed_services.iteritems():
entries={}
entry={}
service_status='unknown'
#print "Missed service is "+str(k)
if (k=='unknown'):
service_status='unknown'
entry["unknown"]=True
entry["new"]=False
#entry["echo"]=False
elif(k !=""):
service_status='new'
entry["unknown"]=False
entry["new"]=True
#entry["echo"]=False
if entry:
entries["Entries"]=entry
entries=json.dumps(entries)
else:
entries["Entries"]={"unknown":False,"new":False}
entries=json.dumps(entries)
for h_p in v:
#print "Appending -->Host-->"+str(h_p[0]) +"Port "+str(h_p[1]) +"Entries :" +str(entries)
self.rows.append((self.project_id,str(h_p[0]),str(h_p[1]),str(k),'init',entries,service_status))
self.IPexploits.append(IPexploits.IPexploits(self.project_id,str(h_p[0]),str(h_p[1]),str(k),'init',entries,service_status))
if (self.processed_services): # dict form of services that are discovered by nmap
#print "1000"
#print "---->" +str(self.processed_services)
for k,v in self.processed_services.iteritems(): # would always have common services; may also contain custom services
#print str(k)
#print "bye"
entries={}
commands_and_exploits={}
row=[]
service_val=self.commandsJson.get(k) # k would be service and would act as key for commandsjson
#all_commands=service_val.get('Commands') #commands is list of dictionaries
is_custom=service_val.get('Custom')
#print "here reached"
if(is_custom==False):
entries=self.getTemplate(k)
#print "entries are -->" +str(entries)
if(entries != -1):
"""if all_commands:
#print "here reached also\n"
for entry in all_commands :
if entry:
method_name=entry.get('method')
command_id=entry.get('id')
commands_and_exploits[command_id]=[False,"0","0"]
entries["Entries"]=commands_and_exploits
entries=json.dumps(entries)"""
#print "here reached also 1.2\n
for h_p in v:
self.rows.append((self.project_id,str(h_p[0]),str(h_p[1]),str(k),'init',entries,'existing'))
self.IPexploits.append(IPexploits.IPexploits(self.project_id,str(h_p[0]),str(h_p[1]),str(k),'init',entries,'existing'))
self.config[k]=row
else:
print "Error entry -1 for key -- Does not support recursive classes:"+str(k)
self.print_Error("Entry error (returns -1) for key "+str(k))
elif(is_custom==True):
all_commands=service_val.get('Commands')
if all_commands:
for entry in all_commands : #each command entry will pint to a custom class
if (entry):
entries=self.getTemplate(entry)
if(entries != -1):
for h_p in v:
#self.rows.append((self.project_id,str(h_p[0]),str(h_p[1]),str(k),'init',entries,'existing'))
self.rows.append((self.project_id,str(h_p[0]),str(h_p[1]),str(entry),'init',entries,'existing'))
self.IPexploits.append(IPexploits.IPexploits(self.project_id,str(h_p[0]),str(h_p[1]),str(entry),'init',entries,'existing'))
self.config[k]=row
if self.rows:
#print "\n\n\nrows are \n\n"
#print str(self.rows)
#print "1"
#self.makeBulkEntries(self.rows)
self.IPexploit.insertIPexploits(self.rows)
print "\n"
print r+"{+}______________Launching with selected configuration !!!__________________"+e
self.launchConfiguration()
else :
print "\n"+g+"No Common service and no unknown or new service discovered !!"+e
#self.launchConfiguration()
except Exception, ee:
self.print_Error("EXception -->"+str(ee))
def DrawTable(self,records,header=[],col_width=[]):
tab = tt.Texttable()
x = [[]]
for row in records:
x.append([str(row[0]),str(row[1]),str(row[2]),str(row[3]),str(row[4]),str(row[7])])
tab.add_rows(x)
tab.set_cols_align(['r','r','r','r','r','r'])
if (header):
tab.header(header)
else:
tab.header(['ID','PROJECT_Id','HOST','PORT','SERVICE','SERVICE TYPE'])
if (col_width):
tab.set_cols_width(col_width)
print tab.draw()
def getTemplate(self,service,reconfig=False):
#print "\n\nObtaining template\n\n "
entries={}
commands_and_exploits={}
row=[]
service_val=self.commandsJson.get(service)
if(service_val):
all_commands=service_val.get('Commands')
if all_commands:
for entry in all_commands :
if entry:
method_name=entry.get('method')
command_id=entry.get('id')
commands_and_exploits[command_id]=[False,"0","0"]
else:
return -1
entries["Entries"]=commands_and_exploits
entries=json.dumps(entries)
return entries
else:
return -1
else :
if(reconfig==True):
print r+"[*] Invalid choice Enter a valid service class as per master json "
return -1
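# Illustrative note (added): for a service whose master-json entry lists
# command ids "ssh_1" and "ssh_2" (hypothetical ids), getTemplate returns a
# JSON string equivalent to
# {"Entries": {"ssh_1": [false, "0", "0"], "ssh_2": [false, "0", "0"]}}
# -- one [False, "0", "0"] placeholder per command id, as initialized above.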
def InsertAdditionalServices(self,unKnownServices,id_list):
self.method_id="InsertAdditionalServices()"
self.print_Log("Started method InsertAdditionalServices()")
while (1):
pass_check=True
try:
choice=raw_input( "\n\n"+y +">Press 1 to add additional test case and press 2 to proceed"+e)
if (choice =="2"):
break
elif (choice=="1"):
print b +"\n>Enter Host port and service in single line seperated by comma "+e
print y +"[+] Eg: 192.168.179.136,80,ssh \n"+e
entry=raw_input(y+">")
line=entry.split(',')
if (len(line) !=3):
print "\n" +r+"[+] Invalid Choice "+e
continue
#(Pid,Host,Port,Service,Project_status,Exploits)
ip=str(line[0])
ip_chk=ip.split('.')
if(len(ip_chk) < 2) :
pass_check=False
print "\n"+r+"[*]-Invalid Host "+e
continue;
if((str(line[1]).isdigit())==False):
pass_check=False
print "\n"+r+"[*]-Invalid PORT"+e
continue
service_val=self.commandsJson.get(str(line[2]))
if (not service_val):
print "\n"+r+"[*]--------Invalid SERVICE"+e
continue
all_commands=service_val.get('Commands')
is_custom=service_val.get('Custom')
if (is_custom==False):
json_template=self.getTemplate(line[2],True)
if (json_template ==-1):
pass_check=False
print "\n"+r+"[*]-Invalid SERVICE"+e
continue
if(pass_check==True):
print b+"json template--> " +str(json_template)
if (json_template !=-1):
row=(int(self.project_id),line[0],line[1],line[2],'init',json_template,'existing')
self.IPexploit.insertIPexploits(row,True)
print "\n"+y+"[+]The reconfiguration has been saved "+e
else:
print "\n"+r+"[*] Service class invalid "+e
else:
print "\n\n"+g+"[*]**********"+r+"Correct the errors and reenter"+g+"*********"+e+"\n\n"
elif (is_custom==True):
if all_commands:
for entry in all_commands : #each command entry will point to a custom class
if (entry):
json_template=self.getTemplate(entry,True)
if (json_template ==-1):
pass_check=False
print "\n"+r+"[*]-Invalid SERVICE"+e
continue
if(pass_check==True):
print b+"json template--> " +str(json_template)
if (json_template !=-1):
row=(int(self.project_id),line[0],line[1],str(entry),'init',json_template,'existing')
self.IPexploit.insertIPexploits(row,True)
print "\n"+y+"[+]The reconfiguration has been saved "+e
else:
print "\n"+r+"[*] Service class invalid "+e
else:
print "\n\n"+g+"[*]**********"+r+"Correct the errors and reenter"+g+"*********"+e+"\n\n"
else:
print "\n\n"+g+"[*] **Some issue with master json..Contains no entry for this service for commands"+e
else:
print "\n\n"+g+"[*] **Some issue with master json..Commands key missing"+e
else:
print "\n\n"+g+"[*] **Some issue with master json..Custom flag not set"+e
except Exception ,ee:
print "Exception occured :" +str(ee)
self.print_Error("Exception occured "+str(ee))
self.method_id="InsertAdditionalServices()"
self.print_Log("Stopped method InsertAdditionalServices()")
def UpdateUnknownServices(self,unKnownServices,id_list,unknownservice_json):
self.method_id="UpdateUnknownServices()"
self.print_Log("Started method UpdateUnknownServices")
if (unKnownServices):
update_entries=[]
invalid=False
while (1):
try:
invalid=False
reconfig=False
choice=raw_input("\n"+b +">Press 1 to reconfigure press 2 to Launch Tests"+e)
if (choice =="2"):
break
elif(choice=="1"):
rec_id=raw_input( b +"Enter the Id of the record to reconfigure "+e)
if rec_id in id_list:
pass_check_=True
reconfig=True
update_entry={}
update_entry["id"]=str(rec_id)
inp=raw_input("Enter 1 to reconfigiure service and 2 to reconfigure all <host,port and service> ")
if(inp=="1"):
print y+"You may reffer to servics.txt file present in the parent folder to see the list of services currently supported"+e
service_name=raw_input ("\n\n"+b +"Enter new service for the record id to be updated \n"+e)
print "chosn service -->"+str(service_name)
service_val=self.commandsJson.get(service_name)
print "service_val is :"+str(service_val)
if (not service_val):
print "\n"+r+"[*]-------Invalid SERVICE--------------"+e
pass_check_=False
continue
all_commands=service_val.get('Commands') #commands is list of dictionaries
is_custom=service_val.get('Custom')
if(is_custom==False):
json_template=self.getTemplate(service_name,True) #this service would be added by user and
if (json_template ==-1):
pass_check_=False
print "\n"+r+"[*]-Invalid SERVICE"+e
continue
if( (pass_check_==True)):
update_entry["service"]=service_name.lstrip().rstrip()
update_entry["pid"]=str(self.project_id)
print "\n\n[+]Updating the record!!"
self.IPexploit.Update_Reconfig(update_entry["id"],update_entry["pid"],'','',update_entry["service"],'existing',json_template,True)
print "\n\n"+g+"[+]Record Updated!!"+e
elif((is_custom==True) and (all_commands)):
print r+"\n\n[+]You have selected a custom class option.A custom class can be configured by selecting <configure all> option from the last menu.KIndly set custom service from there "+e
continue
elif(inp=="2"):
print b +"Enter host port and service in single line seperated by comma "+e
print y +"Eg: 192.168.179.136,80,ssh "+e
entry=raw_input(y+">")
line=entry.split(',')
if (len(line) !=3):
print "\n" +r+"[+] Invalid Choice "+e
continue
ip=str(line[0])
ip_chk=ip.split('.')
if(len(ip_chk) < 2) :
pass_check_=False
print "\n"+r+"[*]-Invalid Host "+e
continue
if((str(line[1]).isdigit())==False):
pass_check_=False
print "\n"+r+"[*]-Invalid PORT"+e
continue
service_val=self.commandsJson.get(str(line[2]))
print "The service val is -->"+str(service_val)
if (not service_val):
print "\n"+r+"[*]-------Invalid SERVICE--------------"+e
pass_check=False
continue
all_commands=service_val.get('Commands') #commands is list of dictionaries
is_custom=service_val.get('Custom')
if(is_custom==False):
json_template=self.getTemplate(line[2],True) #this service would be added by user and
if (json_template ==-1):
pass_check_=False
print "\n"+r+"[*]-Invalid SERVICE"+e
continue
if((reconfig) and (not invalid) and (pass_check_==True)):
update_entry["host"]=str(line[0]).lstrip().rstrip()
update_entry["port"]=str(line[1]).lstrip().rstrip()
#check whether the service added is there in the master json
update_entry["service"]=str(line[2]).lstrip().rstrip()
update_entry["pid"]=str(self.project_id)
print "\n\n[+]Updating the record!!"
self.IPexploit.Update_Reconfig(update_entry["id"],update_entry["pid"],update_entry["host"],update_entry["port"],update_entry["service"],'existing',json_template)
print "\n\n"+g+"[+]Record Updated!!"+e
elif((is_custom==True) and (all_commands)):
insert_entries=[]
made_insertion=False
parent_service =unknownservice_json.get(str(update_entry["id"]))
print r+"[+]Parent service to be updated is -->"+str(parent_service)+e
for entry in all_commands : #each command entry will point to a custom class
if (entry):
json_template=self.getTemplate(entry,True)
if (json_template ==-1):
pass_check_=False
print "\n"+r+"[*]-Invalid SERVICE"+e
continue
if((reconfig) and (not invalid) and (pass_check_==True)):
update_entry["host"]=str(line[0]).lstrip().rstrip()
update_entry["port"]=str(line[1]).lstrip().rstrip()
#check weather the service added is there in the master json
#update_entry["service"]=parent_service
update_entry["service"]=entry
update_entry["pid"]=str(self.project_id)
print "\n\n[+]Updating the record!!"
row=(int(self.project_id),update_entry["host"],update_entry["port"],update_entry["service"],'update',json_template,'existing')
self.IPexploit.insertIPexploits(row,True)
made_insertion=True
print "\n\n"+g+"[+]Record Updated!!"+e
if(made_insertion):
self.IPexploit.removeIPexploit(int(update_entry["id"]))
self.print_Log("Details updated for custom added service !!")
print "\n\n"+g+"[+] Details updated Successfully for current service " +e
else:
print r+"\n[+] INvalid choice \n"+e
continue
else:
print r +"[*][*]In valid Id-->Enter a valid ID\n" +e
#invalid=True
continue
except Exception ,ee:
self.print_Error("Exception in Update unknown services --" +str(ee))
print ("Exception in update unknown services --"+str(ee))
else :
print g+"\n[+] No UNknown services were detected"+e
self.method_id="UpdateUnknownServices()"
self.print_Log("Stopped method UpdateUnknownServices")
def reConfigure (self):
try:
self.method_id="Reconfigure()"
self.print_Log("Started method Reconfigure")
unKnownServices=self.IPexploit.getUnknownServices(self.project_id)
id_list=[]
repeat=1
unknownservice_json={}
if unKnownServices:
for entry in unKnownServices:
id_list.append(str(entry[0])) #the one's haveing service type as unknown
unknownservice_json[str(entry[0])]=str(entry[4])
print y +"[+]" + "Discovered some unknown and new services--Configure them or exploits woould not be launched against them" +e
print "\n"
self.DrawTable(unKnownServices)
self.UpdateUnknownServices(unKnownServices,id_list,unknownservice_json)
self.InsertAdditionalServices(unKnownServices,id_list)
#print "Press 1 to launch exploits and 2 change master file and exit :"
choice="0"
while(1):
choice=raw_input("\n"+g+"[+]Press 1 see the updated configuration and launch exploits and 2 change master file and exit :\n"+e)
if((choice=="1") or(choice=="2")):
break
else:
print "\n"+r+"[*] Choice invalid \n"+e
self.method_id="Reconfigure()"
self.print_Log("Ending method Reconfigure()")
if (choice =="1"):
self.launchConfiguration(True)
#self.launchExploits()
#self.print_Log("Ended method Reconfigure")
else :
return
except Exception,ee:
self.print_Error("Error occured :" +str(ee))
def makeConfigurationFile(self):
config_file=str(self.project_id)+"Config.json"
config_file_path = os.path.join(self.data_path, config_file)
with open(config_file_path, 'w') as outfile:
json.dump(self.config_file, outfile, indent = 2,ensure_ascii=False)
def launchConfiguration(self,make_config=False):
try:
print "\n"+g+"[+] Launching configuration ...."+e
#self.init_connection()
self.method_id="launchConfiguration()"
self.print_Log("Starting method --> "+self.method_id +"Project id --> "+self.project_id)
id_=int(self.project_id)
IPexploits=self.IPexploit.getIpExploits(self.project_id)
IPexploits_and_commands=[]
list_row=[]
config_list=[]
tab_draw=[]
for row in IPexploits: #row is of type tuple whic is read only
#print str(row[4])
commands=self.getCommands(row[4],row[2],row[3])#x.append([str(row[0]),str(row[1])])
#print" commands got are :" +str(commands)
list_row.append((row[0],row[1],row[2],row[3],row[4],row[5],commands))
tab_draw.append((row[0],row[1],row[2],row[3],row[4],row[5],'',commands))
#print tab.draw()
header=[]
header=['ID','PROJECT_Id','HOST','PORT','SERVICE','Commands']
col_width=[5,5,15,5,7,40]
#self.DrawTable(tab_draw,header,col_width)
for row in list_row:
config_entry={}
print "\n"+ lr +"######################################################################################"+e
#print str(row)
print ("\n"+g+"[+]Project id : "+y+str(row[1])+g+" [+] Host : "+y+ str(row[2])+g+" [+] Port : "+y+str(row[3]) +g+" [+] Service : "+y+str(row[4])+e)
#print "Commands :"
command_data=row[6]
config_entry["id"]=str(row[0])
config_entry["Project_id"]=str(row[1])
config_entry["Host"]=str(row[2])
config_entry["Port"]=str(row[3])
config_entry["Service"]=str(row[4])
config_entry["IsCustom"]=False
config_entry["IsModified"]=False
command_list=[]
print "\n"
for k in command_data:
id_=k.get("id")
command_list.append(id_)
print b+"*************************************************"+e
print r+"Command id :-->"+y+str(id_)+e
args=k.get('args')
print r+"Commands :"+e
for aur in args:
if isinstance(aur, basestring):
aur=aur.replace('\n','')
print str(aur)
print b+"*************************************************"+e
#print "\n"
print "\n"+ lr +"######################################################################################"+e
config_entry["Commands"]=command_list
config_list.append(config_entry)
self.config_file["Records"]=config_list
if(make_config==True):
self.makeConfigurationFile()
print y+"\n\n[+] The above configuration has been selected :Press 1 tolaunch the tests ,2 to reconfigure !!!"+e
choice="0"
while (1):
choice =raw_input(b+"\n>Please enter your choice\n "+e)
if((choice=="1") or (choice=="2")):
break;
else:
print "\n" + r +"[+] Invalid choice " +e
if (choice =="1"):
self.launchExploits()
else :
self.reConfigure()
except Exception ,ee:
self.print_Error("EXception 11-->"+str(ee))
def getCommands(self,k,host,port):
try:
# "In get commands"
#print str(k)
service_val=self.commandsJson.get(k)
#print "Got commands"
#print str(service_val)
all_commands=service_val.get('Commands')
#print "here"
arg_list=[]
#arg_list.append(1)
for arg in all_commands :
#print str(args)
if isinstance(arg, basestring):
arg=arg.replace("<host>",host)
arg=arg.replace("<port>",port)
arg_list.append(arg)
return arg_list
except Exception, ee:
self.print_Error("EXception -22->"+str(ee))
return -1
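# Example (added): with host "10.0.0.5" and port "22", an args template such
# as "nmap -p <port> <host>\n" (hypothetical; real templates live in
# all_commands.json) is rewritten by getCommands to "nmap -p 22 10.0.0.5\n".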
def set_log_file(self):
self.Log_file=str(self.project_id) +str("_Log_file_info.txt")
print "\n\n\nData path is -->"+str(self.data_path)
self.Log_file_path = os.path.join(self.data_path, self.Log_file)
print "Log file is --> " +str(self.Log_file)+"and log file path is : "+str(self.Log_file_path)
print "\n@@@@\n"
#self.Log_file=str(self.project_id) +str("_Log_file_info")
self.logger=self.Auto_logger.configureLoggerInfo(self.method_id,self.Log_file_path)
self.print_Log("\n\nStarting \n\n")
time.sleep(3)
print "hello !!! Logger is set"
def init_project_directory(self):
print "Initialising parent directory "
try:
if not os.path.exists(self.folder_name+str(self.project_id)):
print "Making directory !!"
#self.print_Log("Making project directory !")
os.mkdir(self.folder_name+str(self.project_id))
self.data_path=self.folder_name+str(self.project_id)
return 1;
except Exception ,ee:
#self.print_Error("Error while creating directory !!"+str(ee))
print "EX "+str(ee)
return -1
def main(self):
try:
self.method_id="Main()"
print r+"List of Project with IDs"+e +"\n"
tab = tt.Texttable()
x = [[]]
self.init_connection()
result = self.cursor.execute("SELECT id, projects from project where project_status='complete'")
result=self.cursor.fetchall()
valid_projects=[]
for row in result:
x.append([str(row[0]),str(row[1])])
valid_projects.append(str(row[0]))
tab.add_rows(x)
tab.set_cols_align(['r','r'])
tab.header(['IDs','PROJECT_NAME'])
print tab.draw()
print "\n"
while 1:
id = raw_input(b+"[+]Enter The Project Id For Scanning :\n>"+e)
reenter=False
if id in valid_projects:
#print "yes"
check_status=self.IPexploit.Exists(id)
#print "here"
print check_status
if (check_status ==1):
print y+"[+] It seems ,you have alreday launched exploits for this project .\n[+]Proceeding further would overwrie old logs."+e
while(1):
ch=raw_input(b+"[+]Press 1 to Proceed 2 to Re enter.\n"+e)
if ch=="1":
self.IPexploit.removeIPexploit(id,all_=True)
break
elif ch=="2":
reenter=True
break
if (reenter==False):
break
else:
print r+"[+] Invalid project id.Please select an id from the provided list "+e
print "\n"
self.project_id=id
print "Removed !!"
#print "-1"
status=self.init_project_directory()
print "INitialised"
if (status==-1):
print("some error occured while creating the directory\nExiting...")
return
self.set_log_file()
self.IPexploit.data_path=self.data_path
self.IPexploit.logger=self.logger
self.commandObj.project_id=self.project_id
self.commandObj.data_path=self.data_path
self.commandObj.set_log_file()
self.commandObj.logger_info=self.logger
self.print_Log("\n\n\nWelcome STARTING MAIN METHOD OF DRIVER FILE FOR PROJECT ID --> " +str(id))
lst1 = []
### very important --> check here whether the selected id from the user actually falls under completed projects
id_=int(id)
result_ = self.cursor.execute("SELECT Sevices_detected from IPtable_history where project=%s and Sevices_detected is not null",(id_,))
#print "Byee!!"
result_=self.cursor.fetchall()
print "Hello"
for row in result_:
if row[0] is not None:
string = str(row[0])
s = string.split("\n")
for k in s:
t = str(k).split(";")
lst1.append(t)
#print "List 1 -->"+ str(lst1)
lst = {}
for i in lst1:
if len(i) is not 1:
#print i[0]
temp={i[3]:[i[0],i[2]]}
if cmp(lst.keys(), temp):
lst.setdefault(i[3], []).append([i[0],i[2]])
else:
lst.update(temp)
lst.pop("name") #-->All service and val disc by nmap {ssh:[[h1,p1],[h2,p2]],ftp--}
with open("all_commands.json","rb") as f:
jsonpredata = json.loads(f.read()) #--> all service types in master json
lst_pre = jsonpredata.keys()
lst_temp = lst.keys()
ss = set(lst_temp).intersection(set(lst_pre)) #--> all services common to what is discovered by nmap and what is in the master json --> this will skip the use case where nmap identifies a service that our master json does not have. Thus it would be good to do a set difference as well, such that all the services that are discovered by nmap and are not in the master json are also fetched
ms=list(set(lst_temp) - set(lst_pre))
print "ss is " +str(ss)
dic = {}
for i in ss:
for k in lst.get(i):
dic.setdefault(i, []).append(k) # thus all refined data would be in dic: all services and host,port pairs that are discovered by the nmap scan, placed like {ssh:[[h1,p1],[h2,p2]],ftp--}
#dic.update({i:k for k in lst.get(i)})
ms_dic={}
for i in ms:
for k in lst.get(i):
ms_dic.setdefault(i, []).append(k)
print "here reached "
self.processed_services=dic #--Processed services would now contain relevent json
self.commandsJson=jsonpredata #all data from json file is in commandsjson
self.missed_services=ms_dic
self.parse_and_process()
if(self.generate_report==True):
while (1):
inp=raw_input("\n" + g +"[+] Press 1 to generate the report and 2 to exit \n")
if (inp=="1"):
self.IPexploit.generate_report(self.project_id)
break
elif(inp=="2"):
break
temp_file=str(id) + "_result_data.txt"
data_file=os.path.join(self.data_path,temp_file)
json.dump(dic,open(data_file,"wb"))
data = json.load(open(data_file,"rb"))
data_temp = []
for j in data:
data_temp.append(j) #all keys of json file go in data_temp
except Exception ,ee:
print str(ee)
self.print_Error("Error occured in Main method "+str(ee))
def print_Log(self,message):
try:
self.lock.acquire()
self.logger.debug(message)
self.lock.release()
except Exception ,ee:
self.lock.acquire()
self.logger.critical(message +"--Exception : --"+str(ee))
self.lock.release()
print message+"\n"
def print_Error(self,message):
#self.Log_file=str(self.project_id) +str("_Log_file_info")
#self.logger=self.Auto_logger.configureLoggerInfo(self.method_id,self.Log_file)
#message="Command id --> "+str(self.command_id) +" Message --> :" +str(message)
try:
self.lock.acquire()
self.logger.error(message)
self.lock.release()
except Exception ,ee:
self.lock.acquire()
self.logger.error(message +"--Exception : --"+str(ee))
self.lock.release()
print message+"\n"
def launchExploits(self):
try:
self.method_id="LaunchExploits()"
self.print_Log("Started method LaunchExploits()")
self.generate_report=True
IPexploits_data=self.IPexploit.getIpExploits(self.project_id)
print "here -->"
if((IPexploits_data !=-1 ) and (IPexploits_data is not None )):
print "--1---here -->"
#self.commandObj.Log_File=str(self.project_id) +str("_Log_file")
for exploit in IPexploits_data:
current_record_id=exploit[0]
service=str(exploit[4])
host=exploit[2]
port=exploit[3]
self.print_Log("Service,Host,port is -->"+str(service)+" " +str(host)+" "+str(port))
entry=self.commandsJson.get(service)
print "read"
meta=entry.get('Commands')
for entries in meta :
method_name=entries.get('method')
args=entries.get('args')
self.commandObj.method_id=method_name
self.commandObj.command_id=entries.get('id')
self.commandObj.current_record_id=current_record_id
self.commandObj.current_host=host
self.commandObj.current_port=port
self.commandObj.data_path=self.data_path
final_args=[]
for arg in args:
if isinstance(arg, basestring):
arg=arg.replace("<host>",host)
arg=arg.replace("<port>",port)
final_args.append(arg)
if ((method_name)):
func = getattr(self.commandObj,method_name)
print "Invoking !!!"
is_interactive=entries.get('interactive')
self.commandObj.print_Log_info("\n\n\n STARTING EXPLOITS FOR PROJECT ID --> " +str(self.project_id))
print "Logged"
if((is_interactive !=None ) and (is_interactive =="1")):
print "Launching General interactive mode !!-->Method->"+method_name
func(final_args,True)
else:
print "Launching without interactive mode !!--->"+method_name
func(final_args)
except Exception ,ee:
self.print_Error("Inside exception of launch exoloits :"+str(ee))
def integration_test(self):
print "Started\n"
with open("all_commands.json","rb") as f:
jsonpredatas = json.loads(f.read())
#ftp=jsonpredatas.get('netbios-ssn')
#ftp=jsonpredatas.get('ftp_command')
#ftp=jsonpredatas.get('ssh')
#ftp=jsonpredatas.get('smtp_command')
#ftp=jsonpredatas.get('smtps_command')
#ftp=jsonpredatas.get('pop3')
ftp=jsonpredatas.get('imaps')
#ftp=jsonpredatas.get('domain')
#ftp=jsonpredatas.get('ldaps')
#ftp=jsonpredatas.get('isakmp')
#ftp=jsonpredatas.get('exec')
#ftp=jsonpredatas.get('openvpn')
#ftp=jsonpredatas.get('vnc')
#ftp=jsonpredatas.get('finger')
#ftp=jsonpredatas.get('ntp')
#ftp=jsonpredatas.get('ms-sql-m')
#ftp=jsonpredatas.get('nfs')
#ftp=jsonpredatas.get('login')
#ftp=jsonpredatas.get('snmp')
#ftp=jsonpredatas.get('ms-wbt-server')
#ftp=jsonpredatas.get('rsftp')
#ftp=jsonpredatas.get('dhcps')
#ftp=jsonpredatas.get('tftp')
#ftp=jsonpredatas.get('rpcbind')
#ftp=jsonpredatas.get('microsoft-ds')
#ftp=jsonpredatas.get('shell')
#ftp=jsonpredatas.get('oracle')
#ftp=jsonpredatas.get('radius')
#ftp=jsonpredatas.get('upnp')
#ftp=jsonpredatas.get('squid-http')
#ftp=jsonpredatas.get('mysql')
#ftp=jsonpredatas.get('xmpp-client')
#ftp=jsonpredatas.get('postgresql')
#ftp=jsonpredatas.get('irc')
ftp=jsonpredatas.get('http')
print "read"
meta=ftp.get('Commands')
for entries in meta :
method_name=entries.get('method')
id_=entries.get('id')
args=entries.get('args')
host="192.168.179.136"
port="80"
final_args=[]
if (id_=="http_2"):#replace it later by if 1:
for arg in args:
if isinstance(arg, basestring):
arg=arg.replace("<host>",host)
arg=arg.replace("<port>",port)
final_args.append(arg)
if ((method_name)):
func = getattr(self.commandObj,method_name)
print "Invoking !!!"
is_interactive=entries.get('interactive')
if((is_interactive !=None ) and (is_interactive =="1")):
print "Launching mathod in General interactive mode !!"
func(final_args,True)
else:
print "Launching mathod without interactive mode !!"
func(final_args)
driverObj=Driver()
driverObj.main()
#driverObj.integration_test()
#m.cleanUp()
"""for k,v in ftp.iteritems():
#print "key :\n"+str(k) + "\nValue :\n" +str(v)
for items in v :
print str(values)+"\n\n"
for
module_name=values.get('Script')
method_name=values.get('method')
if ((module_name ) and (method_name)):
print "Module : "+module_name + "Method :" +method_name
for key_ in jsonpredatas.keys():
for k in jsonpredatas.get(key_):
print key_
print k
#m = __import__ ('module_name')
#func = getattr(m,'method_name')
#func()"""
"""
{"Script":"commands","method":"meta_commands","args":["workspace -a ssh_version_tester\n","set THREADS 1\n","workspace ssh_version_tester\n","use auxiliary/scanner/ftp/anonymous\n","set RHOSTS 192.168.179.136\n"]}
"FTP_command":
{
"Metasploit_commands":[{"Script":"Metasploit.py","method":"meta_ftp","args":["workspace -a ssh_version_tester\n","set THREADS 10\n","workspace ssh_version_tester1\n","use auxiliary/scanner/ftp/ftp_login\n","set RHOSTS 192.168.179.136\n","set USERNAME root\n","set PASSWORD toor\n","set VERBOSE false\n"]}],"Terminal_commands":["val2"]
}
"""
| [
"[email protected]"
] | |
4a47178fb06d05660bafa29a5ef90b32d359dd97 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/numpy/linalg/__init__.py | 5f6f0789803b35e7885c22cbd333ab9c1a00c704 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-numpy-1.18.3-c5tvrr2q5vwgtvc6f3ld57v6y4ahvr2h/lib/python3.7/site-packages/numpy/linalg/__init__.py | [
"[email protected]"
] | |
3a08322542080a2fcc64710b9dc2610df64888eb | 00820b522cc16bf996f1ef44a94a2f31989c4065 | /abc/abc135/a.py | fd0e4e82004daed404daa9c533c478c68aa7447d | [] | no_license | yamato1992/at_coder | 6dffd425163a37a04e37507743a15f67b29239fc | 6e0ec47267ed3cae62aebdd3d149f6191fdcae27 | refs/heads/master | 2020-08-31T11:17:03.500616 | 2020-06-12T15:45:58 | 2020-06-12T15:45:58 | 218,678,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | A, B = map(int, input().split())
k = (A + B) / 2
if k % 1 == 0:
print(int(k))
else:
print('IMPOSSIBLE') | [
"[email protected]"
] | |
b832f61d96cc32d7408a37057ca8f6beeaa6d209 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/dalvik/dex/inject/InjectPayloadDexRange.pyi | 6efab5017e40b6f8e42699d88edaa8c65a0cbbb8 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,994 | pyi | from typing import List
import ghidra.app.plugin.processors.sleigh
import ghidra.program.model.lang
import ghidra.program.model.listing
import ghidra.program.model.pcode
import java.lang
class InjectPayloadDexRange(object, ghidra.program.model.lang.InjectPayload):
CALLFIXUP_TYPE: int = 1
CALLMECHANISM_TYPE: int = 3
CALLOTHERFIXUP_TYPE: int = 2
EXECUTABLEPCODE_TYPE: int = 4
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getInput(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
def getName(self) -> unicode: ...
def getOutput(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
def getParamShift(self) -> int: ...
def getPcode(self, __a0: ghidra.program.model.listing.Program, __a1: ghidra.program.model.lang.InjectContext) -> List[ghidra.program.model.pcode.PcodeOp]: ...
def getSource(self) -> unicode: ...
def getType(self) -> int: ...
def hashCode(self) -> int: ...
def inject(self, __a0: ghidra.program.model.lang.InjectContext, __a1: ghidra.app.plugin.processors.sleigh.PcodeEmit) -> None: ...
def isFallThru(self) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def fallThru(self) -> bool: ...
@property
def input(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
@property
def name(self) -> unicode: ...
@property
def output(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
@property
def paramShift(self) -> int: ...
@property
def source(self) -> unicode: ...
@property
def type(self) -> int: ...
| [
"[email protected]"
] | |
dced9c197b5f1c3d8ec2b246cbf2d816188ae156 | 0bcee5fb01f99957e49c14b0e8831e7d6eedef2c | /emlp/nn.py | ed8113b2aaa95058f346e96ef138bb3dfdd01daf | [
"MIT"
] | permissive | g-benton/equivariant-MLP | 4cb758b912734e062c7a2d6492f639b79d761666 | 1f749dc1dc21de7c5f01ab5348dc7912f34de0a1 | refs/heads/master | 2023-04-04T05:30:15.342439 | 2021-04-02T16:59:49 | 2021-04-02T16:59:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,654 | py | import jax
import jax.numpy as jnp
import objax.nn as nn
import objax.functional as F
import numpy as np
from emlp.reps import T,Rep,Scalar
from emlp.reps import bilinear_weights
from emlp.reps.product_sum_reps import SumRep
import collections
from oil.utils.utils import Named,export
import scipy as sp
import scipy.special
import random
import logging
from objax.variable import TrainVar, StateVar
from objax.nn.init import kaiming_normal, xavier_normal
from objax.module import Module
import objax
from objax.nn.init import orthogonal
from scipy.special import binom
from jax import jit,vmap
from functools import lru_cache as cache
def Sequential(*args):
""" Wrapped to mimic pytorch syntax"""
return nn.Sequential(args)
@export
class Linear(nn.Linear):
""" Basic equivariant Linear layer from repin to repout."""
def __init__(self, repin, repout):
nin,nout = repin.size(),repout.size()
super().__init__(nin,nout)
self.b = TrainVar(objax.random.uniform((nout,))/jnp.sqrt(nout))
self.w = TrainVar(orthogonal((nout, nin)))
self.rep_W = rep_W = repout*repin.T
rep_bias = repout
self.Pw = rep_W.equivariant_projector()
self.Pb = rep_bias.equivariant_projector()
logging.info(f"Linear W components:{rep_W.size()} rep:{rep_W}")
def __call__(self, x): # (cin) -> (cout)
logging.debug(f"linear in shape: {x.shape}")
W = ([email protected](-1)).reshape(*self.w.value.shape)
b = [email protected]
out = [email protected]+b
logging.debug(f"linear out shape:{out.shape}")
return out
@export
class BiLinear(Module):
""" Cheap bilinear layer (adds parameters for each part of the input which can be
interpreted as a linear map from a part of the input to the output representation)."""
def __init__(self, repin, repout):
super().__init__()
Wdim, weight_proj = bilinear_weights(repout,repin)
self.weight_proj = jit(weight_proj)
self.w = TrainVar(objax.random.normal((Wdim,)))#xavier_normal((Wdim,))) #TODO: revert to xavier
logging.info(f"BiW components: dim:{Wdim}")
def __call__(self, x,training=True):
# compatible with non sumreps? need to check
W = self.weight_proj(self.w.value,x)
out= .1*(W@x[...,None])[...,0]
return out
@export
def gated(sumrep): #TODO: generalize to mixed tensors?
""" Returns the rep with an additional scalar 'gate' for each of the nonscalars and non regular
reps in the input. To be used as the output for linear (and or bilinear) layers directly
before a :func:`GatedNonlinearity` to produce its scalar gates. """
return sumrep+sum([Scalar(rep.G) for rep in sumrep if rep!=Scalar and not rep.is_regular])
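# Example (added, assuming the SO(3) group from emlp.groups): for
# sumrep = 100*T(0) + 10*T(1), gated(sumrep) keeps the 130 original channels
# and appends 10 gate scalars (one per vector, since T(1) is neither a
# scalar nor a regular rep), giving a rep of size 140.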
@export
class GatedNonlinearity(Module): #TODO: add support for mixed tensors and non sumreps
""" Gated nonlinearity. Requires input to have the additional gate scalars
for every non regular and non scalar rep. Applies swish to regular and
scalar reps. (Right now assumes rep is a SumRep)"""
def __init__(self,rep):
super().__init__()
self.rep=rep
def __call__(self,values):
gate_scalars = values[..., gate_indices(self.rep)]
activations = jax.nn.sigmoid(gate_scalars) * values[..., :self.rep.size()]
return activations
@export
class EMLPBlock(Module):
""" Basic building block of EMLP consisting of G-Linear, biLinear,
and gated nonlinearity. """
def __init__(self,rep_in,rep_out):
super().__init__()
self.linear = Linear(rep_in,gated(rep_out))
self.bilinear = BiLinear(gated(rep_out),gated(rep_out))
self.nonlinearity = GatedNonlinearity(rep_out)
def __call__(self,x):
lin = self.linear(x)
preact =self.bilinear(lin)+lin
return self.nonlinearity(preact)
def uniform_rep_general(ch,*rep_types):
""" adds all combinations of (powers of) rep_types up to
a total of ch channels."""
#TODO: write this function
raise NotImplementedError
@export
def uniform_rep(ch,group):
""" A heuristic method for allocating a given number of channels (ch)
into tensor types. Attempts to distribute the channels evenly across
the different tensor types. Useful for hands off layer construction.
Args:
ch (int): total number of channels
group (Group): symmetry group
Returns:
SumRep: The direct sum representation with dim(V)=ch
"""
d = group.d
Ns = np.zeros((lambertW(ch,d)+1,),int) # number of tensors of each rank
while ch>0:
max_rank = lambertW(ch,d) # compute the max rank tensor that can fit up to
Ns[:max_rank+1] += np.array([d**(max_rank-r) for r in range(max_rank+1)],dtype=int)
ch -= (max_rank+1)*d**max_rank # compute leftover channels
sum_rep = sum([binomial_allocation(nr,r,group) for r,nr in enumerate(Ns)])
sum_rep,perm = sum_rep.canonicalize()
return sum_rep
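# Worked example (added): for ch=512 and a group with d=3, the loop above
# produces Ns = [122, 40, 12, 3, 1], i.e. 122 rank-0, 40 rank-1, 12 rank-2,
# 3 rank-3 and 1 rank-4 tensors, and 122*1 + 40*3 + 12*9 + 3*27 + 1*81 = 512.
# binomial_allocation then spreads each T(rank) over the mixed T(p, rank-p).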
def lambertW(ch,d):
""" Returns solution to x*d^x = ch rounded down."""
max_rank=0
while (max_rank+1)*d**max_rank <= ch:
max_rank += 1
max_rank -= 1
return max_rank
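# e.g. (added) lambertW(512, 3) == 4, since 5*3**4 = 405 <= 512 while
# 6*3**5 = 1458 > 512.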
def binomial_allocation(N,rank,G):
""" Allocates N of tensors of total rank r=(p+q) into
T(k,r-k) for k=0,1,...,r to match the binomial distribution.
For orthogonal representations there is no
distinction between p and q, so this op is equivalent to N*T(rank)."""
if N==0: return 0
n_binoms = N//(2**rank)
n_leftover = N%(2**rank)
even_split = sum([n_binoms*int(binom(rank,k))*T(k,rank-k,G) for k in range(rank+1)])
ps = np.random.binomial(rank,.5,n_leftover)
ragged = sum([T(int(p),rank-int(p),G) for p in ps])
out = even_split+ragged
return out
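# Example (added): binomial_allocation(8, 2, G) gives n_binoms = 8 // 4 = 2
# with no leftover, returning 2*T(0,2) + 4*T(1,1) + 2*T(2,0) -- eight rank-2
# tensors split 1:2:1 to match the binomial coefficients.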
def uniform_allocation(N,rank):
""" Uniformly allocates N of tensors of total rank r=(p+q) into
T(k,r-k) for k=0,1,...,r. For orthogonal representations there is no
distinction between p and q, so this op is equivalent to N*T(rank)."""
if N==0: return 0
even_split = sum((N//(rank+1))*T(k,rank-k) for k in range(rank+1))
ragged = sum(random.sample([T(k,rank-k) for k in range(rank+1)],N%(rank+1)))
return even_split+ragged
@export
class EMLP(Module,metaclass=Named):
""" Equivariant MultiLayer Perceptron.
If the input ch argument is an int, uses the hands off uniform_rep heuristic.
If the ch argument is a representation, uses this representation for the hidden layers.
Individual layer representations can be set explicitly by using a list of ints or a list of
representations, rather than use the same for each hidden layer.
Args:
rep_in (Rep): input representation
rep_out (Rep): output representation
group (Group): symmetry group
ch (int or list[int] or Rep or list[Rep]): number of channels in the hidden layers
num_layers (int): number of hidden layers
Returns:
Module: the EMLP objax module."""
def __init__(self,rep_in,rep_out,group,ch=384,num_layers=3):#@
super().__init__()
logging.info("Initing EMLP")
self.rep_in =rep_in(group)
self.rep_out = rep_out(group)
self.G=group
# Parse ch as a single int, a sequence of ints, a single Rep, a sequence of Reps
if isinstance(ch,int): middle_layers = num_layers*[uniform_rep(ch,group)]#[uniform_rep(ch,group) for _ in range(num_layers)]
elif isinstance(ch,Rep): middle_layers = num_layers*[ch(group)]
else: middle_layers = [(c(group) if isinstance(c,Rep) else uniform_rep(c,group)) for c in ch]
#assert all((not rep.G is None) for rep in middle_layers[0].reps)
reps = [self.rep_in]+middle_layers
#logging.info(f"Reps: {reps}")
self.network = Sequential(
*[EMLPBlock(rin,rout) for rin,rout in zip(reps,reps[1:])],
Linear(reps[-1],self.rep_out)
)
def __call__(self,x,training=True):
return self.network(x)
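# Minimal usage sketch (added; assumes emlp.groups provides SO):
#   from emlp.groups import SO
#   model = EMLP(T(1), T(0), group=SO(3), ch=128, num_layers=2)
#   y = model(x)  # x: (batch, 3) array -> y: (batch, 1) invariant output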
def swish(x):
return jax.nn.sigmoid(x)*x
def MLPBlock(cin,cout):
return Sequential(nn.Linear(cin,cout),swish)#,nn.BatchNorm0D(cout,momentum=.9),swish)#,
@export
class MLP(Module,metaclass=Named):
""" Standard baseline MLP. Representations and group are used for shapes only. """
def __init__(self,rep_in,rep_out,group,ch=384,num_layers=3):
super().__init__()
self.rep_in =rep_in(group)
self.rep_out = rep_out(group)
self.G = group
chs = [self.rep_in.size()] + num_layers*[ch]
cout = self.rep_out.size()
logging.info("Initing MLP")
self.net = Sequential(
*[MLPBlock(cin,cout) for cin,cout in zip(chs,chs[1:])],
nn.Linear(chs[-1],cout)
)
def __call__(self,x,training=True):
y = self.net(x)
return y
@export
class Standardize(Module):
""" A convenience module to wrap a given module, normalize its input
by some dataset x mean and std stats, and unnormalize its output by
the dataset y mean and std stats.
Args:
model (Module): model to wrap
ds_stats ((μx,σx,μy,σy) or (μx,σx)): tuple of the normalization stats
Returns:
Module: Wrapped model with input normalization (and output unnormalization)"""
def __init__(self,model,ds_stats):
super().__init__()
self.model = model
self.ds_stats=ds_stats
def __call__(self,x,training):
if len(self.ds_stats)==2:
muin,sin = self.ds_stats
return self.model((x-muin)/sin,training=training)
else:
muin,sin,muout,sout = self.ds_stats
y = sout*self.model((x-muin)/sin,training=training)+muout
return y
# Networks for hamiltonian dynamics (need to sum for batched Hamiltonian grads)
@export
class MLPode(Module,metaclass=Named):
def __init__(self,rep_in,rep_out,group,ch=384,num_layers=3):
super().__init__()
self.rep_in =rep_in(group)
self.rep_out = rep_out(group)
self.G = group
chs = [self.rep_in.size()] + num_layers*[ch]
cout = self.rep_out.size()
logging.info("Initing MLP")
self.net = Sequential(
*[Sequential(nn.Linear(cin,cout),swish) for cin,cout in zip(chs,chs[1:])],
nn.Linear(chs[-1],cout)
)
def __call__(self,z,t):
return self.net(z)
@export
class EMLPode(EMLP):
""" Neural ODE Equivariant MLP. Same args as EMLP."""
#__doc__ += EMLP.__doc__.split('.')[1]
def __init__(self,rep_in,rep_out,group,ch=384,num_layers=3):#@
#super().__init__()
logging.info("Initing EMLP")
self.rep_in =rep_in(group)
self.rep_out = rep_out(group)
self.G=group
# Parse ch as a single int, a sequence of ints, a single Rep, a sequence of Reps
if isinstance(ch,int): middle_layers = num_layers*[uniform_rep(ch,group)]#[uniform_rep(ch,group) for _ in range(num_layers)]
elif isinstance(ch,Rep): middle_layers = num_layers*[ch(group)]
else: middle_layers = [(c(group) if isinstance(c,Rep) else uniform_rep(c,group)) for c in ch]
#print(middle_layers[0].reps[0].G)
#print(self.rep_in.G)
reps = [self.rep_in]+middle_layers
logging.info(f"Reps: {reps}")
self.network = Sequential(
*[EMLPBlock(rin,rout) for rin,rout in zip(reps,reps[1:])],
Linear(reps[-1],self.rep_out)
)
def __call__(self,z,t):
return self.network(z)
# Networks for hamiltonian dynamics (need to sum for batched Hamiltonian grads)
@export
class MLPH(Module,metaclass=Named):
def __init__(self,rep_in,rep_out,group,ch=384,num_layers=3):
super().__init__()
self.rep_in =rep_in(group)
self.rep_out = rep_out(group)
self.G = group
chs = [self.rep_in.size()] + num_layers*[ch]
cout = self.rep_out.size()
logging.info("Initing MLP")
self.net = Sequential(
*[Sequential(nn.Linear(cin,cout),swish) for cin,cout in zip(chs,chs[1:])],
nn.Linear(chs[-1],cout)
)
def H(self,x):#,training=True):
y = self.net(x).sum()
return y
def __call__(self,x):
return self.H(x)
@export
class EMLPH(EMLP):
""" Equivariant EMLP modeling a Hamiltonian for HNN. Same args as EMLP"""
#__doc__ += EMLP.__doc__.split('.')[1]
def H(self,x):#,training=True):
y = self.network(x)
return y.sum()
def __call__(self,x):
return self.H(x)
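# Usage sketch (comments only, not executed): because H sums over the batch,
# jax.grad recovers per-sample gradients dH/dz for the Hamiltonian dynamics:
#
#     model = EMLPH(rep_in, rep_out, group)
#     dH_dz = jax.grad(model.H)(z_batch)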
@cache(maxsize=None)
def gate_indices(sumrep): #TODO: add support for mixed_tensors
""" Indices for scalars, and also additional scalar gates
added by gated(sumrep)"""
assert isinstance(sumrep,SumRep), f"unexpected type for gate indices {type(sumrep)}"
channels = sumrep.size()
perm = sumrep.perm
indices = np.arange(channels)
num_nonscalars = 0
i=0
for rep in sumrep:
if rep!=Scalar and not rep.is_regular:
indices[perm[i:i+rep.size()]] = channels+num_nonscalars
num_nonscalars+=1
i+=rep.size()
return indices | [
"[email protected]"
] | |
741d55862503d5f145872c689ccfd3f4780a57c2 | 645b5211c50b1a07a5d576b96624b22055802dc4 | /pvw-dependencies/pv-flow/flow/plugins/__init__.py | 1b8e3c325a97cccaf1c8c5f4654e6b07c027e9f1 | [
"Apache-2.0"
] | permissive | dealenx/hpccloud-kemsu | 7c3a33e5ce01560d6fc7abcb9524e4526b9f4848 | 42fc44b06385c6eb25a979477dcea53fe66cfbfa | refs/heads/master | 2023-02-05T21:13:07.328928 | 2021-06-25T04:56:39 | 2021-06-25T04:56:39 | 252,550,259 | 3 | 0 | Apache-2.0 | 2023-01-24T23:21:39 | 2020-04-02T19:41:31 | Python | UTF-8 | Python | false | false | 626 | py | import os
from paraview import simple
# -----------------------------------------------------------------------------
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
PLUGINS = [
'parflow.py'
]
FULL_PATHS = [
    # Machine-specific absolute path; adjust to the local ParaView installation.
    '/Applications/ParaView-5.6.0-1626-g52acf2f741.app/Contents/Plugins/ParFlow.so',
]
# -----------------------------------------------------------------------------
# Load the plugins
# -----------------------------------------------------------------------------
for plugin in PLUGINS:
simple.LoadPlugin(os.path.join(MODULE_PATH, plugin))
for plugin in FULL_PATHS:
simple.LoadPlugin(plugin)
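# Once the plugins are loaded, their readers behave like built-in ParaView
# sources (a sketch, not executed here; the data path is illustrative):
#
#     reader = simple.OpenDataFile('/path/to/run.out.pfb')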
| [
"[email protected]"
] | |
6cd6eee44b489002c6e6e5258534b94e5f4f6c30 | 6be956588b6bfdb5004d812872ef23973de9e07c | /l_006_docker/ubuntu/load_all.py | e24265db010f75fe97e70320bfbd6f2c4639320b | [] | no_license | Serg-sh/teleBots_aiogram | 45c8ee60501cff9a6035dbbab820975aade897e8 | 4bc9d452b6b6098fb25d4c9704a025737d59d4c8 | refs/heads/master | 2023-06-10T14:28:10.696277 | 2021-07-05T14:30:46 | 2021-07-05T14:30:46 | 375,966,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | import asyncio
import logging
from aiogram import Bot, types
from aiogram import Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from config import TOKEN
from sql import create_pool, create_db
# from aiogram.contrib.fsm_storage.redis import RedisStorage2
logging.basicConfig(format=u'%(filename)s [LINE:%(lineno)d] #%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO)
loop = asyncio.get_event_loop()
# We don't need to pass the loop around here, since the dispatcher creates one itself anyway.
# Set up storage (either in Redis or Memory)
storage = MemoryStorage()
# storage = RedisStorage2()
bot = Bot(token=TOKEN, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot, storage=storage)
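# Polling is typically started from a separate entry point (a sketch, not
# executed in this module):
#
#     from aiogram import executor
#     executor.start_polling(dp, skip_updates=True)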
db = loop.run_until_complete(create_pool()) | [
"[email protected]"
] | |
f5efd08c910b830ef549e690e939ca4a01a2f950 | 9615178b79a69519883c092b20cfdd060b426a69 | /sublemon/version.py | 2e863c21379228f97e1ba1ac4cb053db461ad577 | [
"MIT"
] | permissive | emuhedo/sublemon | 3635799567a8b18f863d582e7b23d6840069ce37 | 198da2ec96d4d50c7017f4ebfa8e69e5aa0681b0 | refs/heads/master | 2020-04-02T09:22:31.272188 | 2018-10-20T19:10:18 | 2018-10-20T19:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | """Version info for the `sublemon` library."""
__version_info__ = (0, 0, 2)
__version__ = '.'.join(str(i) for i in __version_info__)
| [
"[email protected]"
] | |
8a70a022a1bff29b68d30aa56e8fbe8aadb30ed0 | 14f1af759b594b4fab570fd98fc8dceae668d62f | /file_IO_exercise/bonus_crashTest.py | b5c7ff5ef2a8d870dfe9eba6a717490f50abbd02 | [] | no_license | ziqingW/python-exercise-flex-Mar08 | cb10bf8c6f376808ff5bfadc86066a7c31b48120 | 070be19fb63b1ec6312c477bb656c19339448d67 | refs/heads/master | 2021-04-03T01:45:53.533366 | 2018-03-09T16:51:43 | 2018-03-09T16:51:43 | 124,464,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | import io
file_handle = io.StringIO()
while True:
    file_handle.write("B" * 1024 * 1024)  # append 1 MB of "B" characters per pass
    size_contents = len(file_handle.getvalue())  # getvalue() copies the entire buffer
    print("Character count: {}".format(size_contents))
# The crash happened at a character count of 208666624 (MemoryError).
# Writing 1 MB per iteration, the buffer had reached roughly 200 MB in total.
# That is later than I expected; I never thought Cloud9 would allow as much as 200 MB of memory. | [
"[email protected]"
] | |
2e18e65d3098282a56dc5d1a6a480e964b0032af | 176bda9771b0ec07a3206112eb4dbd34f5dc293a | /seznamy/seznamy/06_zmena_polozky.py | 2f5345e2776005aa4f10f4fe61bfd83a219421a2 | [] | no_license | HelenaJanda/pyladies-7 | 8b4254f85295fb5695c60b1b5d8f70e29d1a999f | 00613e2ff1bea2b8b8f60d3e4ce7a83345f2300d | refs/heads/master | 2022-12-24T03:12:35.338933 | 2020-10-14T15:32:46 | 2020-10-14T15:32:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # Zkusme zmenit pismeno v retezci
retezec = "ahoj"
try:
    retezec[0] = "A"  # strings are immutable, so this raises a TypeError
except TypeError as error:
    print(error)
print(retezec)

# And now in a list (lists are mutable, so item assignment works)
cisla = [1, 0, 2, 3, -6, 8, 13]
cisla[0] = 42
print(cisla) | [
"[email protected]"
] | |
676193468f407b65515fcd72b955175e02eb7f4c | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2017_v6/cuts_loose.py | 5b5523ca9223e6190987132c5c63712a4cce2ab3 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 5,984 | py |
supercut = ' mll>12 \
&& Lepton_pt[0]>25 \
&& Lepton_pt[1]>10 \
&& (abs(Lepton_pdgId[1])==13 || Lepton_pt[1]>13) \
&& (nLepton>=2 && Alt$(Lepton_pt[2],0)<10) \
&& abs(Lepton_eta[0])<2.5 && abs(Lepton_eta[1])<2.5 \
&& ptll>30 \
&& PuppiMET_pt > 20 \
'
# DYMVA selections defining the loose (DY-enriched) window: 0.8 < DYMVA < 0.95
dymva0jet = 'dymva_alt_dnn_0j > 0.8 && dymva_alt_dnn_0j < 0.95'
dymva1jet = 'dymva_alt_dnn_1j > 0.8 && dymva_alt_dnn_1j < 0.95'
dymva2jet = 'dymva_alt_dnn_2j > 0.8 && dymva_alt_dnn_2j < 0.95'
dymvaVBF = 'dymva_alt_dnn_VBF > 0.8 && dymva_alt_dnn_VBF < 0.95'
dymvaVH = 'dymva_alt_dnn_VH > 0.8 && dymva_alt_dnn_VH < 0.95'
# Higgs signal regions: ee/mm in the 0/1/2-jet, VH and VBF categories
cuts['hww2l2v_13TeV'] = {
'expr': 'sr && (Lepton_pdgId[0]==-Lepton_pdgId[1])' ,
'categories' : {
'0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs0jet && '+dymva0jet,
'0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs0jet && '+dymva0jet,
'1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs1jet && '+dymva1jet,
'1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs1jet && '+dymva1jet,
'2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs2jet && '+dymva2jet,
'2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs2jet && '+dymva2jet,
'2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvh && '+dymvaVH,
'2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvh && '+dymvaVH,
'2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvbf && '+dymvaVBF,
'2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvbf && '+dymvaVBF,
}
}
## DY background 'in' region inside the DYMVA window: split ee/mm/df, no Higgs cuts
cuts['hww2l2v_13TeV_DYin'] = {
'expr' : 'Zpeak && bVeto',
'categories' : {
'0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva0jet,
'0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva0jet,
'0j_df' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva0jet,
'1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva1jet,
'1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva1jet,
'1j_df' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva1jet,
'2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva2jet,
'2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva2jet,
'2j_df' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva2jet,
'2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymvaVH,
'2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymvaVH,
'2j_vh_df' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymvaVH,
'2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymvaVBF,
'2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymvaVBF,
'2j_vbf_df' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymvaVBF,
}
}
# Numerator for DY acceptance in Signal region
cuts['hww2l2v_13TeV_HAccNum'] = {
'expr': 'sr && (Lepton_pdgId[0]==-Lepton_pdgId[1])' ,
'categories' : {
'0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs0jet && dymva_alt_dnn_0j > 0.8',
'0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs0jet && dymva_alt_dnn_0j > 0.8',
'1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs1jet && dymva_alt_dnn_1j > 0.8',
'1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs1jet && dymva_alt_dnn_1j > 0.8',
'2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs2jet && dymva_alt_dnn_2j > 0.8',
'2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs2jet && dymva_alt_dnn_2j > 0.8',
'2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvh && dymva_alt_dnn_VH > 0.8',
'2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvh && dymva_alt_dnn_VH > 0.8',
'2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvbf && dymva_alt_dnn_VBF > 0.8',
'2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvbf && dymva_alt_dnn_VBF > 0.8',
}
}
## Acceptance denominator: same categories with only DYMVA > 0.8 applied (no Higgs cuts)
cuts['hww2l2v_13TeV_AccDen'] = {
'expr' : 'sr * (Lepton_pdgId[0]==-Lepton_pdgId[1])',
'categories' : {
'0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_0j > 0.8',
'0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_0j > 0.8',
'1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_1j > 0.8',
'1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_1j > 0.8',
'2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_2j > 0.8',
'2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_2j > 0.8',
'2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_VH > 0.8',
'2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_VH > 0.8',
'2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_VBF > 0.8',
'2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_VBF > 0.8',
}
}
| [
"[email protected]"
] | |
1aef6511d6c15c98a7eb11431325e8bdb0e80f1c | 3ef3266bcc8d74e81bf303bdd16fcaa5e22f142b | /telemetry/telemetry/internal/story_runner_unittest.py | f621c11785d550313eb3939d029c1d4cb3d42663 | [
"BSD-3-Clause"
] | permissive | chandangoyal/catapult | 6cf7a30f1c655d6ba82c4766453b767b9f720efe | 52d748d48b4d5b334c0416954ac73f4c352b6627 | refs/heads/master | 2021-07-19T22:59:09.101731 | 2017-10-26T22:26:14 | 2017-10-26T23:34:09 | 108,620,526 | 0 | 0 | null | 2017-10-28T04:49:00 | 2017-10-28T04:49:00 | null | UTF-8 | Python | false | false | 52,103 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import math
import os
import shutil
import StringIO
import sys
import tempfile
import unittest
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.actions import page_action
from telemetry.internal.results import page_test_results
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
from telemetry.internal.util import exception_formatter as ex_formatter_module
from telemetry.page import page as page_module
from telemetry.page import legacy_page_test
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.testing import options_for_unittests
from telemetry.testing import system_stub
import mock
from telemetry.value import failure
from telemetry.value import improvement_direction
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from telemetry.value import skip
from telemetry.value import summary as summary_module
from telemetry.web_perf import story_test
from telemetry.web_perf import timeline_based_measurement
from telemetry.wpr import archive_info
from tracing.value import histogram as histogram_module
from tracing.value import histogram_set
from tracing.value.diagnostics import reserved_infos
# This linter complains if we define classes nested inside functions.
# pylint: disable=bad-super-call
# pylint: disable=too-many-lines
class FakePlatform(object):
def CanMonitorThermalThrottling(self):
return False
def WaitForBatteryTemperature(self, _):
pass
def GetDeviceTypeName(self):
return 'GetDeviceTypeName'
def GetArchName(self):
return 'amd64'
def GetOSName(self):
return 'win'
def GetOSVersionName(self):
return 'win10'
def GetSystemTotalPhysicalMemory(self):
return 8 * (1024 ** 3)
def GetDeviceId(self):
return None
class TestSharedState(story_module.SharedState):
_platform = FakePlatform()
@classmethod
def SetTestPlatform(cls, platform):
cls._platform = platform
def __init__(self, test, options, story_set):
super(TestSharedState, self).__init__(
test, options, story_set)
self._test = test
self._current_story = None
@property
def platform(self):
return self._platform
def WillRunStory(self, story):
self._current_story = story
def CanRunStory(self, story):
return True
def RunStory(self, results):
self._test.ValidateAndMeasurePage(self._current_story, None, results)
def DidRunStory(self, results):
pass
def TearDownState(self):
pass
def DumpStateUponFailure(self, story, results):
pass
class TestSharedPageState(TestSharedState):
def RunStory(self, results):
self._test.RunPage(self._current_story, None, results)
class FooStoryState(TestSharedPageState):
pass
class DummyTest(legacy_page_test.LegacyPageTest):
def RunPage(self, *_):
pass
def ValidateAndMeasurePage(self, page, tab, results):
pass
class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
def __init__(self):
super(EmptyMetadataForTest, self).__init__('')
class DummyLocalStory(story_module.Story):
def __init__(self, shared_state_class, name='', tags=None):
if name == '':
name = 'dummy local story'
super(DummyLocalStory, self).__init__(
shared_state_class, name=name, tags=tags)
def Run(self, shared_state):
pass
@property
def is_local(self):
return True
@property
def url(self):
return 'data:,'
class _DisableBenchmarkExpectations(
story_module.expectations.StoryExpectations):
def SetExpectations(self):
self.DisableBenchmark([story_module.expectations.ALL], 'crbug.com/123')
class _DisableStoryExpectations(story_module.expectations.StoryExpectations):
def SetExpectations(self):
self.DisableStory('one', [story_module.expectations.ALL], 'crbug.com/123')
class FakeBenchmark(benchmark.Benchmark):
def __init__(self):
super(FakeBenchmark, self).__init__()
self._disabled = False
self._story_disabled = False
@classmethod
def Name(cls):
return 'fake'
test = DummyTest
def page_set(self):
return story_module.StorySet()
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, b):
assert isinstance(b, bool)
self._disabled = b
@property
def story_disabled(self):
return self._story_disabled
@story_disabled.setter
def story_disabled(self, b):
assert isinstance(b, bool)
self._story_disabled = b
def GetExpectations(self):
if self.story_disabled:
return _DisableStoryExpectations()
if self.disabled:
return _DisableBenchmarkExpectations()
return story_module.expectations.StoryExpectations()
def _GetOptionForUnittest():
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = False
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
return options
class FakeExceptionFormatterModule(object):
@staticmethod
def PrintFormattedException(
exception_class=None, exception=None, tb=None, msg=None):
pass
def GetNumberOfSuccessfulPageRuns(results):
return len([run for run in results.all_page_runs if run.ok or run.skipped])
def GetNumberOfSkippedPageRuns(results):
return len([run for run in results.all_page_runs if run.skipped])
class TestOnlyException(Exception):
pass
class FailureValueMatcher(object):
def __init__(self, expected_exception_message):
self._expected_exception_message = expected_exception_message
def __eq__(self, other):
return (isinstance(other, failure.FailureValue) and
other.exc_info[1].message == self._expected_exception_message)
class SkipValueMatcher(object):
def __eq__(self, other):
return isinstance(other, skip.SkipValue)
class _Measurement(legacy_page_test.LegacyPageTest):
i = 0
def RunPage(self, page, _, results):
self.i += 1
results.AddValue(scalar.ScalarValue(
page, 'metric', 'unit', self.i,
improvement_direction=improvement_direction.UP))
def ValidateAndMeasurePage(self, page, tab, results):
self.i += 1
results.AddValue(scalar.ScalarValue(
page, 'metric', 'unit', self.i,
improvement_direction=improvement_direction.UP))
class StoryRunnerTest(unittest.TestCase):
def setUp(self):
self.fake_stdout = StringIO.StringIO()
self.actual_stdout = sys.stdout
sys.stdout = self.fake_stdout
self.options = _GetOptionForUnittest()
self.results = results_options.CreateResults(
EmptyMetadataForTest(), self.options)
self._story_runner_logging_stub = None
def SuppressExceptionFormatting(self):
"""Fake out exception formatter to avoid spamming the unittest stdout."""
story_runner.exception_formatter = FakeExceptionFormatterModule
self._story_runner_logging_stub = system_stub.Override(
story_runner, ['logging'])
def RestoreExceptionFormatter(self):
story_runner.exception_formatter = ex_formatter_module
if self._story_runner_logging_stub:
self._story_runner_logging_stub.Restore()
self._story_runner_logging_stub = None
def tearDown(self):
sys.stdout = self.actual_stdout
results_file_path = os.path.join(os.path.dirname(__file__), '..',
'testing', 'results.html')
if os.path.isfile(results_file_path):
os.remove(results_file_path)
self.RestoreExceptionFormatter()
def testRunStorySet(self):
number_stories = 3
story_set = story_module.StorySet()
for i in xrange(number_stories):
story_set.AddStory(DummyLocalStory(FooStoryState, name='story_%d' % i))
test = DummyTest()
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEquals(0, len(self.results.failures))
self.assertEquals(number_stories,
GetNumberOfSuccessfulPageRuns(self.results))
def testRunStoryWithMissingArchiveFile(self):
story_set = story_module.StorySet(archive_data_file='data/hi.json')
story_set.AddStory(page_module.Page(
'http://www.testurl.com', story_set, story_set.base_dir,
name='http://www.testurl.com'))
test = DummyTest()
self.assertRaises(story_runner.ArchiveError, story_runner.Run, test,
story_set, self.options, self.results)
def testRunStoryWithLongName(self):
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(FooStoryState, name='l' * 182))
test = DummyTest()
self.assertRaises(ValueError, story_runner.Run, test, story_set,
self.options, self.results)
def testRunStoryWithLongURLPage(self):
story_set = story_module.StorySet()
story_set.AddStory(page_module.Page('file://long' + 'g' * 180,
story_set, name='test'))
test = DummyTest()
story_runner.Run(test, story_set,
self.options, self.results,
metadata=EmptyMetadataForTest())
def testSuccessfulTimelineBasedMeasurementTest(self):
"""Check that PageTest is not required for story_runner.Run.
Any PageTest related calls or attributes need to only be called
for PageTest tests.
"""
class TestSharedTbmState(TestSharedState):
def RunStory(self, results):
pass
TEST_WILL_RUN_STORY = 'test.WillRunStory'
TEST_MEASURE = 'test.Measure'
TEST_DID_RUN_STORY = 'test.DidRunStory'
EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
TEST_MEASURE,
TEST_DID_RUN_STORY]
test = timeline_based_measurement.TimelineBasedMeasurement(
timeline_based_measurement.Options())
manager = mock.MagicMock()
test.WillRunStory = mock.MagicMock()
test.Measure = mock.MagicMock()
test.DidRunStory = mock.MagicMock()
manager.attach_mock(test.WillRunStory, TEST_WILL_RUN_STORY)
manager.attach_mock(test.Measure, TEST_MEASURE)
manager.attach_mock(test.DidRunStory, TEST_DID_RUN_STORY)
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='foo'))
story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='bar'))
story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='baz'))
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEquals(0, len(self.results.failures))
self.assertEquals(3, GetNumberOfSuccessfulPageRuns(self.results))
self.assertEquals(3*EXPECTED_CALLS_IN_ORDER,
[call[0] for call in manager.mock_calls])
def testCallOrderBetweenStoryTestAndSharedState(self):
"""Check that the call order between StoryTest and SharedState is correct.
"""
TEST_WILL_RUN_STORY = 'test.WillRunStory'
TEST_MEASURE = 'test.Measure'
TEST_DID_RUN_STORY = 'test.DidRunStory'
STATE_WILL_RUN_STORY = 'state.WillRunStory'
STATE_RUN_STORY = 'state.RunStory'
STATE_DID_RUN_STORY = 'state.DidRunStory'
EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
STATE_WILL_RUN_STORY,
STATE_RUN_STORY,
TEST_MEASURE,
TEST_DID_RUN_STORY,
STATE_DID_RUN_STORY]
class TestStoryTest(story_test.StoryTest):
def WillRunStory(self, platform):
pass
def Measure(self, platform, results):
pass
def DidRunStory(self, platform, results):
pass
class TestSharedStateForStoryTest(TestSharedState):
def RunStory(self, results):
pass
@mock.patch.object(TestStoryTest, 'WillRunStory')
@mock.patch.object(TestStoryTest, 'Measure')
@mock.patch.object(TestStoryTest, 'DidRunStory')
@mock.patch.object(TestSharedStateForStoryTest, 'WillRunStory')
@mock.patch.object(TestSharedStateForStoryTest, 'RunStory')
@mock.patch.object(TestSharedStateForStoryTest, 'DidRunStory')
def GetCallsInOrder(state_DidRunStory, state_RunStory, state_WillRunStory,
test_DidRunStory, test_Measure, test_WillRunStory):
manager = mock.MagicMock()
manager.attach_mock(test_WillRunStory, TEST_WILL_RUN_STORY)
manager.attach_mock(test_Measure, TEST_MEASURE)
manager.attach_mock(test_DidRunStory, TEST_DID_RUN_STORY)
manager.attach_mock(state_WillRunStory, STATE_WILL_RUN_STORY)
manager.attach_mock(state_RunStory, STATE_RUN_STORY)
manager.attach_mock(state_DidRunStory, STATE_DID_RUN_STORY)
test = TestStoryTest()
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(TestSharedStateForStoryTest))
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
return [call[0] for call in manager.mock_calls]
calls_in_order = GetCallsInOrder() # pylint: disable=no-value-for-parameter
self.assertEquals(EXPECTED_CALLS_IN_ORDER, calls_in_order)
def testAppCrashExceptionCausesFailureValue(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class SharedStoryThatCausesAppCrash(TestSharedPageState):
def WillRunStory(self, story):
raise exceptions.AppCrashException(msg='App Foo crashes')
story_set.AddStory(DummyLocalStory(
SharedStoryThatCausesAppCrash))
story_runner.Run(
DummyTest(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEquals(1, len(self.results.failures))
self.assertEquals(0, GetNumberOfSuccessfulPageRuns(self.results))
self.assertIn('App Foo crashes', self.fake_stdout.getvalue())
def testExceptionRaisedInSharedStateTearDown(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class SharedStoryThatCausesAppCrash(TestSharedPageState):
def TearDownState(self):
raise TestOnlyException()
story_set.AddStory(DummyLocalStory(
SharedStoryThatCausesAppCrash))
with self.assertRaises(TestOnlyException):
story_runner.Run(
DummyTest(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
def testUnknownExceptionIsFatal(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class UnknownException(Exception):
pass
    # This erroneous test is set up to raise an exception on the 2nd story run.
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
self.run_count = 0
def RunPage(self, *_):
old_run_count = self.run_count
self.run_count += 1
if old_run_count == 1:
raise UnknownException('FooBarzException')
def ValidateAndMeasurePage(self, page, tab, results):
pass
s1 = DummyLocalStory(TestSharedPageState, name='foo')
s2 = DummyLocalStory(TestSharedPageState, name='bar')
story_set.AddStory(s1)
story_set.AddStory(s2)
test = Test()
with self.assertRaises(UnknownException):
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEqual(set([s2]), self.results.pages_that_failed)
self.assertEqual(set([s1]), self.results.pages_that_succeeded)
self.assertIn('FooBarzException', self.fake_stdout.getvalue())
def testRaiseBrowserGoneExceptionFromRunPage(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
self.run_count = 0
def RunPage(self, *_):
old_run_count = self.run_count
self.run_count += 1
if old_run_count == 0:
raise exceptions.BrowserGoneException(
None, 'i am a browser crash message')
def ValidateAndMeasurePage(self, page, tab, results):
pass
story_set.AddStory(DummyLocalStory(TestSharedPageState, name='foo'))
story_set.AddStory(DummyLocalStory(TestSharedPageState, name='bar'))
test = Test()
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEquals(2, test.run_count)
self.assertEquals(1, len(self.results.failures))
self.assertEquals(1, GetNumberOfSuccessfulPageRuns(self.results))
def testAppCrashThenRaiseInTearDownFatal(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
unit_test_events = [] # track what was called when
class DidRunTestError(Exception):
pass
class TestTearDownSharedState(TestSharedPageState):
def TearDownState(self):
unit_test_events.append('tear-down-state')
raise DidRunTestError
def DumpStateUponFailure(self, story, results):
unit_test_events.append('dump-state')
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
self.run_count = 0
def RunPage(self, *_):
old_run_count = self.run_count
self.run_count += 1
if old_run_count == 0:
unit_test_events.append('app-crash')
raise exceptions.AppCrashException
def ValidateAndMeasurePage(self, page, tab, results):
pass
story_set.AddStory(DummyLocalStory(TestTearDownSharedState, name='foo'))
story_set.AddStory(DummyLocalStory(TestTearDownSharedState, name='bar'))
test = Test()
with self.assertRaises(DidRunTestError):
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertEqual(['app-crash', 'dump-state', 'tear-down-state'],
unit_test_events)
# The AppCrashException gets added as a failure.
self.assertEquals(1, len(self.results.failures))
def testPagesetRepeat(self):
story_set = story_module.StorySet()
# TODO(eakuefner): Factor this out after flattening page ref in Value
blank_story = DummyLocalStory(TestSharedPageState, name='blank')
green_story = DummyLocalStory(TestSharedPageState, name='green')
story_set.AddStory(blank_story)
story_set.AddStory(green_story)
self.options.pageset_repeat = 2
self.options.output_formats = []
results = results_options.CreateResults(
EmptyMetadataForTest(), self.options)
story_runner.Run(
_Measurement(), story_set, self.options, results,
metadata=EmptyMetadataForTest())
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
blank_value = list_of_scalar_values.ListOfScalarValues(
blank_story, 'metric', 'unit', [1, 3],
improvement_direction=improvement_direction.UP)
green_value = list_of_scalar_values.ListOfScalarValues(
green_story, 'metric', 'unit', [2, 4],
improvement_direction=improvement_direction.UP)
merged_value = list_of_scalar_values.ListOfScalarValues(
None, 'metric', 'unit',
[1, 3, 2, 4], std=math.sqrt(2), # Pooled standard deviation.
improvement_direction=improvement_direction.UP)
self.assertEquals(4, GetNumberOfSuccessfulPageRuns(results))
self.assertEquals(0, len(results.failures))
self.assertEquals(3, len(values))
self.assertIn(blank_value, values)
self.assertIn(green_value, values)
self.assertIn(merged_value, values)
def testRunStoryDisabledStory(self):
story_set = story_module.StorySet()
story_one = DummyLocalStory(TestSharedPageState, name='one')
story_set.AddStory(story_one)
results = results_options.CreateResults(
EmptyMetadataForTest(), self.options)
story_runner.Run(_Measurement(), story_set, self.options, results,
expectations=_DisableStoryExpectations(),
metadata=EmptyMetadataForTest())
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
self.assertEquals(1, GetNumberOfSuccessfulPageRuns(results))
self.assertEquals(1, GetNumberOfSkippedPageRuns(results))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(values))
def testRunStoryOneDisabledOneNot(self):
story_set = story_module.StorySet()
story_one = DummyLocalStory(TestSharedPageState, name='one')
story_two = DummyLocalStory(TestSharedPageState, name='two')
story_set.AddStory(story_one)
story_set.AddStory(story_two)
results = results_options.CreateResults(
EmptyMetadataForTest(), self.options)
story_runner.Run(_Measurement(), story_set, self.options, results,
expectations=_DisableStoryExpectations(),
metadata=EmptyMetadataForTest())
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
self.assertEquals(2, GetNumberOfSuccessfulPageRuns(results))
self.assertEquals(1, GetNumberOfSkippedPageRuns(results))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(values))
def testRunStoryDisabledOverriddenByFlag(self):
story_set = story_module.StorySet()
story_one = DummyLocalStory(TestSharedPageState, name='one')
story_set.AddStory(story_one)
self.options.run_disabled_tests = True
results = results_options.CreateResults(
EmptyMetadataForTest(), self.options)
story_runner.Run(_Measurement(), story_set, self.options, results,
expectations=_DisableStoryExpectations(),
metadata=EmptyMetadataForTest())
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
self.assertEquals(1, GetNumberOfSuccessfulPageRuns(results))
self.assertEquals(0, GetNumberOfSkippedPageRuns(results))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(values))
def testRunStoryPopulatesHistograms(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
# pylint: disable=unused-argument
def RunPage(self, _, _2, results):
results.histograms.ImportDicts([
histogram_module.Histogram('hist', 'count').AsDict()])
def ValidateAndMeasurePage(self, page, tab, results):
pass
s1 = DummyLocalStory(TestSharedPageState, name='foo')
story_set.AddStory(s1)
test = Test()
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
hs = self.results.histograms
self.assertEqual(1, len(hs))
h = hs.GetFirstHistogram()
self.assertEqual('hist', h.name)
def testRunStoryAddsDeviceInfo(self):
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(FooStoryState, 'foo', ['bar']))
story_runner.Run(DummyTest(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
hs = self.results.histograms
generic_diagnostics = hs.GetSharedDiagnosticsOfType(
histogram_module.GenericSet)
generic_diagnostics_values = [
list(diagnostic) for diagnostic in generic_diagnostics]
self.assertGreater(len(generic_diagnostics), 3)
self.assertIn(['win10'], generic_diagnostics_values)
self.assertIn(['win'], generic_diagnostics_values)
self.assertIn(['amd64'], generic_diagnostics_values)
self.assertIn([8 * (1024 ** 3)], generic_diagnostics_values)
def testRunStoryAddsDeviceInfo_EvenInErrors(self):
class ErrorRaisingDummyLocalStory(DummyLocalStory):
def __init__(self, shared_state_class, name='', tags=None):
if name == '':
name = 'dummy local story'
super(ErrorRaisingDummyLocalStory, self).__init__(
shared_state_class, name=name, tags=tags)
def Run(self, shared_state):
raise BaseException('foo')
@property
def is_local(self):
return True
@property
def url(self):
return 'data:,'
story_set = story_module.StorySet()
story_set.AddStory(ErrorRaisingDummyLocalStory(
FooStoryState, 'foo', ['bar']))
story_runner.Run(DummyTest(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
hs = self.results.histograms
generic_diagnostics = hs.GetSharedDiagnosticsOfType(
histogram_module.GenericSet)
generic_diagnostics_values = [
list(diagnostic) for diagnostic in generic_diagnostics]
self.assertGreater(len(generic_diagnostics), 3)
self.assertIn(['win10'], generic_diagnostics_values)
self.assertIn(['win'], generic_diagnostics_values)
self.assertIn(['amd64'], generic_diagnostics_values)
self.assertIn([8 * (1024 ** 3)], generic_diagnostics_values)
def testRunStoryAddsDeviceInfo_OnePerStorySet(self):
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
# pylint: disable=unused-argument
def RunPage(self, _, _2, results):
results.histograms.ImportDicts([
histogram_module.Histogram('hist', 'count').AsDict()])
def ValidateAndMeasurePage(self, page, tab, results):
pass
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(FooStoryState, 'foo', ['bar']))
story_set.AddStory(DummyLocalStory(FooStoryState, 'abc', ['def']))
story_runner.Run(Test(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
hs = self.results.histograms
generic_diagnostics = hs.GetSharedDiagnosticsOfType(
histogram_module.GenericSet)
generic_diagnostics_values = [
list(diagnostic) for diagnostic in generic_diagnostics]
self.assertGreater(len(generic_diagnostics), 3)
self.assertIn(['win10'], generic_diagnostics_values)
self.assertIn(['win'], generic_diagnostics_values)
self.assertIn(['amd64'], generic_diagnostics_values)
self.assertIn([8 * (1024 ** 3)], generic_diagnostics_values)
self.assertEqual(1, len(
[value for value in generic_diagnostics_values if value == ['win']]))
first_histogram_diags = hs.GetFirstHistogram().diagnostics
self.assertIn(reserved_infos.ARCHITECTURES.name, first_histogram_diags)
self.assertIn(reserved_infos.MEMORY_AMOUNTS.name, first_histogram_diags)
self.assertIn(reserved_infos.OS_NAMES.name, first_histogram_diags)
self.assertIn(reserved_infos.OS_VERSIONS.name, first_histogram_diags)
def testRunStoryAddsTagMap(self):
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(FooStoryState, 'foo', ['bar']))
story_runner.Run(DummyTest(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
hs = self.results.histograms
tagmap = None
for diagnostic in hs.shared_diagnostics:
if type(diagnostic) == histogram_module.TagMap:
tagmap = diagnostic
break
self.assertIsNotNone(tagmap)
self.assertListEqual(['bar'], tagmap.tags_to_story_names.keys())
self.assertSetEqual(set(['foo']), tagmap.tags_to_story_names['bar'])
def testRunStoryAddsTagMapEvenInFatalException(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
class UnknownException(Exception):
pass
class Test(legacy_page_test.LegacyPageTest):
def __init__(self, *args):
super(Test, self).__init__(*args)
def RunPage(self, *_):
raise UnknownException('FooBarzException')
def ValidateAndMeasurePage(self, page, tab, results):
pass
s1 = DummyLocalStory(TestSharedPageState, name='foo', tags=['footag'])
s2 = DummyLocalStory(TestSharedPageState, name='bar', tags=['bartag'])
story_set.AddStory(s1)
story_set.AddStory(s2)
test = Test()
with self.assertRaises(UnknownException):
story_runner.Run(
test, story_set, self.options, self.results,
metadata=EmptyMetadataForTest())
self.assertIn('FooBarzException', self.fake_stdout.getvalue())
hs = self.results.histograms
tagmap = None
for diagnostic in hs.shared_diagnostics:
if type(diagnostic) == histogram_module.TagMap:
tagmap = diagnostic
break
self.assertIsNotNone(tagmap)
self.assertSetEqual(
set(['footag', 'bartag']), set(tagmap.tags_to_story_names.keys()))
self.assertSetEqual(set(['foo']), tagmap.tags_to_story_names['footag'])
self.assertSetEqual(set(['bar']), tagmap.tags_to_story_names['bartag'])
@decorators.Disabled('chromeos') # crbug.com/483212
def testUpdateAndCheckArchives(self):
usr_stub = system_stub.Override(story_runner, ['cloud_storage'])
wpr_stub = system_stub.Override(archive_info, ['cloud_storage'])
archive_data_dir = os.path.join(
util.GetTelemetryDir(),
'telemetry', 'internal', 'testing', 'archive_files')
try:
story_set = story_module.StorySet()
story_set.AddStory(page_module.Page(
'http://www.testurl.com', story_set, story_set.base_dir,
name='http://www.testurl.com'))
# Page set missing archive_data_file.
self.assertRaises(
story_runner.ArchiveError,
story_runner._UpdateAndCheckArchives,
story_set.archive_data_file,
story_set.wpr_archive_info,
story_set.stories)
story_set = story_module.StorySet(
archive_data_file='missing_archive_data_file.json')
story_set.AddStory(page_module.Page(
'http://www.testurl.com', story_set, story_set.base_dir,
name='http://www.testurl.com'))
# Page set missing json file specified in archive_data_file.
self.assertRaises(
story_runner.ArchiveError,
story_runner._UpdateAndCheckArchives,
story_set.archive_data_file,
story_set.wpr_archive_info,
story_set.stories)
story_set = story_module.StorySet(
archive_data_file=os.path.join(archive_data_dir, 'test.json'),
cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
story_set.AddStory(page_module.Page(
'http://www.testurl.com', story_set, story_set.base_dir,
name='http://www.testurl.com'))
# Page set with valid archive_data_file.
self.assertTrue(
story_runner._UpdateAndCheckArchives(
story_set.archive_data_file, story_set.wpr_archive_info,
story_set.stories))
story_set.AddStory(page_module.Page(
'http://www.google.com', story_set, story_set.base_dir,
name='http://www.google.com'))
# Page set with an archive_data_file which exists but is missing a page.
self.assertRaises(
story_runner.ArchiveError,
story_runner._UpdateAndCheckArchives,
story_set.archive_data_file,
story_set.wpr_archive_info,
story_set.stories)
story_set = story_module.StorySet(
archive_data_file=os.path.join(
archive_data_dir, 'test_missing_wpr_file.json'),
cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
story_set.AddStory(page_module.Page(
'http://www.testurl.com', story_set, story_set.base_dir,
name='http://www.testurl.com'))
story_set.AddStory(page_module.Page(
'http://www.google.com', story_set, story_set.base_dir,
name='http://www.google.com'))
# Page set with an archive_data_file which exists and contains all pages
# but fails to find a wpr file.
self.assertRaises(
story_runner.ArchiveError,
story_runner._UpdateAndCheckArchives,
story_set.archive_data_file,
story_set.wpr_archive_info,
story_set.stories)
finally:
usr_stub.Restore()
wpr_stub.Restore()
def _testMaxFailuresOptionIsRespectedAndOverridable(
self, num_failing_stories, runner_max_failures, options_max_failures,
expected_num_failures):
class SimpleSharedState(story_module.SharedState):
_fake_platform = FakePlatform()
_current_story = None
@property
def platform(self):
return self._fake_platform
def WillRunStory(self, story):
self._current_story = story
def RunStory(self, results):
self._current_story.Run(self)
def DidRunStory(self, results):
pass
def CanRunStory(self, story):
return True
def TearDownState(self):
pass
def DumpStateUponFailure(self, story, results):
pass
class FailingStory(story_module.Story):
def __init__(self, name):
super(FailingStory, self).__init__(
shared_state_class=SimpleSharedState,
is_local=True, name=name)
self.was_run = False
def Run(self, shared_state):
self.was_run = True
raise legacy_page_test.Failure
@property
def url(self):
return 'data:,'
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
for i in range(num_failing_stories):
story_set.AddStory(FailingStory(name='failing%d' % i))
options = _GetOptionForUnittest()
options.output_formats = ['none']
options.suppress_gtest_report = True
if options_max_failures:
options.max_failures = options_max_failures
results = results_options.CreateResults(EmptyMetadataForTest(), options)
story_runner.Run(
DummyTest(), story_set, options,
results, max_failures=runner_max_failures,
metadata=EmptyMetadataForTest())
self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
self.assertEquals(expected_num_failures, len(results.failures))
for ii, story in enumerate(story_set.stories):
self.assertEqual(story.was_run, ii < expected_num_failures)
def testMaxFailuresNotSpecified(self):
self._testMaxFailuresOptionIsRespectedAndOverridable(
num_failing_stories=5, runner_max_failures=None,
options_max_failures=None, expected_num_failures=5)
def testMaxFailuresSpecifiedToRun(self):
    # Runs up to max_failures+1 failing tests before stopping: the runner
    # only stops once the failure count exceeds max_failures, since any
    # tests run after reaching the limit might all pass.
self._testMaxFailuresOptionIsRespectedAndOverridable(
num_failing_stories=5, runner_max_failures=3,
options_max_failures=None, expected_num_failures=4)
def testMaxFailuresOption(self):
    # Runs up to max_failures+1 failing tests before stopping: the runner
    # only stops once the failure count exceeds max_failures, since any
    # tests run after reaching the limit might all pass.
self._testMaxFailuresOptionIsRespectedAndOverridable(
num_failing_stories=5, runner_max_failures=3,
options_max_failures=1, expected_num_failures=2)
def _CreateErrorProcessingMock(self, method_exceptions=None,
legacy_test=False):
if legacy_test:
test_class = legacy_page_test.LegacyPageTest
else:
test_class = story_test.StoryTest
root_mock = mock.NonCallableMock(
story=mock.NonCallableMagicMock(story_module.Story),
results=mock.NonCallableMagicMock(page_test_results.PageTestResults),
test=mock.NonCallableMagicMock(test_class),
state=mock.NonCallableMagicMock(
story_module.SharedState,
CanRunStory=mock.Mock(return_value=True)))
if method_exceptions:
root_mock.configure_mock(**{
path + '.side_effect': exception
for path, exception in method_exceptions.iteritems()})
return root_mock
def testRunStoryAndProcessErrorIfNeeded_success(self):
root_mock = self._CreateErrorProcessingMock()
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.test.Measure(root_mock.state.platform, root_mock.results),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_successLegacy(self):
root_mock = self._CreateErrorProcessingMock(legacy_test=True)
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.test.DidRunPage(root_mock.state.platform),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_tryTimeout(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.WillRunStory': exceptions.TimeoutException('foo')
})
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_tryError(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.CanRunStory': exceptions.Error('foo')
})
with self.assertRaisesRegexp(exceptions.Error, 'foo'):
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_tryUnsupportedAction(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.RunStory': page_action.PageActionNotSupported('foo')
})
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.results.AddValue(SkipValueMatcher()),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_tryUnhandlable(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'test.WillRunStory': Exception('foo')
})
with self.assertRaisesRegexp(Exception, 'foo'):
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
])
def testRunStoryAndProcessErrorIfNeeded_finallyException(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.DidRunStory': Exception('bar')
})
with self.assertRaisesRegexp(Exception, 'bar'):
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.test.Measure(root_mock.state.platform, root_mock.results),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results),
mock.call.state.DumpStateUponFailure(root_mock.story, root_mock.results)
])
def testRunStoryAndProcessErrorIfNeeded_tryTimeout_finallyException(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.RunStory': exceptions.TimeoutException('foo'),
'state.DidRunStory': Exception('bar')
})
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results)
])
def testRunStoryAndProcessErrorIfNeeded_tryError_finallyException(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'state.WillRunStory': exceptions.Error('foo'),
'test.DidRunStory': Exception('bar')
})
with self.assertRaisesRegexp(exceptions.Error, 'foo'):
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results)
])
def testRunStoryAndProcessErrorIfNeeded_tryUnsupportedAction_finallyException(
self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'test.WillRunStory': page_action.PageActionNotSupported('foo'),
'state.DidRunStory': Exception('bar')
})
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.results.AddValue(SkipValueMatcher()),
mock.call.test.DidRunStory(
root_mock.state.platform, root_mock.results),
mock.call.state.DidRunStory(root_mock.results)
])
def testRunStoryAndProcessErrorIfNeeded_tryUnhandlable_finallyException(self):
root_mock = self._CreateErrorProcessingMock(method_exceptions={
'test.Measure': Exception('foo'),
'test.DidRunStory': Exception('bar')
})
with self.assertRaisesRegexp(Exception, 'foo'):
story_runner._RunStoryAndProcessErrorIfNeeded(
root_mock.story, root_mock.results, root_mock.state, root_mock.test)
self.assertEquals(root_mock.method_calls, [
mock.call.test.WillRunStory(root_mock.state.platform),
mock.call.state.WillRunStory(root_mock.story),
mock.call.state.CanRunStory(root_mock.story),
mock.call.state.RunStory(root_mock.results),
mock.call.test.Measure(root_mock.state.platform, root_mock.results),
mock.call.state.DumpStateUponFailure(
root_mock.story, root_mock.results),
mock.call.results.AddValue(FailureValueMatcher('foo')),
mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results)
])
def _GenerateBaseBrowserFinderOptions(self):
options = fakes.CreateBrowserFinderOptions()
options.upload_results = None
options.suppress_gtest_report = False
options.results_label = None
options.reset_results = False
options.use_live_sites = False
options.max_failures = 100
options.pause = None
options.pageset_repeat = 1
options.output_formats = ['chartjson']
options.run_disabled_tests = False
return options
def testRunBenchmarkDisabledBenchmarkViaCanRunonPlatform(self):
fake_benchmark = FakeBenchmark()
fake_benchmark.SUPPORTED_PLATFORMS = []
options = self._GenerateBaseBrowserFinderOptions()
tmp_path = tempfile.mkdtemp()
try:
options.output_dir = tmp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(tmp_path, 'results-chart.json')) as f:
data = json.load(f)
self.assertFalse(data['enabled'])
finally:
shutil.rmtree(tmp_path)
def testRunBenchmarkDisabledBenchmark(self):
fake_benchmark = FakeBenchmark()
fake_benchmark.disabled = True
options = self._GenerateBaseBrowserFinderOptions()
tmp_path = tempfile.mkdtemp()
try:
options.output_dir = tmp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(tmp_path, 'results-chart.json')) as f:
data = json.load(f)
self.assertFalse(data['enabled'])
finally:
shutil.rmtree(tmp_path)
def testRunBenchmarkDisabledBenchmarkCanOverriddenByCommandLine(self):
fake_benchmark = FakeBenchmark()
fake_benchmark.disabled = True
options = self._GenerateBaseBrowserFinderOptions()
options.run_disabled_tests = True
temp_path = tempfile.mkdtemp()
try:
options.output_dir = temp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(temp_path, 'results-chart.json')) as f:
data = json.load(f)
self.assertTrue(data['enabled'])
finally:
shutil.rmtree(temp_path)
def testRunBenchmark_AddsOwners_NoComponent(self):
@benchmark.Owner(emails=['[email protected]'])
class FakeBenchmarkWithOwner(FakeBenchmark):
def __init__(self):
super(FakeBenchmark, self).__init__()
self._disabled = False
self._story_disabled = False
fake_benchmark = FakeBenchmarkWithOwner()
options = self._GenerateBaseBrowserFinderOptions()
options.output_formats = ['histograms']
temp_path = tempfile.mkdtemp()
try:
options.output_dir = temp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(temp_path, 'histograms.json')) as f:
data = json.load(f)
hs = histogram_set.HistogramSet()
hs.ImportDicts(data)
generic_diagnostics = hs.GetSharedDiagnosticsOfType(
histogram_module.GenericSet)
self.assertGreater(len(generic_diagnostics), 0)
generic_diagnostics_values = [
list(diagnostic) for diagnostic in generic_diagnostics]
self.assertIn(['[email protected]'], generic_diagnostics_values)
finally:
shutil.rmtree(temp_path)
def testRunBenchmark_AddsComponent(self):
@benchmark.Owner(emails=['[email protected]', '[email protected]'],
component='fooBar')
class FakeBenchmarkWithOwner(FakeBenchmark):
def __init__(self):
super(FakeBenchmark, self).__init__()
self._disabled = False
self._story_disabled = False
fake_benchmark = FakeBenchmarkWithOwner()
options = self._GenerateBaseBrowserFinderOptions()
options.output_formats = ['histograms']
temp_path = tempfile.mkdtemp()
try:
options.output_dir = temp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(temp_path, 'histograms.json')) as f:
data = json.load(f)
hs = histogram_set.HistogramSet()
hs.ImportDicts(data)
generic_diagnostics = hs.GetSharedDiagnosticsOfType(
histogram_module.GenericSet)
self.assertGreater(len(generic_diagnostics), 0)
generic_diagnostics_values = [
list(diagnostic) for diagnostic in generic_diagnostics]
self.assertIn(['fooBar'], generic_diagnostics_values)
self.assertIn(['[email protected]', '[email protected]'],
generic_diagnostics_values)
finally:
shutil.rmtree(temp_path)
def testRunBenchmarkTimeDuration(self):
fake_benchmark = FakeBenchmark()
options = self._GenerateBaseBrowserFinderOptions()
with mock.patch('telemetry.internal.story_runner.time.time') as time_patch:
      # Three values, because telemetry code asks for the time once more at some point
time_patch.side_effect = [1, 0, 61]
tmp_path = tempfile.mkdtemp()
try:
options.output_dir = tmp_path
story_runner.RunBenchmark(fake_benchmark, options)
with open(os.path.join(tmp_path, 'results-chart.json')) as f:
data = json.load(f)
self.assertEqual(len(data['charts']), 1)
charts = data['charts']
self.assertIn('benchmark_duration', charts)
duration = charts['benchmark_duration']
self.assertIn("summary", duration)
summary = duration['summary']
duration = summary['value']
self.assertAlmostEqual(duration, 1)
finally:
shutil.rmtree(tmp_path)
def testRunBenchmarkDisabledStoryWithBadName(self):
fake_benchmark = FakeBenchmark()
fake_benchmark.story_disabled = True
options = self._GenerateBaseBrowserFinderOptions()
tmp_path = tempfile.mkdtemp()
try:
options.output_dir = tmp_path
rc = story_runner.RunBenchmark(fake_benchmark, options)
# Test should return 0 since only error messages are logged.
self.assertEqual(rc, 0)
finally:
shutil.rmtree(tmp_path)
def testRunBenchmark_TooManyValues(self):
self.SuppressExceptionFormatting()
story_set = story_module.StorySet()
story_set.AddStory(DummyLocalStory(TestSharedPageState, name='story'))
story_runner.Run(
_Measurement(), story_set, self.options, self.results,
metadata=EmptyMetadataForTest(),
max_num_values=0)
self.assertEquals(1, len(self.results.failures))
self.assertEquals(0, GetNumberOfSuccessfulPageRuns(self.results))
self.assertIn('Too many values: 1 > 0', self.fake_stdout.getvalue())
| [
"[email protected]"
] | |
81eaca22372f4565c7fec498ad3d996d96707f81 | 2ecfe0e10d10513917e4f2770e0a56075404c5d8 | /oldnumba/exttypes/jitclass.py | 2e89053cda7b823f46c4d499735c02ce96b0d72c | [
"BSD-2-Clause"
] | permissive | laserson/numba | 84ab7615ea0177b496a63e2a86319f0b12992cd2 | 35546517b27764a9120f6dfcd82eba7f4dd858cb | refs/heads/master | 2020-05-20T23:13:23.011971 | 2014-12-08T20:16:20 | 2014-12-08T20:16:20 | 16,754,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | """
Compiling @jit extension classes works as follows:
* Create an extension Numba type holding a symtab
* Capture attribute types in the symtab ...
* ... from the class attributes:
@jit
class Foo(object):
attr = double
* ... from __init__
@jit
class Foo(object):
def __init__(self, attr):
self.attr = double(attr)
* Type infer all methods
* Compile all extension methods
* Process signatures such as @void(double)
* Infer native attributes through type inference on __init__
    * Patch the extension type with a native attributes struct
* Infer types for all other methods
* Update the ext_type with a vtab type
* Compile all methods
* Create descriptors that wrap the native attributes
* Create an extension type:
{
PyObject_HEAD
...
virtual function table (func **)
native attributes
}
The virtual function table (vtab) is a ctypes structure set as an
attribute of the extension types. Objects hold a direct pointer
to it for efficiency.
"""
from numba import typesystem
from numba.exttypes import virtual
from numba.exttypes import signatures
from numba.exttypes import validators
from numba.exttypes import compileclass
from numba.exttypes import ordering
from numba.exttypes import types as etypes
#------------------------------------------------------------------------
# Jit Extension Class Compiler
#------------------------------------------------------------------------
class JitExtensionCompiler(compileclass.ExtensionCompiler):
"""
Compile @jit extension classes.
"""
method_validators = validators.jit_validators
exttype_validators = validators.jit_type_validators
#------------------------------------------------------------------------
# Build Attributes Struct
#------------------------------------------------------------------------
class JitAttributeBuilder(compileclass.AttributeBuilder):
def finalize(self, ext_type):
ext_type.attribute_table.create_attribute_ordering(ordering.extending)
def create_descr(self, attr_name):
"""
Create a descriptor that accesses the attribute on the ctypes struct.
This is set by the extension type constructor __new__.
"""
def _get(self):
return getattr(self._numba_attrs, attr_name)
def _set(self, value):
return setattr(self._numba_attrs, attr_name, value)
return property(_get, _set)
#------------------------------------------------------------------------
# Build Extension Type
#------------------------------------------------------------------------
def create_extension(env, py_class, flags):
"""
Compile an extension class given the NumbaEnvironment and the Python
class that contains the functions that are to be compiled.
"""
flags.pop('llvm_module', None)
# ext_type = etypes.jit_exttype(py_class)
ext_type = typesystem.jit_exttype(py_class)
extension_compiler = JitExtensionCompiler(
env, py_class, dict(vars(py_class)), ext_type, flags,
signatures.JitMethodMaker(),
compileclass.AttributesInheriter(),
compileclass.Filterer(),
JitAttributeBuilder(),
virtual.StaticVTabBuilder(),
compileclass.MethodWrapperBuilder())
extension_compiler.init()
extension_compiler.infer()
extension_compiler.finalize_tables()
extension_compiler.validate()
extension_type = extension_compiler.compile()
return extension_type
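# Hedged usage sketch (assumption, not from the source): how a decorator
# front-end might drive create_extension. Only the (env, py_class, flags)
# signature is taken from the function above; the `env` object and the
# `Interval` class are placeholders.
#
#     ext_cls = create_extension(env, Interval, flags={})
#     obj = ext_cls(1.0, 3.0)   # instance carries native attrs and a vtab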
| [
"[email protected]"
] | |
e54bf79baf0acedc05e53d70a7ae27e37fdab96a | 59254f1c203bd7ebd3a5d85d5ec31959c1e90182 | /rdis/formalisms/RDIS/primitive2connection.py | a7f32d38fdb66e9533143417be4bde7ef91ff1b1 | [] | no_license | monicadelaine/preop_create | c9c687012a23d99d200d4396237ba69862a285fc | 34dbe0bb8d96d6adcb2c79ac33474007044b65dd | refs/heads/master | 2020-04-11T04:23:20.150444 | 2013-02-22T22:02:51 | 2013-02-22T22:02:51 | 68,144,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | """
__primitive2connection.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: pkilgo
Modified: Sun Apr 15 13:18:31 2012
______________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from graph_primitive2connection import *
class primitive2connection(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.graphClass_ = graph_primitive2connection
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.generatedAttributes = { }
self.realOrder = []
self.directEditing = []
def clone(self):
cloneObject = primitive2connection( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
    NOTE: DO NOT select a POST/PRE action trigger.
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <--- Remove this if you want to use QOCA
# Get the high level constraint helper and solver
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
# Constraint only makes sense if there exists 2 objects connected to this link
if(not (self.in_connections_ and self.out_connections_)): return
# Get the graphical objects (subclass of graphEntity/graphLink)
graphicalObjectLink = self.graphObject_
graphicalObjectSource = self.in_connections_[0].graphObject_
graphicalObjectTarget = self.out_connections_[0].graphObject_
objTuple = (graphicalObjectSource, graphicalObjectTarget, graphicalObjectLink)
"""
    Example constraint; see Kernel/QOCA/atom3constraints/OffsetConstraints.py
    for more types of constraints.
"""
oc.LeftExactDistance(objTuple, 20)
oc.resolve() # Resolve immediately after creating entity & constraint
| [
"[email protected]"
] | |
5ad20708bb35bfc132522baadc07ab27a5c61a61 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/pandas/2015/12/test_format.py | 6c4e4dd844fc92b601ab61e33678cf0d74327aa5 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 152,171 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
import re
from pandas.compat import range, zip, lrange, StringIO, PY3, lzip, u
import pandas.compat as compat
import itertools
import os
import sys
from textwrap import dedent
import warnings
from numpy import nan
from numpy.random import randn
import numpy as np
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except ImportError:
pass
from pandas import DataFrame, Series, Index, Timestamp, MultiIndex, date_range, NaT
import pandas.core.format as fmt
import pandas.util.testing as tm
import pandas.core.common as com
from pandas.util.terminal import get_terminal_size
import pandas as pd
from pandas.core.config import (set_option, get_option,
option_context, reset_option)
from datetime import datetime
import nose
_frame = DataFrame(tm.getSeriesData())
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split('\n')[0].startswith("<class")
c2 = r.split('\n')[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
nv = len(r.split('\n')) == 6 # 1. <class>, 2. Index, 3. Columns, 4. dtype, 5. memory usage, 6. trailing newline
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line=='...')[0][0]
except:
return False
# Make sure each row has this ... in the same place
r = repr(df)
    for l in r.splitlines():
        if not l.split()[cand_col] == '...':  # compare this row, not the whole repr
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match('^[\.\ ]+$',row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split('\n'):
if line.endswith('\\'):
return True
return False
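# Illustrative note (approximate, not from the source): a wide frame printed
# with expand_frame_repr wraps each row and marks the continuation with a
# trailing backslash, which is what the loop above detects, e.g.
#
#               0         1    ...          \
#     0  0.469112 -0.282863    ...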
class TestDataFrameFormatting(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def tearDown(self):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.ix[5] = 0
fmt.set_eng_float_format()
repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
self.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1,columns=range(10),index=range(10))
df.iloc[1,1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
self.assertTrue(('non-null' in buf.getvalue()) is result)
with option_context('display.max_info_rows',20,'display.max_info_columns',20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows',5,'display.max_info_columns',5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(max_len - 1,
max_len + 1)) for i in range(10)]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in lzip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
self.assertIn('...', line)
else:
self.assertNotIn('...', line)
with option_context("display.max_colwidth", 999999):
self.assertNotIn('...', repr(df))
with option_context("display.max_colwidth", max_len + 2):
self.assertNotIn('...', repr(df))
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5],[0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1')
with option_context("display.chop_threshold", 0.2 ):
self.assertEqual(repr(df), ' 0 1\n0 0.0 0.5\n1 0.5 0.0')
with option_context("display.chop_threshold", 0.6 ):
self.assertEqual(repr(df), ' 0 1\n0 0 0\n1 0 0')
with option_context("display.chop_threshold", None ):
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items",2000):
self.assertTrue(len(com.pprint_thing(lrange(1000))) > 1000)
with option_context("display.max_seq_items",5):
self.assertTrue(len(com.pprint_thing(lrange(1000))) < 100)
def test_repr_set(self):
self.assertEqual(com.pprint_thing(set([1])), '{1}')
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather then stylized
idx = Index(['a','b'])
res = eval("pd."+repr(idx))
tm.assert_series_equal(Series(res),Series(idx))
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"),
u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
self.assertTrue('\\' not in repr(df))
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
df_wide = DataFrame('hello', [0], lrange(10))
df_tall = DataFrame('hello', lrange(30), lrange(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10,
'display.width',20,
'display.max_rows', 20,
'display.show_dimensions', True):
with option_context('display.expand_frame_repr', True):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_truncated_repr(df_wide))
self.assertTrue(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertTrue(has_expanded_repr(df_tall))
with option_context('display.expand_frame_repr', False):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_horizontally_truncated_repr(df_wide))
self.assertFalse(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertFalse(has_expanded_repr(df_tall))
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame('hello', lrange(1000), lrange(5))
with option_context('mode.sim_interactive', False,
'display.width', 0,
'display.height', 0,
'display.max_rows',5000):
self.assertFalse(has_truncated_repr(df))
self.assertFalse(has_expanded_repr(df))
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
raise nose.SkipTest("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
self.assertFalse(has_expanded_repr(mkframe(4)))
self.assertFalse(has_expanded_repr(mkframe(5)))
self.assertFalse(has_expanded_repr(df6))
self.assertTrue(has_doubly_truncated_repr(df6))
with option_context('display.max_rows', 20,
'display.max_columns', 10):
                    # Out of max_columns boundary, but no expanding
                    # since not exceeding width
self.assertFalse(has_expanded_repr(df6))
self.assertFalse(has_truncated_repr(df6))
with option_context('display.max_rows', 9,
'display.max_columns', 10):
                    # out of vertical bounds cannot result in expanded repr
self.assertFalse(has_expanded_repr(df10))
self.assertTrue(has_vertically_truncated_repr(df10))
# width=None in terminal, auto detection
with option_context('display.max_columns', 100,
'display.max_rows', term_width * 20,
'display.width', None):
df = mkframe((term_width // 7) - 2)
self.assertFalse(has_expanded_repr(df))
df = mkframe((term_width // 7) + 2)
com.pprint_thing(df._repr_fits_horizontal_())
self.assertTrue(has_expanded_repr(df))
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame([{'a': 'foo', 'b': 'bar',
'c': 'uncomfortably long line with lots of stuff',
'd': 1},
{'a': 'foo', 'b': 'bar', 'c': 'stuff', 'd': 1}])
df.set_index(['a', 'b', 'c'])
self.assertTrue(str(df) == ' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
self.assertTrue(str(df) == ' a b c d\n'
'0 foo bar uncomfortably lo... 1\n'
'1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
        fac = 1.05 # Arbitrarily large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context('mode.sim_interactive', True):
with option_context('max_rows',None):
with option_context('max_columns',None):
# Wrap around with None
self.assertTrue(has_expanded_repr(df))
with option_context('max_rows',0):
with option_context('max_columns',0):
# Truncate with auto detection.
self.assertTrue(has_horizontally_truncated_repr(df))
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows',0):
with option_context('max_columns',None):
# Wrap around with None
self.assertTrue(has_expanded_repr(df))
# Truncate vertically
self.assertTrue(has_vertically_truncated_repr(df))
with option_context('max_rows',None):
with option_context('max_columns',0):
self.assertTrue(has_horizontally_truncated_repr(df))
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = [u('\u03c3')] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except:
pass
if not line.startswith('dtype:'):
self.assertEqual(len(line), line_len)
# it works even if sys.stdin in None
        _stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = self.frame.to_string()
tm.assertIsInstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
with option_context('display.max_rows', 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({u('c/\u03c3'): []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x%x' % x),
('float', lambda x: '[% 4.1f]' % x),
('object', lambda x: '-%s-' % str(x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=lzip(*formatters)[1])
self.assertEqual(result, (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-'))
self.assertEqual(result, result2)
def test_to_string_with_formatters_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
result = df.to_string(formatters={u('c/\u03c3'):
lambda x: '%s' % x})
self.assertEqual(result, u(' c/\u03c3\n') +
'0 1\n1 2\n2 3')
def test_east_asian_unicode_frame(self):
if PY3:
_rep = repr
else:
_rep = unicode
        # not aligned properly because of East Asian width
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
self.assertEqual(_rep(df), expected)
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ")
self.assertEqual(_rep(df), expected)
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
expected = (u" a b\nおおおお \nあ あああああ あ\n"
u"い い いいい\nうう う う\nえ えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
expected = (u" あああ いいいいい\nお \nあ あああ あ\n"
u"いいい い いいい\nうう う う\nえ えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
expected = (u" a b\nあ いい あああああ あ\n"
u"う え い いいい\nおおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u".. ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
        # Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
self.assertEqual(_rep(df), expected)
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ""")
self.assertEqual(_rep(df), expected)
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
expected = (u" a b\nおおおお \n"
u"あ あああああ あ\nい い いいい\n"
u"うう う う\nえ えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
expected = (u" あああ いいいいい\nお \n"
u"あ あああ あ\nいいい い いいい\n"
u"うう う う\nえ えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
expected = (u" a b\nあ いい あああああ あ\n"
u"う え い いいい\nおおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u"... ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
# ambiguous unicode
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'¡¡', u'ええええええ']},
index=['a', 'bb', 'c', '¡¡¡'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc ¡¡ 33333\n"
u"¡¡¡ ええええええ 4")
self.assertEqual(_rep(df), expected)
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({u('c/\u03c3'): Series()})
nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
self.assertTrue(c10 < c20 < c30)
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
self.assertEqual(len(with_header_row1), len(no_header))
def test_to_string_truncate_indices(self):
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
for column in [ tm.makeStringIndex ]:
for h in [10,20]:
for w in [10,20]:
with option_context("display.expand_frame_repr",False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
self.assertTrue(has_vertically_truncated_repr(df))
else:
self.assertFalse(has_vertically_truncated_repr(df))
with option_context("display.max_columns", 15):
if w == 20:
self.assertTrue(has_horizontally_truncated_repr(df))
else:
self.assertFalse(has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,"display.max_columns", 15):
if h == 20 and w == 20:
self.assertTrue(has_doubly_truncated_repr(df))
else:
self.assertFalse(has_doubly_truncated_repr(df))
def test_to_string_truncate_multilevel(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
with option_context("display.max_rows", 7,"display.max_columns", 7):
self.assertTrue(has_doubly_truncated_repr(df))
def test_to_html_with_col_space(self):
def check_with_width(df, col_space):
import re
# check that col_space affects HTML generation
# and be very brittle about it.
html = df.to_html(col_space=col_space)
hdrs = [x for x in html.split("\n") if re.search("<th[>\s]", x)]
self.assertTrue(len(hdrs) > 0)
for h in hdrs:
self.assertTrue("min-width" in h)
self.assertTrue(str(col_space) in h)
df = DataFrame(np.random.random(size=(1, 3)))
check_with_width(df, 30)
check_with_width(df, 50)
def test_to_html_with_empty_string_label(self):
# GH3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
res = df.to_html()
self.assertTrue("rowspan" not in res)
def test_to_html_unicode(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
expected = u'<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>\u03c3</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n </tr>\n <tr>\n <th>2</th>\n <td>2</td>\n </tr>\n <tr>\n <th>3</th>\n <td>3</td>\n </tr>\n <tr>\n <th>4</th>\n <td>4</td>\n </tr>\n <tr>\n <th>5</th>\n <td>5</td>\n </tr>\n <tr>\n <th>6</th>\n <td>6</td>\n </tr>\n <tr>\n <th>7</th>\n <td>7</td>\n </tr>\n <tr>\n <th>8</th>\n <td>8</td>\n </tr>\n <tr>\n <th>9</th>\n <td>9</td>\n </tr>\n </tbody>\n</table>'
self.assertEqual(df.to_html(), expected)
df = DataFrame({'A': [u('\u03c3')]})
expected = u'<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>A</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>\u03c3</td>\n </tr>\n </tbody>\n</table>'
self.assertEqual(df.to_html(), expected)
def test_to_html_escaped(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<type 'str'>",
b: "<type 'str'>"},
'co>l2':{a: "<type 'str'>",
b: "<type 'str'>"}}
rs = DataFrame(test_dict).to_html()
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &amp;</th>
<td><type 'str'></td>
<td><type 'str'></td>
</tr>
<tr>
<th>stri>ng2 &amp;</th>
<td><type 'str'></td>
<td><type 'str'></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_escape_disabled(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<b>bold</b>",
b: "<b>bold</b>"},
'co>l2': {a: "<b>bold</b>",
b: "<b>bold</b>"}}
rs = DataFrame(test_dict).to_html(escape=False)
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &</th>
<td><b>bold</b></td>
<td><b>bold</b></td>
</tr>
<tr>
<th>stri>ng2 &</th>
<td><b>bold</b></td>
<td><b>bold</b></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_multiindex_index_false(self):
# issue 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)}
)
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
result = df.to_html(index=False)
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th colspan="2" halign="left">a</th>
<th colspan="2" halign="left">b</th>
</tr>
<tr>
<th>c</th>
<th>d</th>
<th>c</th>
<th>d</th>
</tr>
</thead>
<tbody>
<tr>
<td>0</td>
<td>3</td>
<td>5</td>
<td>3</td>
</tr>
<tr>
<td>1</td>
<td>4</td>
<td>6</td>
<td>4</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
self.assertEqual(result, expected)
def test_to_html_multiindex_sparsify_false_multi_sparse(self):
with option_context('display.multi_sparse', False):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=index[::2], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_multiindex_sparsify(self):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
result = df.to_html()
expected = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=index[::2], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_index_formatter(self):
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4))
f = lambda x: 'abcd'[x]
result = df.to_html(formatters={'__index__': f})
expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>foo</th>
<th>None</th>
</tr>
</thead>
<tbody>
<tr>
<th>a</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>b</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>c</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>d</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_regression_GH6098(self):
df = DataFrame({u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')],
u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_()
def test_to_html_truncate(self):
raise nose.SkipTest("unreliable on travis")
index = pd.DatetimeIndex(start='20010101',freq='D',periods=20)
df = DataFrame(index=index,columns=range(20))
fmt.set_option('display.max_rows',8)
fmt.set_option('display.max_columns',4)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>...</th>
<th>18</th>
<th>19</th>
</tr>
</thead>
<tbody>
<tr>
<th>2001-01-01</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-02</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-03</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-04</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>2001-01-17</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-18</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-19</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-20</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>20 rows × 20 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_to_html_truncate_multi_index(self):
raise nose.SkipTest("unreliable on travis")
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
fmt.set_option('display.max_rows',7)
fmt.set_option('display.max_columns',7)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th></th>
<th colspan="2" halign="left">bar</th>
<th>baz</th>
<th>...</th>
<th>foo</th>
<th colspan="2" halign="left">qux</th>
</tr>
<tr>
<th></th>
<th></th>
<th>one</th>
<th>two</th>
<th>one</th>
<th>...</th>
<th>two</th>
<th>one</th>
<th>two</th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">bar</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>baz</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>...</th>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>foo</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th rowspan="2" valign="top">qux</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_to_html_truncate_multi_index_sparse_off(self):
raise nose.SkipTest("unreliable on travis")
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
fmt.set_option('display.max_rows',7)
fmt.set_option('display.max_columns',7)
fmt.set_option('display.multi_sparse',False)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th></th>
<th>bar</th>
<th>bar</th>
<th>baz</th>
<th>...</th>
<th>foo</th>
<th>qux</th>
<th>qux</th>
</tr>
<tr>
<th></th>
<th></th>
<th>one</th>
<th>two</th>
<th>one</th>
<th>...</th>
<th>two</th>
<th>one</th>
<th>two</th>
</tr>
</thead>
<tbody>
<tr>
<th>bar</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>bar</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>baz</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>foo</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>qux</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>qux</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split('\n')
self.assertEqual(len(lines[1]), len(lines[2]))
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({u('c/\u03c3'): Series({'test': np.NaN})})
compat.text_type(dm.to_string())
def test_string_repr_encoding(self):
filepath = tm.get_data_path('unicode_series.csv')
df = pd.read_csv(filepath, header=None, encoding='latin1')
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': np.inf * np.empty(10)})
repr(df)
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
fmt.set_option('display.max_rows', 200)
def test_pprint_thing(self):
from pandas.core.common import pprint_thing as pp_t
if PY3:
raise nose.SkipTest("doesn't work on Python 3")
self.assertEqual(pp_t('a') , u('a'))
self.assertEqual(pp_t(u('a')) , u('a'))
self.assertEqual(pp_t(None) , 'None')
self.assertEqual(pp_t(u('\u05d0'), quote_strings=True),
u("u'\u05d0'"))
self.assertEqual(pp_t(u('\u05d0'), quote_strings=False),
u('\u05d0'))
self.assertEqual(pp_t((u('\u05d0'),
u('\u05d1')), quote_strings=True),
u("(u'\u05d0', u'\u05d1')"))
self.assertEqual(pp_t((u('\u05d0'), (u('\u05d1'),
u('\u05d2'))),
quote_strings=True),
u("(u'\u05d0', (u'\u05d1', u'\u05d2'))"))
self.assertEqual(pp_t(('foo', u('\u05d0'), (u('\u05d0'),
u('\u05d0'))),
quote_strings=True),
u("(u'foo', u'\u05d0', (u'\u05d0', u'\u05d0'))"))
# escape embedded tabs in string
# GH #2038
self.assertTrue(not "\t" in pp_t("a\tb", escape_chars=("\t",)))
def test_wide_repr(self):
with option_context('mode.sim_interactive', True, 'display.show_dimensions', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
assert "10 rows x %d columns" % (max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 120):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(randn(5, 3), columns=['a' * 90, 'b' * 90, 'c' * 90])
rep_str = repr(df)
self.assertEqual(len(rep_str.splitlines()), 20)
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
for line in wide_repr.splitlines()[1::13]:
self.assertIn('DataFrame Index', line)
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
with option_context('mode.sim_interactive', True):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
for line in wide_repr.splitlines()[1::13]:
self.assertIn('Level 0 Level 1', line)
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
midx = MultiIndex.from_arrays(
tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(
tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(
{'a': ['a' * 30, 'b' * 30], 'b': ['c' * 70, 'd' * 80]})
result = repr(df)
self.assertTrue('ccccc' in result)
self.assertTrue('ddddd' in result)
def test_long_series(self):
n = 1000
s = Series(np.random.randint(-50,50,n),index=['s%04d' % x for x in range(n)], dtype='int64')
import re
str_rep = str(s)
nmatches = len(re.findall('dtype',str_rep))
self.assertEqual(nmatches, 1)
def test_index_with_nan(self):
# GH 2850
df = DataFrame({'id1': {0: '1a3', 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
'id3': {0: '78d', 1: '79d'}, 'value': {0: 123, 1: 64}})
# multi-index
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# index
y = df.set_index('id2')
result = y.to_string()
expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nd67 9h4 79d 64')
self.assertEqual(result, expected)
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# all-nan in mi
df2 = df.copy()
df2.ix[:,'id2'] = np.nan
y = df2.set_index('id2')
result = y.to_string()
expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64')
self.assertEqual(result, expected)
# partial nan in mi
df2 = df.copy()
df2.ix[:,'id2'] = np.nan
y = df2.set_index(['id2','id3'])
result = y.to_string()
expected = u(' id1 value\nid2 id3 \nNaN 78d 1a3 123\n 79d 9h4 64')
self.assertEqual(result, expected)
df = DataFrame({'id1': {0: np.nan, 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
'id3': {0: np.nan, 1: '79d'}, 'value': {0: 123, 1: 64}})
y = df.set_index(['id1','id2','id3'])
result = y.to_string()
expected = u(' value\nid1 id2 id3 \nNaN NaN NaN 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
def test_to_string(self):
from pandas import read_table
import re
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
tm.assertIsInstance(s, compat.string_types)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
joined = '\n'.join([re.sub('\s+', ' ', x).strip() for x in lines[1:]])
recons = read_table(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
self.assertEqual(recons['A'].count(), biggie['A'].count())
self.assertTrue((np.abs(recons['A'].dropna() -
biggie['A'].dropna()) < 0.1).all())
# expected = ['B', 'A']
# self.assertEqual(header, expected)
result = biggie.to_string(columns=['A'], col_space=17)
header = result.split('\n')[0].strip().split()
expected = ['A']
self.assertEqual(header, expected)
biggie.to_string(columns=['B', 'A'],
formatters={'A': lambda x: '%.1f' % x})
biggie.to_string(columns=['B', 'A'], float_format=str)
biggie.to_string(columns=['B', 'A'], col_space=12,
float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({'x': [1, 2, 3],
'y': [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
self.assertEqual(df_s, expected)
def test_to_string_no_index(self):
df = DataFrame({'x': [1, 2, 3],
'y': [4, 5, 6]})
df_s = df.to_string(index=False)
expected = " x y\n 1 4\n 2 5\n 3 6"
self.assertEqual(df_s, expected)
def test_to_string_float_formatting(self):
self.reset_display_options()
fmt.set_option('display.precision', 5, 'display.column_space',
12, 'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
1.7e+8, 1.253456, np.pi, -1e6]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
'2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
'5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
'8 -1.00000e+006')
else:
expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
'2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
'5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
'8 -1.00000e+06')
self.assertEqual(df_s, expected)
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string()
expected = (' x\n'
'0 3234.000\n'
'1 0.253')
self.assertEqual(df_s, expected)
self.reset_display_options()
self.assertEqual(get_option("display.precision"), 6)
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n'
'0 1.000000e+009\n'
'1 2.512000e-001')
else:
expected = (' x\n'
'0 1.000000e+09\n'
'1 2.512000e-01')
self.assertEqual(df_s, expected)
def test_to_string_small_float_values(self):
df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if '%.4g' % 1.7e8 == '1.7e+008':
expected = (' a\n'
'0 1.500000e+000\n'
'1 1.000000e-017\n'
'2 -5.500000e-007')
else:
expected = (' a\n'
'0 1.500000e+00\n'
'1 1.000000e-17\n'
'2 -5.500000e-07')
self.assertEqual(result, expected)
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = (' 0\n'
'0 0\n'
'1 0\n'
'2 -0')
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(lrange(5), index=index)
result = df.to_string()
expected = (' 0\n'
'1.5 0\n'
'2.0 1\n'
'3.0 2\n'
'4.0 3\n'
'5.0 4')
self.assertEqual(result, expected)
def test_to_string_ascii_error(self):
data = [('0 ',
u(' .gitignore '),
u(' 5 '),
' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
'\xa2\xe2\x80\xa2\xe2\x80\xa2')]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
self.assertTrue(issubclass(df['x'].dtype.type, np.integer))
output = df.to_string()
expected = (' x\n'
'0 -15\n'
'1 20\n'
'2 25\n'
'3 -35')
self.assertEqual(output, expected)
def test_to_string_index_formatter(self):
df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])
rs = df.to_string(formatters={'__index__': lambda x: 'abc'[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
self.assertEqual(rs, xp)
def test_to_string_left_justify_cols(self):
self.reset_display_options()
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n'
'0 3234.000\n'
'1 0.253')
self.assertEqual(df_s, expected)
def test_to_string_format_na(self):
self.reset_display_options()
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0000 foo\n'
'2 -2.1234 foooo\n'
'3 3.0000 fooooo\n'
'4 4.0000 bar')
self.assertEqual(result, expected)
df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1 foo\n'
'2 -2 foooo\n'
'3 3 fooooo\n'
'4 4 bar')
self.assertEqual(result, expected)
def test_to_string_line_width(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
s = df.to_string(line_width=80)
self.assertEqual(max(len(l) for l in s.split('\n')), 80)
def test_show_dimensions(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', True):
self.assertTrue('5 rows' in str(df))
self.assertTrue('5 rows' in df._repr_html_())
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', False):
self.assertFalse('5 rows' in str(df))
self.assertFalse('5 rows' in df._repr_html_())
with option_context('display.max_rows', 2, 'display.max_columns', 2, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 'truncate'):
self.assertTrue('5 rows' in str(df))
self.assertTrue('5 rows' in df._repr_html_())
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 'truncate'):
self.assertFalse('5 rows' in str(df))
self.assertFalse('5 rows' in df._repr_html_())
def test_to_html(self):
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
s = biggie.to_html()
buf = StringIO()
retval = biggie.to_html(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
tm.assertIsInstance(s, compat.string_types)
biggie.to_html(columns=['B', 'A'], col_space=17)
biggie.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '%.1f' % x})
biggie.to_html(columns=['B', 'A'], float_format=str)
biggie.to_html(columns=['B', 'A'], col_space=12,
float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_html()
def test_to_html_filename(self):
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
with tm.ensure_clean('test.html') as path:
biggie.to_html(path)
with open(path, 'r') as f:
s = biggie.to_html()
s2 = f.read()
self.assertEqual(s, s2)
frame = DataFrame(index=np.arange(200))
with tm.ensure_clean('test.html') as path:
frame.to_html(path)
with open(path, 'r') as f:
self.assertEqual(frame.to_html(), f.read())
def test_to_html_with_no_bold(self):
x = DataFrame({'x': randn(5)})
ashtml = x.to_html(bold_rows=False)
self.assertFalse('<strong' in ashtml[ashtml.find("</thead>")])
def test_to_html_columns_arg(self):
result = self.frame.to_html(columns=['A'])
self.assertNotIn('<th>B</th>', result)
def test_to_html_multiindex(self):
columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2),
np.mod(lrange(4), 2))),
names=['CL0', 'CL1'])
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify='left')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr>\n'
' <th>CL0</th>\n'
' <th colspan="2" halign="left">0</th>\n'
' <th colspan="2" halign="left">1</th>\n'
' </tr>\n'
' <tr>\n'
' <th>CL1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>a</td>\n'
' <td>b</td>\n'
' <td>c</td>\n'
' <td>d</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>e</td>\n'
' <td>f</td>\n'
' <td>g</td>\n'
' <td>h</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
columns = MultiIndex.from_tuples(list(zip(range(4),
np.mod(lrange(4), 2))))
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify='right')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr>\n'
' <th></th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>2</th>\n'
' <th>3</th>\n'
' </tr>\n'
' <tr>\n'
' <th></th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>a</td>\n'
' <td>b</td>\n'
' <td>c</td>\n'
' <td>d</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>e</td>\n'
' <td>f</td>\n'
' <td>g</td>\n'
' <td>h</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
def test_to_html_justify(self):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify='left')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: left;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>6</td>\n'
' <td>1</td>\n'
' <td>223442</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>30000</td>\n'
' <td>2</td>\n'
' <td>0</td>\n'
' </tr>\n'
' <tr>\n'
' <th>2</th>\n'
' <td>2</td>\n'
' <td>70000</td>\n'
' <td>1</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
result = df.to_html(justify='right')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>6</td>\n'
' <td>1</td>\n'
' <td>223442</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>30000</td>\n'
' <td>2</td>\n'
' <td>0</td>\n'
' </tr>\n'
' <tr>\n'
' <th>2</th>\n'
' <td>2</td>\n'
' <td>70000</td>\n'
' <td>1</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
def test_to_html_index(self):
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.NaN]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>foo</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>baz</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
expected_without_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
result = df.to_html(index=False)
for i in index:
self.assertNotIn(i, result)
self.assertEqual(result, expected_without_index)
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' <tr>\n'
' <th>idx</th>\n'
' <th></th>\n'
' <th></th>\n'
' <th></th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>foo</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>baz</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
self.assertEqual(df.to_html(index=False), expected_without_index)
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th rowspan="2" valign="top">foo</th>\n'
' <th>car</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bike</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <th>car</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
self.assertNotIn(i, result)
# must be the same result as normal index
self.assertEqual(result, expected_without_index)
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' <tr>\n'
' <th>idx1</th>\n'
' <th>idx2</th>\n'
' <th></th>\n'
' <th></th>\n'
' <th></th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th rowspan="2" valign="top">foo</th>\n'
' <th>car</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bike</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <th>car</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
self.assertEqual(df.to_html(index=False), expected_without_index)
def test_repr_html(self):
self.frame._repr_html_()
fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
self.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
self.assertTrue('2 rows' in df._repr_html_())
fmt.set_option('display.show_dimensions', False)
self.assertFalse('2 rows' in df._repr_html_())
self.reset_display_options()
def test_repr_html_wide(self):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
reg_repr = df._repr_html_()
assert "..." not in reg_repr
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
wide_repr = wide_df._repr_html_()
assert "..." in wide_repr
def test_repr_html_wide_multiindex_cols(self):
max_cols = get_option('display.max_columns')
mcols = MultiIndex.from_product([np.arange(max_cols//2),
['foo', 'bar']],
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
reg_repr = df._repr_html_()
assert '...' not in reg_repr
mcols = MultiIndex.from_product((np.arange(1+(max_cols//2)),
['foo', 'bar']),
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
wide_repr = df._repr_html_()
assert '...' in wide_repr
def test_repr_html_long(self):
max_rows = get_option('display.max_rows')
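        # 41 + max_rows//2 is a value from the middle of column B: it is
        # visible while the full frame fits and elided once truncated.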
h = max_rows - 1
df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
long_repr = df._repr_html_()
assert '..' in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_float(self):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx')
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(40 + h) in reg_repr
h = max_rows + 1
df = DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx')
long_repr = df._repr_html_()
assert '..' in long_repr
assert '31' not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
max_rows = get_option('display.max_rows')
max_L1 = max_rows//2
tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn(max_L1*2, 2), index=idx,
columns=['A', 'B'])
reg_repr = df._repr_html_()
assert '...' not in reg_repr
tuples = list(itertools.product(np.arange(max_L1+1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn((max_L1+1)*2, 2), index=idx,
columns=['A', 'B'])
long_repr = df._repr_html_()
assert '...' in long_repr
def test_repr_html_long_and_wide(self):
max_cols = get_option('display.max_columns')
max_rows = get_option('display.max_rows')
h, w = max_rows-1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '...' not in df._repr_html_()
h, w = max_rows+1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '...' in df._repr_html_()
def test_info_repr(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
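        # 'display.large_repr' = 'info' makes oversized frames fall back
        # to the DataFrame.info() summary instead of a truncated repr.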
# Long
h, w = max_rows+1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert has_vertically_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
# Wide
h, w = max_rows-1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert has_horizontally_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(randn(10, 5))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
self.assertTrue(has_non_verbose_info_repr(df))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 5):
self.assertFalse(has_non_verbose_info_repr(df))
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
# Long
h, w = max_rows+1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert r'<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert r'<class' in df._repr_html_()
# Wide
h, w = max_rows-1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert '<class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self):
def get_ipython():
return {'config':
{'KernelApp':
{'parent_appname': 'ipython-qtconsole'}}}
repstr = self.frame._repr_html_()
self.assertIsNotNone(repstr)
fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
self.assertIn('class', repstr) # info fallback
self.reset_display_options()
def test_to_html_with_classes(self):
df = DataFrame()
result = df.to_html(classes="sortable draggable")
expected = dedent("""
<table border="1" class="dataframe sortable draggable">
<thead>
<tr style="text-align: right;">
<th></th>
</tr>
</thead>
<tbody>
</tbody>
</table>
""").strip()
self.assertEqual(result, expected)
result = df.to_html(classes=["sortable", "draggable"])
self.assertEqual(result, expected)
def test_pprint_pathological_object(self):
"""
        If the test fails, the stack will overflow and nose will crash,
        but it won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
        repr(df)  # just don't die
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
skip = True
for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
if line.startswith('dtype:'):
continue
if _three_digit_exp():
self.assertTrue(('+010' in line) or skip)
else:
self.assertTrue(('+10' in line) or skip)
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
self.assertTrue("'a': 1" in val)
self.assertTrue("'b': 2" in val)
def test_to_latex_filename(self):
with tm.ensure_clean('test.tex') as path:
self.frame.to_latex(path)
with open(path, 'r') as f:
self.assertEqual(self.frame.to_latex(), f.read())
def test_to_latex(self):
# it works!
self.frame.to_latex()
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex()
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_latex_format(self):
# GH Bug #9402
self.frame.to_latex(column_format='ccc')
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(column_format='ccc')
withindex_expected = r"""\begin{tabular}{ccc}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
def test_to_latex_multiindex(self):
df = DataFrame({('x', 'y'): ['a']})
result = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & x \\
{} & y \\
\midrule
0 & a \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
result = df.T.to_latex()
expected = r"""\begin{tabular}{lll}
\toprule
& & 0 \\
\midrule
x & y & a \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
df = DataFrame.from_dict({
('c1', 0): pd.Series(dict((x, x) for x in range(4))),
('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c2', 0): pd.Series(dict((x, x) for x in range(4))),
('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c3', 0): pd.Series(dict((x, x) for x in range(4))),
}).T
result = df.to_latex()
expected = r"""\begin{tabular}{llrrrr}
\toprule
& & 0 & 1 & 2 & 3 \\
\midrule
c1 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c2 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
# GH 10660
df = pd.DataFrame({'a':[0,0,1,1], 'b':list('abab'), 'c':[1,2,3,4]})
result = df.set_index(['a', 'b']).to_latex()
expected = r"""\begin{tabular}{llr}
\toprule
& & c \\
a & b & \\
\midrule
0 & a & 1 \\
& b & 2 \\
1 & a & 3 \\
& b & 4 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
result = df.groupby('a').describe().to_latex()
expected = r"""\begin{tabular}{llr}
\toprule
& & c \\
a & {} & \\
\midrule
0 & count & 2.000000 \\
& mean & 1.500000 \\
& std & 0.707107 \\
& min & 1.000000 \\
& 25\% & 1.250000 \\
& 50\% & 1.500000 \\
& 75\% & 1.750000 \\
& max & 2.000000 \\
1 & count & 2.000000 \\
& mean & 3.500000 \\
& std & 0.707107 \\
& min & 3.000000 \\
& 25\% & 3.250000 \\
& 50\% & 3.500000 \\
& 75\% & 3.750000 \\
& max & 4.000000 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
def test_to_latex_escape(self):
a = 'a'
b = 'b'
test_dict = {u('co^l1') : {a: "a",
b: "b"},
u('co$e^x$'): {a: "a",
b: "b"}}
unescaped_result = DataFrame(test_dict).to_latex(escape=False)
escaped_result = DataFrame(test_dict).to_latex() # default: escape=True
unescaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co$e^x$ & co^l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
escaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co\$e\textasciicircumx\$ & co\textasciicircuml1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
self.assertEqual(unescaped_result, unescaped_expected)
self.assertEqual(escaped_result, escaped_expected)
def test_to_latex_longtable(self):
self.frame.to_latex(longtable=True)
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(longtable=True)
withindex_expected = r"""\begin{longtable}{lrl}
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False, longtable=True)
withoutindex_expected = r"""\begin{longtable}{rl}
\toprule
a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
1 & b1 \\
2 & b2 \\
\end{longtable}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_latex_escape_special_chars(self):
special_characters = ['&','%','$','#','_',
'{','}','~','^','\\']
df = DataFrame(data=special_characters)
observed = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
self.assertEqual(observed, expected)
def test_to_latex_no_header(self):
# GH 7124
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(header=False)
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False, header=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_csv_quotechar(self):
df = DataFrame({'col' : [1,2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$", engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None, engine='python')
def test_to_csv_doublequote(self):
df = DataFrame({'col' : ['a"a', '"bb"']})
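        # doublequote=True (the csv module default) escapes embedded
        # quote characters by doubling them, per the usual CSV convention.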
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True, engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(Error, 'escapechar'):
df.to_csv(path, doublequote=False, engine='python')
def test_to_csv_escapechar(self):
df = DataFrame({'col' : ['a"a', '"bb"']})
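        # with doublequote=False, embedded quotes are escaped with an
        # explicit escapechar instead of being doubled.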
expected = """\
"","col"
"0","a\\"a"
"1","\\"bb\\""
"""
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\',
engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
df = DataFrame({'col' : ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\', engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
def test_csv_to_string(self):
df = DataFrame({'col' : [1,2]})
expected = ',col\n0,1\n1,2\n'
self.assertEqual(df.to_csv(), expected)
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
self.assertEqual(df.to_csv(), expected_default)
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
self.assertEqual(df.to_csv(decimal=',',sep=';'), expected_european_excel)
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
self.assertEqual(df.to_csv(float_format = '%.2f'), expected_float_format_default)
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
self.assertEqual(df.to_csv(decimal=',',sep=';', float_format = '%.2f'), expected_float_format)
# GH 11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
self.assertEqual(
df.to_csv(index=False, decimal='^'), expected)
# same but for an index
self.assertEqual(
df.set_index('a').to_csv(decimal='^'), expected)
# same for a multi-index
self.assertEqual(
df.set_index(['a', 'b']).to_csv(decimal="^"), expected)
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
self.assertEqual(
df.set_index('a').to_csv(float_format='%.2f'), expected)
# same for a multi-index
self.assertEqual(
df.set_index(['a', 'b']).to_csv(float_format='%.2f'), expected)
def test_to_csv_na_rep(self):
# testing if NaN values are correctly represented in the index
# GH 11553
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0.0,0,2\n_,1,3\n"
self.assertEqual(df.set_index('a').to_csv(na_rep='_'), expected)
self.assertEqual(df.set_index(['a', 'b']).to_csv(na_rep='_'), expected)
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n_,0,2\n_,1,3\n"
self.assertEqual(df.set_index('a').to_csv(na_rep='_'), expected)
self.assertEqual(df.set_index(['a', 'b']).to_csv(na_rep='_'), expected)
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0,0,2\n0,1,3\n"
self.assertEqual(df.set_index('a').to_csv(na_rep='_'), expected)
self.assertEqual(df.set_index(['a', 'b']).to_csv(na_rep='_'), expected)
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101',periods=5,freq='s')})
df_day = DataFrame({'A': pd.date_range('20130101',periods=5,freq='d')})
expected_default_sec = ',A\n0,2013-01-01 00:00:00\n1,2013-01-01 00:00:01\n2,2013-01-01 00:00:02' + \
'\n3,2013-01-01 00:00:03\n4,2013-01-01 00:00:04\n'
self.assertEqual(df_sec.to_csv(), expected_default_sec)
expected_ymdhms_day = ',A\n0,2013-01-01 00:00:00\n1,2013-01-02 00:00:00\n2,2013-01-03 00:00:00' + \
'\n3,2013-01-04 00:00:00\n4,2013-01-05 00:00:00\n'
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S'), expected_ymdhms_day)
expected_ymd_sec = ',A\n0,2013-01-01\n1,2013-01-01\n2,2013-01-01\n3,2013-01-01\n4,2013-01-01\n'
self.assertEqual(df_sec.to_csv(date_format='%Y-%m-%d'), expected_ymd_sec)
expected_default_day = ',A\n0,2013-01-01\n1,2013-01-02\n2,2013-01-03\n3,2013-01-04\n4,2013-01-05\n'
self.assertEqual(df_day.to_csv(), expected_default_day)
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d'), expected_default_day)
# testing if date_format parameter is taken into account for
# multi-indexed dataframes (GH 7791)
df_sec['B'] = 0
df_sec['C'] = 1
expected_ymd_sec = 'A,B,C\n2013-01-01,0,1\n'
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
self.assertEqual(
df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d'),
expected_ymd_sec
)
# deprecation GH11274
def test_to_csv_engine_kw_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
df.to_csv(engine='python')
class TestSeriesFormatting(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series([u('\u03c3')] * 10)
repr(s)
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
        float_fmt = '%.4f'.__mod__
        result = self.ts.to_string(float_format=float_fmt)
        result = [x.split()[1] for x in result.split('\n')[:-1]]
        expected = [float_fmt(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, 'Series([], Freq: B)')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, 'Series([], Freq: B)')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Freq: B, Name: foo, Length: %d, dtype: float64" % len(cp))
def test_freq_name_separation(self):
s = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
self.assertTrue('Freq: D, Name: 0' in result)
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = (u('0 foo\n') +
u('1 NaN\n') +
u('2 -1.23\n') +
u('3 4.56'))
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') +
'1 NaN\n' +
'2 bar\n' +
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') +
'1 5\n' +
'2 bar\n' +
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = (u('0 NaN\n') +
'1 1.5678\n' +
'2 NaN\n' +
'3 -3.0000\n' +
'4 NaN')
self.assertEqual(result, expected)
def test_to_string_without_index(self):
        # GH 11729: test the index=False option
        s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = (u(' 1\n') +
' 2\n' +
' 3\n' +
' 4')
self.assertEqual(result, expected)
def test_unicode_name_in_footer(self):
s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
if PY3:
_rep = repr
else:
_rep = unicode
        # not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\nAB 22\n"
u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
        # Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\nAB 22\n"
u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# ambiguous unicode
s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
expected = (u"ああ ¡¡\n¡¡¡¡いい い¡¡\n¡¡ ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
for line in repr(Series(vals)).split('\n'):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
self.assertIn('+010', line)
else:
self.assertIn('+10', line)
def test_datetimeindex(self):
index = date_range('20130102',periods=6)
s = Series(1,index=index)
result = s.to_string()
self.assertTrue('2013-01-02' in result)
# nat in index
s2 = Series(2, index=[ Timestamp('20130111'), NaT ])
s = s2.append(s)
result = s.to_string()
self.assertTrue('NaT' in result)
# nat in summary
result = str(s2.index)
self.assertTrue('NaT' in result)
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
# GH2146
# adding NaTs
y = s-s.shift(1)
result = y.to_string()
self.assertTrue('1 days' in result)
self.assertTrue('00:00:00' not in result)
self.assertTrue('NaT' in result)
# with frac seconds
o = Series([datetime(2012,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +23:59:59.999850' in result)
        # mixed-sign offsets: negative deltas display as '-1 days +HH:MM:SS'
o = Series([datetime(2012,1,1,1)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +23:00:00' in result)
self.assertTrue('1 days 23:00:00' in result)
o = Series([datetime(2012,1,1,1,1)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +22:59:00' in result)
self.assertTrue('1 days 22:59:00' in result)
o = Series([datetime(2012,1,1,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +22:58:59.999850' in result)
self.assertTrue('0 days 22:58:59.999850' in result)
# neg time
td = timedelta(minutes=5,seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
self.assertTrue('-1 days +23:54:57' in result)
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
self.assertTrue('2012-01-01 23:59:59.999450' in result)
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days',periods=3))
result = td.to_string()
self.assertEqual(result,u("0 1 days\n1 2 days\n2 3 days"))
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2],
'B': ['2012-01-01', '2012-01-02']})
df['B'] = pd.to_datetime(df.B)
result = repr(df.ix[0])
self.assertTrue('2012-01-01' in result)
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
s = Series(randn(8), index=index)
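        # expected line counts: shown data rows (+ one '...' row when
        # truncated) + the index-names header + the dtype footer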
with option_context("display.max_rows", 10):
self.assertEqual(len(str(s).split('\n')),10)
with option_context("display.max_rows", 3):
self.assertEqual(len(str(s).split('\n')),5)
with option_context("display.max_rows", 2):
self.assertEqual(len(str(s).split('\n')),5)
with option_context("display.max_rows", 1):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 0):
self.assertEqual(len(str(s).split('\n')),10)
# index
s = Series(randn(8), None)
with option_context("display.max_rows", 10):
self.assertEqual(len(str(s).split('\n')),9)
with option_context("display.max_rows", 3):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 2):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 1):
self.assertEqual(len(str(s).split('\n')),3)
with option_context("display.max_rows", 0):
self.assertEqual(len(str(s).split('\n')),9)
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1,1,1,1,1,1,1,1,1,1,0.9999,1,1]*10)
with option_context("display.max_rows", 10):
res = repr(s)
exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
'1.0000\n4 1.0000\n ... \n125 '
'1.0000\n126 1.0000\n127 0.9999\n128 '
'1.0000\n129 1.0000\ndtype: float64')
self.assertEqual(res, exp)
@staticmethod
def gen_test_series():
s1 = pd.Series(['a']*100)
s2 = pd.Series(['ab']*100)
s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
s4 = s3[::-1]
test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}
return test_sers
    def check_ncols(self, s):
        with option_context("display.max_rows", 10):
            res = repr(s)
        lines = [line for line in res.split('\n')
                 if not re.match(r'[^\.]*\.+', line)][:-1]
        ncolsizes = len(set(len(line.strip()) for line in lines))
        self.assertEqual(ncolsizes, 1)
def test_format_explicit(self):
test_sers = self.gen_test_series()
with option_context("display.max_rows", 4):
res = repr(test_sers['onel'])
exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
self.assertEqual(exp, res)
res = repr(test_sers['twol'])
exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
' object')
self.assertEqual(exp, res)
res = repr(test_sers['asc'])
exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
' abcdef\ndtype: object')
self.assertEqual(exp, res)
res = repr(test_sers['desc'])
exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
' a\ndtype: object')
self.assertEqual(exp, res)
def test_ncols(self):
test_sers = self.gen_test_series()
for s in test_sers.values():
            self.check_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10),dtype='int64')
with option_context("display.max_rows", 1):
strrepr = repr(s).split('\n')
exp1 = ['0', '0']
res1 = strrepr[0].split()
self.assertEqual(exp1, res1)
exp2 = ['..']
res2 = strrepr[1].split()
self.assertEqual(exp2, res2)
def test_truncate_ndots(self):
def getndots(s):
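            # count the dots of the truncation marker ('..'/'...') in the
            # flattened repr; the marker widens with the column width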
            return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
self.assertEqual(getndots(strrepr), 2)
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
self.assertEqual(getndots(strrepr), 3)
def test_to_string_name(self):
s = Series(range(100),dtype='int64')
s.name = 'myser'
res = s.to_string(max_rows=2, name=True)
exp = '0 0\n ..\n99 99\nName: myser'
self.assertEqual(res, exp)
res = s.to_string(max_rows=2, name=False)
exp = '0 0\n ..\n99 99'
self.assertEqual(res, exp)
def test_to_string_dtype(self):
s = Series(range(100),dtype='int64')
res = s.to_string(max_rows=2, dtype=True)
exp = '0 0\n ..\n99 99\ndtype: int64'
self.assertEqual(res, exp)
res = s.to_string(max_rows=2, dtype=False)
exp = '0 0\n ..\n99 99'
self.assertEqual(res, exp)
def test_to_string_length(self):
s = Series(range(100),dtype='int64')
res = s.to_string(max_rows=2, length=True)
exp = '0 0\n ..\n99 99\nLength: 100'
self.assertEqual(res, exp)
def test_to_string_na_rep(self):
s = pd.Series(index=range(100))
res = s.to_string(na_rep='foo', max_rows=2)
exp = '0 foo\n ..\n99 foo'
self.assertEqual(res, exp)
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype='float64')
res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
max_rows=2)
exp = '0 0.0\n ..\n9 9.0'
self.assertEqual(res, exp)
def test_to_string_header(self):
s = pd.Series(range(10),dtype='int64')
s.index.name = 'foo'
res = s.to_string(header=True, max_rows=2)
exp = 'foo\n0 0\n ..\n9 9'
self.assertEqual(res, exp)
res = s.to_string(header=False, max_rows=2)
exp = '0 0\n ..\n9 9'
self.assertEqual(res, exp)
class TestEngFormatter(tm.TestCase):
_multiprocess_can_split_ = True
def test_eng_float_formatter(self):
df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
fmt.set_eng_float_format()
result = df.to_string()
expected = (' A\n'
'0 1.410E+00\n'
'1 141.000E+00\n'
'2 14.100E+03\n'
'3 1.410E+06')
self.assertEqual(result, expected)
fmt.set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = (' A\n'
'0 1.410\n'
'1 141.000\n'
'2 14.100k\n'
'3 1.410M')
self.assertEqual(result, expected)
fmt.set_eng_float_format(accuracy=0)
result = df.to_string()
expected = (' A\n'
'0 1E+00\n'
'1 141E+00\n'
'2 14E+03\n'
'3 1E+06')
self.assertEqual(result, expected)
self.reset_display_options()
def compare(self, formatter, input, output):
formatted_input = formatter(input)
msg = ("formatting of %s results in '%s', expected '%s'"
% (str(input), formatted_input, output))
self.assertEqual(formatted_input, output, msg)
def compare_all(self, formatter, in_out):
"""
Parameters:
-----------
formatter: EngFormatter under test
in_out: list of tuples. Each tuple = (number, expected_formatting)
It is tested if 'formatter(number) == expected_formatting'.
*number* should be >= 0 because formatter(-number) == fmt is also
tested. *fmt* is derived from *expected_formatting*
"""
for input, output in in_out:
self.compare(formatter, input, output)
self.compare(formatter, -input, "-" + output[1:])
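    # engineering notation steps in powers of 10**3; the SI prefixes run
    # from yocto (y, 1e-24) up to yotta (Y, 1e+24)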
def test_exponents_with_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [(f * 10 ** -24, " 1.414y"),
(f * 10 ** -23, " 14.142y"),
(f * 10 ** -22, " 141.421y"),
(f * 10 ** -21, " 1.414z"),
(f * 10 ** -20, " 14.142z"),
(f * 10 ** -19, " 141.421z"),
(f * 10 ** -18, " 1.414a"),
(f * 10 ** -17, " 14.142a"),
(f * 10 ** -16, " 141.421a"),
(f * 10 ** -15, " 1.414f"),
(f * 10 ** -14, " 14.142f"),
(f * 10 ** -13, " 141.421f"),
(f * 10 ** -12, " 1.414p"),
(f * 10 ** -11, " 14.142p"),
(f * 10 ** -10, " 141.421p"),
(f * 10 ** -9, " 1.414n"),
(f * 10 ** -8, " 14.142n"),
(f * 10 ** -7, " 141.421n"),
(f * 10 ** -6, " 1.414u"),
(f * 10 ** -5, " 14.142u"),
(f * 10 ** -4, " 141.421u"),
(f * 10 ** -3, " 1.414m"),
(f * 10 ** -2, " 14.142m"),
(f * 10 ** -1, " 141.421m"),
(f * 10 ** 0, " 1.414"),
(f * 10 ** 1, " 14.142"),
(f * 10 ** 2, " 141.421"),
(f * 10 ** 3, " 1.414k"),
(f * 10 ** 4, " 14.142k"),
(f * 10 ** 5, " 141.421k"),
(f * 10 ** 6, " 1.414M"),
(f * 10 ** 7, " 14.142M"),
(f * 10 ** 8, " 141.421M"),
(f * 10 ** 9, " 1.414G"),
(f * 10 ** 10, " 14.142G"),
(f * 10 ** 11, " 141.421G"),
(f * 10 ** 12, " 1.414T"),
(f * 10 ** 13, " 14.142T"),
(f * 10 ** 14, " 141.421T"),
(f * 10 ** 15, " 1.414P"),
(f * 10 ** 16, " 14.142P"),
(f * 10 ** 17, " 141.421P"),
(f * 10 ** 18, " 1.414E"),
(f * 10 ** 19, " 14.142E"),
(f * 10 ** 20, " 141.421E"),
(f * 10 ** 21, " 1.414Z"),
(f * 10 ** 22, " 14.142Z"),
(f * 10 ** 23, " 141.421Z"),
(f * 10 ** 24, " 1.414Y"),
(f * 10 ** 25, " 14.142Y"),
(f * 10 ** 26, " 141.421Y")]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [(f * 10 ** -24, " 3.1416E-24"),
(f * 10 ** -23, " 31.4159E-24"),
(f * 10 ** -22, " 314.1593E-24"),
(f * 10 ** -21, " 3.1416E-21"),
(f * 10 ** -20, " 31.4159E-21"),
(f * 10 ** -19, " 314.1593E-21"),
(f * 10 ** -18, " 3.1416E-18"),
(f * 10 ** -17, " 31.4159E-18"),
(f * 10 ** -16, " 314.1593E-18"),
(f * 10 ** -15, " 3.1416E-15"),
(f * 10 ** -14, " 31.4159E-15"),
(f * 10 ** -13, " 314.1593E-15"),
(f * 10 ** -12, " 3.1416E-12"),
(f * 10 ** -11, " 31.4159E-12"),
(f * 10 ** -10, " 314.1593E-12"),
(f * 10 ** -9, " 3.1416E-09"),
(f * 10 ** -8, " 31.4159E-09"),
(f * 10 ** -7, " 314.1593E-09"),
(f * 10 ** -6, " 3.1416E-06"),
(f * 10 ** -5, " 31.4159E-06"),
(f * 10 ** -4, " 314.1593E-06"),
(f * 10 ** -3, " 3.1416E-03"),
(f * 10 ** -2, " 31.4159E-03"),
(f * 10 ** -1, " 314.1593E-03"),
(f * 10 ** 0, " 3.1416E+00"),
(f * 10 ** 1, " 31.4159E+00"),
(f * 10 ** 2, " 314.1593E+00"),
(f * 10 ** 3, " 3.1416E+03"),
(f * 10 ** 4, " 31.4159E+03"),
(f * 10 ** 5, " 314.1593E+03"),
(f * 10 ** 6, " 3.1416E+06"),
(f * 10 ** 7, " 31.4159E+06"),
(f * 10 ** 8, " 314.1593E+06"),
(f * 10 ** 9, " 3.1416E+09"),
(f * 10 ** 10, " 31.4159E+09"),
(f * 10 ** 11, " 314.1593E+09"),
(f * 10 ** 12, " 3.1416E+12"),
(f * 10 ** 13, " 31.4159E+12"),
(f * 10 ** 14, " 314.1593E+12"),
(f * 10 ** 15, " 3.1416E+15"),
(f * 10 ** 16, " 31.4159E+15"),
(f * 10 ** 17, " 314.1593E+15"),
(f * 10 ** 18, " 3.1416E+18"),
(f * 10 ** 19, " 31.4159E+18"),
(f * 10 ** 20, " 314.1593E+18"),
(f * 10 ** 21, " 3.1416E+21"),
(f * 10 ** 22, " 31.4159E+21"),
(f * 10 ** 23, " 314.1593E+21"),
(f * 10 ** 24, " 3.1416E+24"),
(f * 10 ** 25, " 31.4159E+24"),
(f * 10 ** 26, " 314.1593E+24")]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [(5.55555, ' 5.556'),
(55.5555, ' 55.556'),
(555.555, ' 555.555'),
(5555.55, ' 5.556k'),
(55555.5, ' 55.556k'),
(555555, ' 555.555k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [(5.55555, ' 5.6'),
(55.5555, ' 55.6'),
(555.555, ' 555.6'),
(5555.55, ' 5.6k'),
(55555.5, ' 55.6k'),
(555555, ' 555.6k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [(5.55555, ' 6'),
(55.5555, ' 56'),
(555.555, ' 556'),
(5555.55, ' 6k'),
(55555.5, ' 56k'),
(555555, ' 556k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
self.assertEqual(result, u(' 0.000'))
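# Some platforms (notably Windows under Python 2) render exponents with
# three digits ('1.7e+008'); this helper lets tests adapt expectations.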
def _three_digit_exp():
return '%.4g' % 1.7e8 == '1.7e+008'
class TestFloatArrayFormatter(tm.TestCase):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
self.assertTrue(len(result) == 0)
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
self.assertEqual(result[0], " 12")
self.assertEqual(result[1], " 0")
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
            d = pd.DataFrame({'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7,
                                       4.999e-7, 5e-7, 5.0001e-7, 6e-7,
                                       9.999e-7, 1e-6, 1.0001e-6, 2e-6,
                                       4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
expected_output={
(0,6):' col1\n0 9.999000e-08\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
(1,6):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
(1,8):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07\n6 5.000100e-07\n7 6.000000e-07',
(8,16):' col1\n8 9.999000e-07\n9 1.000000e-06\n10 1.000100e-06\n11 2.000000e-06\n12 4.999000e-06\n13 5.000000e-06\n14 5.000100e-06\n15 6.000000e-06',
(9,16):' col1\n9 0.000001\n10 0.000001\n11 0.000002\n12 0.000005\n13 0.000005\n14 0.000005\n15 0.000006'
}
for (start, stop), v in expected_output.items():
self.assertEqual(str(d[start:stop]), v)
def test_too_long(self):
# GH 10451
with pd.option_context('display.precision', 4):
            # need both a number > 1e8 and something that normally
            # formats to having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
self.assertEqual(str(df), ' x\n0 12345.6789')
df = pd.DataFrame(dict(x=[2e8]))
self.assertEqual(str(df), ' x\n0 200000000')
df = pd.DataFrame(dict(x=[12345.6789, 2e8]))
self.assertEqual(str(df), ' x\n0 1.2346e+04\n1 2.0000e+08')
class TestRepr_timedelta64(tm.TestCase):
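    # _repr_base(format=...) controls the detail shown: the default and
    # 'even_day' drop the time part for whole days, 'sub_day' also drops
    # a zero day component, 'long' always shows days and time, and 'all'
    # pads the time out to nanosecond precision.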
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base()
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "0 days")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_even_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='even_day')
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "0 days")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='sub_day')
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "00:00:00")
self.assertEqual(drepr(delta_1s), "00:00:01")
self.assertEqual(drepr(delta_500ms), "00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='long')
self.assertEqual(drepr(delta_1d), "1 days 00:00:00")
self.assertEqual(drepr(-delta_1d), "-1 days +00:00:00")
self.assertEqual(drepr(delta_0d), "0 days 00:00:00")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_all(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1ns = pd.to_timedelta(1, unit='ns')
drepr = lambda x: x._repr_base(format='all')
self.assertEqual(drepr(delta_1d), "1 days 00:00:00.000000000")
self.assertEqual(drepr(delta_0d), "0 days 00:00:00.000000000")
self.assertEqual(drepr(delta_1ns), "0 days 00:00:00.000000001")
class TestTimedelta64Formatter(tm.TestCase):
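    # box=True wraps each formatted timedelta in quotes, as the values
    # appear in a TimedeltaIndex repr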
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
self.assertEqual(result[1].strip(), "'1 days'")
result = fmt.Timedelta64Formatter(x[1:2],box=True).get_result()
self.assertEqual(result[0].strip(), "'1 days'")
result = fmt.Timedelta64Formatter(x,box=False).get_result()
self.assertEqual(result[0].strip(), "0 days")
self.assertEqual(result[1].strip(), "1 days")
result = fmt.Timedelta64Formatter(x[1:2],box=False).get_result()
self.assertEqual(result[0].strip(), "1 days")
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
self.assertEqual(result[1].strip(), "'-1 days'")
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y,box=True).get_result()
self.assertEqual(result[0].strip(), "'00:00:00'")
self.assertEqual(result[1].strip(), "'00:00:01'")
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y,box=True).get_result()
self.assertEqual(result[0].strip(), "'00:00:00'")
self.assertEqual(result[1].strip(), "'-1 days +23:59:59'")
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
class TestDatetime64Formatter(tm.TestCase):
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 00:00:00")
self.assertEqual(result[1].strip(), "2013-01-01 12:00:00")
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01")
self.assertEqual(result[1].strip(), "2013-01-02")
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200")
def test_dates_display(self):
# 10170
        # make sure that we display date formatting consistently
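        # one resolution is chosen for the whole column (the finest unit
        # any value needs), so all entries are formatted uniformly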
x = Series(date_range('20130101 09:00:00',periods=5,freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-05 09:00:00")
x = Series(date_range('20130101 09:00:00',periods=5,freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:04")
x = Series(date_range('20130101 09:00:00',periods=5,freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.004")
x = Series(date_range('20130101 09:00:00',periods=5,freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000004")
x = Series(date_range('20130101 09:00:00',periods=5,freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000000004")
class TestNaTFormatting(tm.TestCase):
def test_repr(self):
self.assertEqual(repr(pd.NaT), "NaT")
def test_str(self):
self.assertEqual(str(pd.NaT), "NaT")
class TestDatetimeIndexFormat(tm.TestCase):
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
self.assertEqual(formatted[0], "2003-01-01 12:00:00")
self.assertEqual(formatted[1], "NaT")
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
self.assertEqual(formatted[0], "2003-01-01")
self.assertEqual(formatted[1], "NaT")
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013,1,1)], utc=True).format()
self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
formatted = pd.to_datetime([datetime(2013,1,1), pd.NaT], utc=True).format()
self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
    def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(date_format="%m-%d-%Y", na_rep="UT")
self.assertEqual(formatted[0], "02-01-2003")
self.assertEqual(formatted[1], "UT")
class TestDatetimeIndexUnicode(tm.TestCase):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1)]))
self.assertTrue("['2013-01-01'," in text)
self.assertTrue(", '2014-01-01']" in text)
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1,12), datetime(2014,1,1)]))
self.assertTrue("'2013-01-01 00:00:00'," in text)
self.assertTrue("'2014-01-01 00:00:00']" in text)
class TestStringRepTimestamp(tm.TestCase):
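    # str(Timestamp) should match str(datetime) exactly whenever the value
    # has no sub-microsecond component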
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
ts_nanos_only = Timestamp(200)
self.assertEqual(str(ts_nanos_only), "1970-01-01 00:00:00.000000200")
ts_nanos_micros = Timestamp(1200)
self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200")
def test_tz_pytz(self):
tm._skip_if_no_pytz()
import pytz
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
def test_tz_dateutil(self):
tm._skip_if_no_dateutil()
import dateutil
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| [
"[email protected]"
] | |
39643dc2ed9ecf04dec6ff9dde56590ba88e04a0 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_relevel.py | cd2911c2ef668d093498efb7a014986253e94b6f | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 930 | py | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
def h2o_H2OFrame_relevel():
"""
Python API test: h2o.frame.H2OFrame.relevel(y)
"""
python_lists = np.random.randint(-5,5, (100, 2))
h2oframe = h2o.H2OFrame(python_obj=python_lists)
newFrame = h2oframe.asfactor()
allLevels = newFrame.levels()
lastLevels = len(allLevels[0])-1
newZeroLevel = allLevels[0][lastLevels]
newFrame[0] = newFrame[0].relevel(newZeroLevel) # set last level as 0
newLevels = newFrame.levels()
assert allLevels != newLevels, "h2o.H2OFrame.relevel() command is not working." # should not equal
assert newLevels[0][0]==allLevels[0][lastLevels], "h2o.H2OFrame.relevel() command is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_relevel)
else:
h2o_H2OFrame_relevel()
| [
"[email protected]"
] | |
20441db34cdbb7bf136e9eb5365a49f9a7aa8058 | 65fce73a1e6a36718238cdef09a17493b19532a0 | /16/test/test_document_frequency_response_entity.py | c5787167fde15302326b94889bfb8d06f4cdb9e8 | [
"Apache-2.0"
] | permissive | apitore/apitore-sdk-python | eb419589609efb86bd279cd1733c2a03cdc03680 | c0814c5635ddd09e9a20fcb155b62122bee41d33 | refs/heads/master | 2020-03-21T10:06:34.557781 | 2018-06-23T21:26:27 | 2018-06-23T21:26:27 | 138,434,217 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # coding: utf-8
"""
Document frequency APIs
Document frequency of Wikipedia.<BR />[Endpoint] https://api.apitore.com/api/16 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.document_frequency_response_entity import DocumentFrequencyResponseEntity # noqa: E501
from swagger_client.rest import ApiException
class TestDocumentFrequencyResponseEntity(unittest.TestCase):
"""DocumentFrequencyResponseEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocumentFrequencyResponseEntity(self):
"""Test DocumentFrequencyResponseEntity"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.document_frequency_response_entity.DocumentFrequencyResponseEntity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
cd5c3d30b568aaa4322afa96b7a059e9f2d2c049 | 68d9fffda9c1ee0f4819371067adfd4985332319 | /python/108.将有序数组转换为二叉搜索树.py | 987eb6ba870034bcba75c49281ce7e0aac793c31 | [
"MIT"
] | permissive | Wanger-SJTU/leetcode-solutions | ade9486cef05ede6fa44cbbb5d726037518fac15 | eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4 | refs/heads/master | 2023-04-11T19:56:13.561234 | 2021-05-10T12:00:28 | 2021-05-10T12:00:28 | 129,606,869 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
return self.buildBST(nums)
def buildBST(self, nums):
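        # Use the middle element as the root so the two halves stay balanced,
        # then recurse on each half. Slicing copies the halves, so this runs
        # in O(n log n); passing (lo, hi) indices instead would make it O(n).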
if not nums:
return None
mid = len(nums)//2
node = TreeNode(nums[mid])
node.left = self.buildBST(nums[:mid])
node.right = self.buildBST(nums[mid+1:])
        return node
| [
"[email protected]"
] | |
36066f0105b092789e16eb202a786c374dd4c3c3 | bfdab27f224d9cac02e319fe55b53172fbf8d1a2 | /motion_editor_core/data/atlas_old/positions/arm/vi4_kal_arm56l.py | 85fc1994e6fb0a439a7f15c15feb99ec38484963 | [] | no_license | tu-darmstadt-ros-pkg/motion_editor | c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71 | 178a7564b18420748e1ca4413849a44965823655 | refs/heads/master | 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 | Python | UTF-8 | Python | false | false | 71 | py | { 'vi4_kal_arm56l': [-1.936, -0.7718, 3.1416, -0.2321, 0.509, 0.2098]}
| [
"[email protected]"
] | |
00b5fbd1b10e3a326da7fa05e534df9b62574feb | 5fd32c6c52fda2739f3423be9d11fb932611bea5 | /Python/bosch/bbs_mop_crawl.py | c13f3b74571579a28adc094bd5c4d05c085f4bd4 | [] | no_license | luogangyi/Spider | 446765886c328602cd4224bfe6b7b5e51633a7d7 | e703f69399854331a141a2f2270d4e9b9e2c63e3 | refs/heads/master | 2021-01-18T18:13:03.048432 | 2014-06-10T17:21:46 | 2014-06-10T17:21:46 | 11,663,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #!/usr/bin/python
#-*-coding:utf-8-*-
from config import *
from bbs_utils import *
from utils import *
from baidu import Baidu
from goole_search import Google
MOP_INFO_SOURCE_ID = 20
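# Source id for dzh.mop.com; passed to the search-engine crawlers below and to
# store_error() when a crawl fails.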
def main():
    try:
        obj = Baidu(MOP_INFO_SOURCE_ID, 'dzh.mop.com', 'bbs')
        obj.main()
    except Exception, e:
        store_error(MOP_INFO_SOURCE_ID)
        bbs_logger.exception(e)
    try:
        obj = Google(MOP_INFO_SOURCE_ID, 'dzh.mop.com', 'bbs')
        obj.main()
    except Exception, e:
        store_error(MOP_INFO_SOURCE_ID)
        bbs_logger.exception(e)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
38bbe3bd03f55ab1849071c514c13807a7883541 | 488de2f3cadeb866ccbe4064411f7db5d3dc3a57 | /ttt.py | 252be389ef5f8e434dbf58c94879cffea666b1a6 | [] | no_license | yp000925/Holo_synthetic | 8a23d587462e79efe5ba27f8c0a6ad8d9fc028b9 | 711b449bd6295a95d2a2a6e73fcea47c8058dad4 | refs/heads/master | 2023-08-30T04:39:16.820671 | 2021-10-31T04:14:08 | 2021-10-31T04:14:08 | 317,855,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
import pandas as pd
from PIL import Image
class myDataloader(Dataset):
"""
Dataset for 3D particle detection using capsule net
"""
def __init__(self, root_dir, file_name = 'train_data.csv', transform = None, size=1024):
'''
:param holo_dir: directory for holograms
:param depthmap_dir: directory for depthmap
:param xycentre_dir: directory for xycentre
:param file_name: file_name
:param transform:
'''
# self.holo_dir = 'holo_dir'
# self.depthmap_dir = 'depthmap_dir'
# self.xycentre_dir = xycentre_dir
self.root_dir = root_dir
self.file_name = file_name
self.transform = transform
self.file = pd.read_csv(os.path.join(root_dir,file_name))
        self.N = size
def __getitem__(self, idx):
data = self.file.iloc[idx]
holo_path = os.path.join(self.root_dir, 'hologram', data['hologram'])
param_path = os.path.join(self.root_dir, 'param', data['param'])
img = self.read_img(holo_path)
param = self.load_param(param_path)
size_projection,xycentre,xy_mask = self.get_maps(param)
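        # self.transform is stored by __init__ but was never applied; use it on
        # the hologram when one is provided
        if self.transform is not None:
            img = self.transform(img)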
return img,size_projection,xycentre,xy_mask
def __len__(self):
return len(self.file)
def get_maps(self,param):
size_projection, xy_mask = self.get_xy_projection(param)
xycentre = self.get_xycentre(param)
return (size_projection,xycentre,xy_mask)
def get_xy_projection(self,param):
"""
        :param param: dataframe of particle px, py, pz, psize in pixel units
        :return: map: xy-projection map whose pixel values encode depth, scaled to the range 0-1
                 mask: overlap indicator map; 0 marks overlapping pixels, which are ignored when computing the loss
"""
arr = np.zeros((256,self.N,self.N))
        particle_field = np.zeros(arr.shape)  # a 1 marks the presence of a particle
for _,particle in param.iterrows():
px,py,pz,psize = particle.x,particle.y,particle.z,particle.size
Y, X = np.mgrid[:self.N, :self.N]
Y = Y - py
X = X - px
dist_sq = Y ** 2 + X ** 2
z_slice = np.zeros((self.N,self.N))
particle_field_slice = np.zeros((self.N,self.N))
z_slice[dist_sq <= psize ** 2] = pz
particle_field_slice[dist_sq <= psize ** 2] = 1
            arr[pz,:,:] += z_slice  # a single depth slice may contain several particles
particle_field[pz,:,:] += particle_field_slice
map = arr.sum(axis=0)/255.0
        # check whether any particles overlap in the projection
particle_field_proj = particle_field.sum(axis=0)
mask_map = np.ones((self.N,self.N))
        mask_map[particle_field_proj>1] = 0  # when computing the loss later, only non-overlapping pixels are used, i.e. pixels where the mask is 0 are ignored
return map, mask_map
def get_xycentre(self,param):
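        # binary map with a single 1.0 at each particle's (y, x) centre pixel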
arr = np.zeros((self.N, self.N))
idx_x = np.array(param['x'].values)
idx_y = np.array(param['y'].values)
arr[(idx_y,idx_x)] = 1.0
return arr
def load_param(self,param_path):
param = pd.read_csv(param_path)
x = param['x'].values
y = param['y'].values
z = param['z'].values
size = param['size']
frame = 10 * 1e-3
N=1024
xyres = frame/N
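        # Map metric coordinates (centred on the optical axis, frame = 10 mm)
        # to pixel indices, and quantise depth z in [1 cm, 3 cm] to [0, 255].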
px = (x / frame * N + N / 2).astype(np.int)
py = (N / 2 + y / frame * N).astype(np.int)
pz = ((z - 1 * 1e-2)/ (3 * 1e-2 - 1 * 1e-2)*255).astype(np.int)
psize = (size/xyres).astype(np.int)
param_pixel = pd.DataFrame()
param_pixel['x'] = px
param_pixel['y'] = py
param_pixel['z'] = pz
param_pixel['size'] = psize
return param_pixel
def read_img(self,img_name):
img = Image.open(img_name)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype(np.float32)
return img/255.0
if __name__ == "__main__":
root_dir ='/Users/zhangyunping/PycharmProjects/Holo_synthetic/data_holo'
file_path = 'check.csv'
dataset = myDataloader(root_dir,file_path)
# img = Image.open("/Users/zhangyunping/PycharmProjects/Holo_synthetic/data_holo/hologram/0.jpg")
    # param = dataset.load_param(root_dir + '/param/0.csv')
    # img = dataset.read_img(root_dir + '/hologram/0.jpg')
# size_projection, xycentre, xy_mask = dataloader.get_maps(param)
# size_p = Image.fromarray(size_projection*255.0)
# size_p.show()
# xyc = Image.fromarray(xycentre*255.0)
# xyc.show()
# xy_m = Image.fromarray(xy_mask*255.0)
# xy_m.show()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
num_workers=1)
for data in dataloader:
img, size_projection, xycentre, xy_mask = data
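        # each element is batched by the DataLoader: img -> (B, N, N, 3);
        # size_projection, xycentre and xy_mask -> (B, N, N)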
break
| [
"[email protected]"
] | |